[TG3]: Add 5722 and 5756 support.
[firefly-linux-kernel-4.4.55.git] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
41
42 #include <net/checksum.h>
43
44 #include <asm/system.h>
45 #include <asm/io.h>
46 #include <asm/byteorder.h>
47 #include <asm/uaccess.h>
48
49 #ifdef CONFIG_SPARC64
50 #include <asm/idprom.h>
51 #include <asm/oplib.h>
52 #include <asm/pbm.h>
53 #endif
54
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
57 #else
58 #define TG3_VLAN_TAG_USED 0
59 #endif
60
61 #ifdef NETIF_F_TSO
62 #define TG3_TSO_SUPPORT 1
63 #else
64 #define TG3_TSO_SUPPORT 0
65 #endif
66
67 #include "tg3.h"
68
69 #define DRV_MODULE_NAME         "tg3"
70 #define PFX DRV_MODULE_NAME     ": "
71 #define DRV_MODULE_VERSION      "3.65"
72 #define DRV_MODULE_RELDATE      "August 07, 2006"
73
74 #define TG3_DEF_MAC_MODE        0
75 #define TG3_DEF_RX_MODE         0
76 #define TG3_DEF_TX_MODE         0
77 #define TG3_DEF_MSG_ENABLE        \
78         (NETIF_MSG_DRV          | \
79          NETIF_MSG_PROBE        | \
80          NETIF_MSG_LINK         | \
81          NETIF_MSG_TIMER        | \
82          NETIF_MSG_IFDOWN       | \
83          NETIF_MSG_IFUP         | \
84          NETIF_MSG_RX_ERR       | \
85          NETIF_MSG_TX_ERR)
86
87 /* length of time before we decide the hardware is borked,
88  * and dev->tx_timeout() should be called to fix the problem
89  */
90 #define TG3_TX_TIMEOUT                  (5 * HZ)
91
92 /* hardware minimum and maximum for a single frame's data payload */
93 #define TG3_MIN_MTU                     60
94 #define TG3_MAX_MTU(tp) \
95         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
96
97 /* These numbers seem to be hard coded in the NIC firmware somehow.
98  * You can't change the ring sizes, but you can change where you place
99  * them in the NIC onboard memory.
100  */
101 #define TG3_RX_RING_SIZE                512
102 #define TG3_DEF_RX_RING_PENDING         200
103 #define TG3_RX_JUMBO_RING_SIZE          256
104 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
105
106 /* Do not place this n-ring entries value into the tp struct itself,
107  * we really want to expose these constants to GCC so that modulo et
108  * al.  operations are done with shifts and masks instead of with
109  * hw multiply/modulo instructions.  Another solution would be to
110  * replace things like '% foo' with '& (foo - 1)'.
111  */
112 #define TG3_RX_RCB_RING_SIZE(tp)        \
113         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
114
115 #define TG3_TX_RING_SIZE                512
116 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
117
118 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
119                                  TG3_RX_RING_SIZE)
120 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
121                                  TG3_RX_JUMBO_RING_SIZE)
122 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
123                                    TG3_RX_RCB_RING_SIZE(tp))
124 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
125                                  TG3_TX_RING_SIZE)
126 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
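/* Illustrative sketch of the mask trick referred to in the comment above
 * (not part of the driver): because TG3_TX_RING_SIZE is a power of two
 * (512), advancing a ring index with a modulo
 *
 *	next = (i + 1) % TG3_TX_RING_SIZE;
 *
 * is equivalent to the AND used by NEXT_TX():
 *
 *	next = (i + 1) & (TG3_TX_RING_SIZE - 1);	/* 511 == 0x1ff */
 *
 * The same holds for TG3_RX_RING_SIZE (512) and TG3_RX_JUMBO_RING_SIZE (256).
 */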
127
128 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
129 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
130
131 /* minimum number of free TX descriptors required to wake up TX process */
132 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
133
134 /* number of ETHTOOL_GSTATS u64's */
135 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
136
137 #define TG3_NUM_TEST            6
138
139 static char version[] __devinitdata =
140         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
141
142 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
143 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
144 MODULE_LICENSE("GPL");
145 MODULE_VERSION(DRV_MODULE_VERSION);
146
147 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
148 module_param(tg3_debug, int, 0);
149 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
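/* Illustrative use of the parameter, assuming the standard NETIF_MSG_* bit
 * values (they are not spelled out in this file): loading the module with
 *
 *	modprobe tg3 tg3_debug=0x0007
 *
 * would enable NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK, while the
 * default of -1 keeps TG3_DEF_MSG_ENABLE defined above.
 */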
150
151 static struct pci_device_id tg3_pci_tbl[] = {
152         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
153         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
154         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
155         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
156         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
157         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
158         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
159         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
160         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
161         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
162         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
163         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
164         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
165         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
166         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
167         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
168         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
169         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
170         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
171         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
172         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
173         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
174         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
175         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
176         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
177         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
178         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
179         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
180         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
181         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
182         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
183         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
184         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
185         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
186         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
187         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
188         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
189         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
190         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
191         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
192         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
193         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
194         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
195         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
196         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
197         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
198         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
199         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
200         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
201         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
202         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
203         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
204         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
205         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
206         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
207         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
208         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
209         {}
210 };
211
212 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
213
214 static const struct {
215         const char string[ETH_GSTRING_LEN];
216 } ethtool_stats_keys[TG3_NUM_STATS] = {
217         { "rx_octets" },
218         { "rx_fragments" },
219         { "rx_ucast_packets" },
220         { "rx_mcast_packets" },
221         { "rx_bcast_packets" },
222         { "rx_fcs_errors" },
223         { "rx_align_errors" },
224         { "rx_xon_pause_rcvd" },
225         { "rx_xoff_pause_rcvd" },
226         { "rx_mac_ctrl_rcvd" },
227         { "rx_xoff_entered" },
228         { "rx_frame_too_long_errors" },
229         { "rx_jabbers" },
230         { "rx_undersize_packets" },
231         { "rx_in_length_errors" },
232         { "rx_out_length_errors" },
233         { "rx_64_or_less_octet_packets" },
234         { "rx_65_to_127_octet_packets" },
235         { "rx_128_to_255_octet_packets" },
236         { "rx_256_to_511_octet_packets" },
237         { "rx_512_to_1023_octet_packets" },
238         { "rx_1024_to_1522_octet_packets" },
239         { "rx_1523_to_2047_octet_packets" },
240         { "rx_2048_to_4095_octet_packets" },
241         { "rx_4096_to_8191_octet_packets" },
242         { "rx_8192_to_9022_octet_packets" },
243
244         { "tx_octets" },
245         { "tx_collisions" },
246
247         { "tx_xon_sent" },
248         { "tx_xoff_sent" },
249         { "tx_flow_control" },
250         { "tx_mac_errors" },
251         { "tx_single_collisions" },
252         { "tx_mult_collisions" },
253         { "tx_deferred" },
254         { "tx_excessive_collisions" },
255         { "tx_late_collisions" },
256         { "tx_collide_2times" },
257         { "tx_collide_3times" },
258         { "tx_collide_4times" },
259         { "tx_collide_5times" },
260         { "tx_collide_6times" },
261         { "tx_collide_7times" },
262         { "tx_collide_8times" },
263         { "tx_collide_9times" },
264         { "tx_collide_10times" },
265         { "tx_collide_11times" },
266         { "tx_collide_12times" },
267         { "tx_collide_13times" },
268         { "tx_collide_14times" },
269         { "tx_collide_15times" },
270         { "tx_ucast_packets" },
271         { "tx_mcast_packets" },
272         { "tx_bcast_packets" },
273         { "tx_carrier_sense_errors" },
274         { "tx_discards" },
275         { "tx_errors" },
276
277         { "dma_writeq_full" },
278         { "dma_write_prioq_full" },
279         { "rxbds_empty" },
280         { "rx_discards" },
281         { "rx_errors" },
282         { "rx_threshold_hit" },
283
284         { "dma_readq_full" },
285         { "dma_read_prioq_full" },
286         { "tx_comp_queue_full" },
287
288         { "ring_set_send_prod_index" },
289         { "ring_status_update" },
290         { "nic_irqs" },
291         { "nic_avoided_irqs" },
292         { "nic_tx_threshold_hit" }
293 };
294
295 static const struct {
296         const char string[ETH_GSTRING_LEN];
297 } ethtool_test_keys[TG3_NUM_TEST] = {
298         { "nvram test     (online) " },
299         { "link test      (online) " },
300         { "register test  (offline)" },
301         { "memory test    (offline)" },
302         { "loopback test  (offline)" },
303         { "interrupt test (offline)" },
304 };
305
306 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
307 {
308         writel(val, tp->regs + off);
309 }
310
311 static u32 tg3_read32(struct tg3 *tp, u32 off)
312 {
313         return (readl(tp->regs + off));
314 }
315
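/* Indirect register access: the target offset is written to the
 * TG3PCI_REG_BASE_ADDR window in PCI config space and the data is then
 * moved through TG3PCI_REG_DATA, with indirect_lock serializing the
 * two-step sequence against other users of the window.
 */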
316 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
317 {
318         unsigned long flags;
319
320         spin_lock_irqsave(&tp->indirect_lock, flags);
321         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
322         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
323         spin_unlock_irqrestore(&tp->indirect_lock, flags);
324 }
325
326 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
327 {
328         writel(val, tp->regs + off);
329         readl(tp->regs + off);
330 }
331
332 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
333 {
334         unsigned long flags;
335         u32 val;
336
337         spin_lock_irqsave(&tp->indirect_lock, flags);
338         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
339         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
340         spin_unlock_irqrestore(&tp->indirect_lock, flags);
341         return val;
342 }
343
344 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
345 {
346         unsigned long flags;
347
348         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
349                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
350                                        TG3_64BIT_REG_LOW, val);
351                 return;
352         }
353         if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
354                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
355                                        TG3_64BIT_REG_LOW, val);
356                 return;
357         }
358
359         spin_lock_irqsave(&tp->indirect_lock, flags);
360         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
361         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
362         spin_unlock_irqrestore(&tp->indirect_lock, flags);
363
364         /* In indirect mode when disabling interrupts, we also need
365          * to clear the interrupt bit in the GRC local ctrl register.
366          */
367         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
368             (val == 0x1)) {
369                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
370                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
371         }
372 }
373
374 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
375 {
376         unsigned long flags;
377         u32 val;
378
379         spin_lock_irqsave(&tp->indirect_lock, flags);
380         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
381         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
382         spin_unlock_irqrestore(&tp->indirect_lock, flags);
383         return val;
384 }
385
386 /* usec_wait specifies the wait time in usec when writing to certain registers
387  * where it is unsafe to read back the register without some delay.
388  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
389  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
390  */
391 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
392 {
393         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
394             (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
395                 /* Non-posted methods */
396                 tp->write32(tp, off, val);
397         else {
398                 /* Posted method */
399                 tg3_write32(tp, off, val);
400                 if (usec_wait)
401                         udelay(usec_wait);
402                 tp->read32(tp, off);
403         }
404         /* Wait again after the read for the posted method to guarantee that
405          * the wait time is met.
406          */
407         if (usec_wait)
408                 udelay(usec_wait);
409 }
410
411 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
412 {
413         tp->write32_mbox(tp, off, val);
414         if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
415             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
416                 tp->read32_mbox(tp, off);
417 }
418
419 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
420 {
421         void __iomem *mbox = tp->regs + off;
422         writel(val, mbox);
423         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
424                 writel(val, mbox);
425         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
426                 readl(mbox);
427 }
428
429 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
430 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
431 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
432 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
433 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
434
435 #define tw32(reg,val)           tp->write32(tp, reg, val)
436 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
437 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
438 #define tr32(reg)               tp->read32(tp, reg)
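/* Usage sketch, drawn from calls made later in this file: most registers
 * are written with tw32() or flushed with tw32_f(), while registers that
 * are unsafe to read back immediately (GRC_LOCAL_CTRL, TG3PCI_CLOCK_CTRL)
 * go through tw32_wait_f() with an explicit delay, e.g.
 *
 *	tw32_f(MAC_MODE, mac_mode);
 *	tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
 */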
439
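/* NIC SRAM is reached through a similar window: tg3_write_mem() and
 * tg3_read_mem() program TG3PCI_MEM_WIN_BASE_ADDR with the SRAM offset and
 * transfer the data through TG3PCI_MEM_WIN_DATA, either via PCI config
 * space (TG3_FLAG_SRAM_USE_CONFIG) or via the memory-mapped registers,
 * holding indirect_lock for the duration.
 */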
440 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
441 {
442         unsigned long flags;
443
444         spin_lock_irqsave(&tp->indirect_lock, flags);
445         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
446                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
447                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
448
449                 /* Always leave this as zero. */
450                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
451         } else {
452                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
453                 tw32_f(TG3PCI_MEM_WIN_DATA, val);
454
455                 /* Always leave this as zero. */
456                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
457         }
458         spin_unlock_irqrestore(&tp->indirect_lock, flags);
459 }
460
461 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
462 {
463         unsigned long flags;
464
465         spin_lock_irqsave(&tp->indirect_lock, flags);
466         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
467                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
468                 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
469
470                 /* Always leave this as zero. */
471                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
472         } else {
473                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
474                 *val = tr32(TG3PCI_MEM_WIN_DATA);
475
476                 /* Always leave this as zero. */
477                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
478         }
479         spin_unlock_irqrestore(&tp->indirect_lock, flags);
480 }
481
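/* Interrupt masking is done in two places: the MASK_PCI_INT bit in
 * TG3PCI_MISC_HOST_CTRL and a non-zero write to interrupt mailbox 0.
 * tg3_enable_ints() reverses both, writing back the last status tag so the
 * chip knows how much work the driver has already processed.
 */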
482 static void tg3_disable_ints(struct tg3 *tp)
483 {
484         tw32(TG3PCI_MISC_HOST_CTRL,
485              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
486         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
487 }
488
489 static inline void tg3_cond_int(struct tg3 *tp)
490 {
491         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
492             (tp->hw_status->status & SD_STATUS_UPDATED))
493                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
494 }
495
496 static void tg3_enable_ints(struct tg3 *tp)
497 {
498         tp->irq_sync = 0;
499         wmb();
500
501         tw32(TG3PCI_MISC_HOST_CTRL,
502              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
503         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
504                        (tp->last_tag << 24));
505         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
506                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
507                                (tp->last_tag << 24));
508         tg3_cond_int(tp);
509 }
510
511 static inline unsigned int tg3_has_work(struct tg3 *tp)
512 {
513         struct tg3_hw_status *sblk = tp->hw_status;
514         unsigned int work_exists = 0;
515
516         /* check for phy events */
517         if (!(tp->tg3_flags &
518               (TG3_FLAG_USE_LINKCHG_REG |
519                TG3_FLAG_POLL_SERDES))) {
520                 if (sblk->status & SD_STATUS_LINK_CHG)
521                         work_exists = 1;
522         }
523         /* check for RX/TX work to do */
524         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
525             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
526                 work_exists = 1;
527
528         return work_exists;
529 }
530
531 /* tg3_restart_ints
532  *  similar to tg3_enable_ints, but it accurately determines whether there
533  *  is new work pending and can return without flushing the PIO write
534  *  which reenables interrupts
535  */
536 static void tg3_restart_ints(struct tg3 *tp)
537 {
538         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
539                      tp->last_tag << 24);
540         mmiowb();
541
542         /* When doing tagged status, this work check is unnecessary.
543          * The last_tag we write above tells the chip which piece of
544          * work we've completed.
545          */
546         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
547             tg3_has_work(tp))
548                 tw32(HOSTCC_MODE, tp->coalesce_mode |
549                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
550 }
551
552 static inline void tg3_netif_stop(struct tg3 *tp)
553 {
554         tp->dev->trans_start = jiffies; /* prevent tx timeout */
555         netif_poll_disable(tp->dev);
556         netif_tx_disable(tp->dev);
557 }
558
559 static inline void tg3_netif_start(struct tg3 *tp)
560 {
561         netif_wake_queue(tp->dev);
562         /* NOTE: unconditional netif_wake_queue is only appropriate
563          * so long as all callers are assured to have free tx slots
564          * (such as after tg3_init_hw)
565          */
566         netif_poll_enable(tp->dev);
567         tp->hw_status->status |= SD_STATUS_UPDATED;
568         tg3_enable_ints(tp);
569 }
570
571 static void tg3_switch_clocks(struct tg3 *tp)
572 {
573         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
574         u32 orig_clock_ctrl;
575
576         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
577                 return;
578
579         orig_clock_ctrl = clock_ctrl;
580         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
581                        CLOCK_CTRL_CLKRUN_OENABLE |
582                        0x1f);
583         tp->pci_clock_ctrl = clock_ctrl;
584
585         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
586                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
587                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
588                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
589                 }
590         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
591                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
592                             clock_ctrl |
593                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
594                             40);
595                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
596                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
597                             40);
598         }
599         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
600 }
601
602 #define PHY_BUSY_LOOPS  5000
603
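/* MII management access: tg3_readphy() and tg3_writephy() assemble a frame
 * in MAC_MI_COM (PHY address, register address, READ or WRITE command plus
 * START) and poll MI_COM_BUSY for up to PHY_BUSY_LOOPS iterations.  When
 * the MAC is auto-polling the PHY (MAC_MI_MODE_AUTO_POLL), polling is
 * suspended around the access and restored afterwards.
 */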
604 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
605 {
606         u32 frame_val;
607         unsigned int loops;
608         int ret;
609
610         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
611                 tw32_f(MAC_MI_MODE,
612                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
613                 udelay(80);
614         }
615
616         *val = 0x0;
617
618         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
619                       MI_COM_PHY_ADDR_MASK);
620         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
621                       MI_COM_REG_ADDR_MASK);
622         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
623
624         tw32_f(MAC_MI_COM, frame_val);
625
626         loops = PHY_BUSY_LOOPS;
627         while (loops != 0) {
628                 udelay(10);
629                 frame_val = tr32(MAC_MI_COM);
630
631                 if ((frame_val & MI_COM_BUSY) == 0) {
632                         udelay(5);
633                         frame_val = tr32(MAC_MI_COM);
634                         break;
635                 }
636                 loops -= 1;
637         }
638
639         ret = -EBUSY;
640         if (loops != 0) {
641                 *val = frame_val & MI_COM_DATA_MASK;
642                 ret = 0;
643         }
644
645         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
646                 tw32_f(MAC_MI_MODE, tp->mi_mode);
647                 udelay(80);
648         }
649
650         return ret;
651 }
652
653 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
654 {
655         u32 frame_val;
656         unsigned int loops;
657         int ret;
658
659         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
660                 tw32_f(MAC_MI_MODE,
661                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
662                 udelay(80);
663         }
664
665         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
666                       MI_COM_PHY_ADDR_MASK);
667         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
668                       MI_COM_REG_ADDR_MASK);
669         frame_val |= (val & MI_COM_DATA_MASK);
670         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
671
672         tw32_f(MAC_MI_COM, frame_val);
673
674         loops = PHY_BUSY_LOOPS;
675         while (loops != 0) {
676                 udelay(10);
677                 frame_val = tr32(MAC_MI_COM);
678                 if ((frame_val & MI_COM_BUSY) == 0) {
679                         udelay(5);
680                         frame_val = tr32(MAC_MI_COM);
681                         break;
682                 }
683                 loops -= 1;
684         }
685
686         ret = -EBUSY;
687         if (loops != 0)
688                 ret = 0;
689
690         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
691                 tw32_f(MAC_MI_MODE, tp->mi_mode);
692                 udelay(80);
693         }
694
695         return ret;
696 }
697
698 static void tg3_phy_set_wirespeed(struct tg3 *tp)
699 {
700         u32 val;
701
702         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
703                 return;
704
705         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
706             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
707                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
708                              (val | (1 << 15) | (1 << 4)));
709 }
710
711 static int tg3_bmcr_reset(struct tg3 *tp)
712 {
713         u32 phy_control;
714         int limit, err;
715
716         /* OK, reset it, and poll the BMCR_RESET bit until it
717          * clears or we time out.
718          */
719         phy_control = BMCR_RESET;
720         err = tg3_writephy(tp, MII_BMCR, phy_control);
721         if (err != 0)
722                 return -EBUSY;
723
724         limit = 5000;
725         while (limit--) {
726                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
727                 if (err != 0)
728                         return -EBUSY;
729
730                 if ((phy_control & BMCR_RESET) == 0) {
731                         udelay(40);
732                         break;
733                 }
734                 udelay(10);
735         }
736         if (limit < 0)
737                 return -EBUSY;
738
739         return 0;
740 }
741
742 static int tg3_wait_macro_done(struct tg3 *tp)
743 {
744         int limit = 100;
745
746         while (limit--) {
747                 u32 tmp32;
748
749                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
750                         if ((tmp32 & 0x1000) == 0)
751                                 break;
752                 }
753         }
754         if (limit < 0)
755                 return -EBUSY;
756
757         return 0;
758 }
759
760 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
761 {
762         static const u32 test_pat[4][6] = {
763         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
764         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
765         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
766         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
767         };
768         int chan;
769
770         for (chan = 0; chan < 4; chan++) {
771                 int i;
772
773                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
774                              (chan * 0x2000) | 0x0200);
775                 tg3_writephy(tp, 0x16, 0x0002);
776
777                 for (i = 0; i < 6; i++)
778                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
779                                      test_pat[chan][i]);
780
781                 tg3_writephy(tp, 0x16, 0x0202);
782                 if (tg3_wait_macro_done(tp)) {
783                         *resetp = 1;
784                         return -EBUSY;
785                 }
786
787                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
788                              (chan * 0x2000) | 0x0200);
789                 tg3_writephy(tp, 0x16, 0x0082);
790                 if (tg3_wait_macro_done(tp)) {
791                         *resetp = 1;
792                         return -EBUSY;
793                 }
794
795                 tg3_writephy(tp, 0x16, 0x0802);
796                 if (tg3_wait_macro_done(tp)) {
797                         *resetp = 1;
798                         return -EBUSY;
799                 }
800
801                 for (i = 0; i < 6; i += 2) {
802                         u32 low, high;
803
804                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
805                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
806                             tg3_wait_macro_done(tp)) {
807                                 *resetp = 1;
808                                 return -EBUSY;
809                         }
810                         low &= 0x7fff;
811                         high &= 0x000f;
812                         if (low != test_pat[chan][i] ||
813                             high != test_pat[chan][i+1]) {
814                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
815                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
816                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
817
818                                 return -EBUSY;
819                         }
820                 }
821         }
822
823         return 0;
824 }
825
826 static int tg3_phy_reset_chanpat(struct tg3 *tp)
827 {
828         int chan;
829
830         for (chan = 0; chan < 4; chan++) {
831                 int i;
832
833                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
834                              (chan * 0x2000) | 0x0200);
835                 tg3_writephy(tp, 0x16, 0x0002);
836                 for (i = 0; i < 6; i++)
837                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
838                 tg3_writephy(tp, 0x16, 0x0202);
839                 if (tg3_wait_macro_done(tp))
840                         return -EBUSY;
841         }
842
843         return 0;
844 }
845
846 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
847 {
848         u32 reg32, phy9_orig;
849         int retries, do_phy_reset, err;
850
851         retries = 10;
852         do_phy_reset = 1;
853         do {
854                 if (do_phy_reset) {
855                         err = tg3_bmcr_reset(tp);
856                         if (err)
857                                 return err;
858                         do_phy_reset = 0;
859                 }
860
861                 /* Disable transmitter and interrupt.  */
862                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
863                         continue;
864
865                 reg32 |= 0x3000;
866                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
867
868                 /* Set full-duplex, 1000 mbps.  */
869                 tg3_writephy(tp, MII_BMCR,
870                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
871
872                 /* Set to master mode.  */
873                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
874                         continue;
875
876                 tg3_writephy(tp, MII_TG3_CTRL,
877                              (MII_TG3_CTRL_AS_MASTER |
878                               MII_TG3_CTRL_ENABLE_AS_MASTER));
879
880                 /* Enable SM_DSP_CLOCK and 6dB.  */
881                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
882
883                 /* Block the PHY control access.  */
884                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
885                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
886
887                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
888                 if (!err)
889                         break;
890         } while (--retries);
891
892         err = tg3_phy_reset_chanpat(tp);
893         if (err)
894                 return err;
895
896         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
897         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
898
899         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
900         tg3_writephy(tp, 0x16, 0x0000);
901
902         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
903             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
904                 /* Set Extended packet length bit for jumbo frames */
905                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
906         }
907         else {
908                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
909         }
910
911         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
912
913         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
914                 reg32 &= ~0x3000;
915                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
916         } else if (!err)
917                 err = -EBUSY;
918
919         return err;
920 }
921
922 static void tg3_link_report(struct tg3 *);
923
924 /* This will reset the tigon3 PHY.  The reset is performed
925  * unconditionally whenever this function is called.
926  */
927 static int tg3_phy_reset(struct tg3 *tp)
928 {
929         u32 phy_status;
930         int err;
931
932         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
933         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
934         if (err != 0)
935                 return -EBUSY;
936
937         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
938                 netif_carrier_off(tp->dev);
939                 tg3_link_report(tp);
940         }
941
942         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
943             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
944             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
945                 err = tg3_phy_reset_5703_4_5(tp);
946                 if (err)
947                         return err;
948                 goto out;
949         }
950
951         err = tg3_bmcr_reset(tp);
952         if (err)
953                 return err;
954
955 out:
956         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
957                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
958                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
959                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
960                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
961                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
962                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
963         }
964         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
965                 tg3_writephy(tp, 0x1c, 0x8d68);
966                 tg3_writephy(tp, 0x1c, 0x8d68);
967         }
968         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
969                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
970                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
971                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
972                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
973                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
974                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
975                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
976                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
977         }
978         else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
979                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
980                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
981                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
982                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
983         }
984         /* Set Extended packet length bit (bit 14) on all chips that */
985         /* support jumbo frames */
986         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
987                 /* Cannot do read-modify-write on 5401 */
988                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
989         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
990                 u32 phy_reg;
991
992                 /* Set bit 14 with read-modify-write to preserve other bits */
993                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
994                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
995                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
996         }
997
998         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
999          * jumbo frames transmission.
1000          */
1001         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1002                 u32 phy_reg;
1003
1004                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1005                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
1006                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1007         }
1008
1009         tg3_phy_set_wirespeed(tp);
1010         return 0;
1011 }
1012
1013 static void tg3_frob_aux_power(struct tg3 *tp)
1014 {
1015         struct tg3 *tp_peer = tp;
1016
1017         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
1018                 return;
1019
1020         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1021             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1022                 struct net_device *dev_peer;
1023
1024                 dev_peer = pci_get_drvdata(tp->pdev_peer);
1025                 /* remove_one() may have been run on the peer. */
1026                 if (!dev_peer)
1027                         tp_peer = tp;
1028                 else
1029                         tp_peer = netdev_priv(dev_peer);
1030         }
1031
1032         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1033             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1034             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1035             (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1036                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1037                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1038                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1039                                     (GRC_LCLCTRL_GPIO_OE0 |
1040                                      GRC_LCLCTRL_GPIO_OE1 |
1041                                      GRC_LCLCTRL_GPIO_OE2 |
1042                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
1043                                      GRC_LCLCTRL_GPIO_OUTPUT1),
1044                                     100);
1045                 } else {
1046                         u32 no_gpio2;
1047                         u32 grc_local_ctrl = 0;
1048
1049                         if (tp_peer != tp &&
1050                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1051                                 return;
1052
1053                         /* Workaround to prevent overdrawing Amps. */
1054                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1055                             ASIC_REV_5714) {
1056                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1057                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1058                                             grc_local_ctrl, 100);
1059                         }
1060
1061                         /* On 5753 and variants, GPIO2 cannot be used. */
1062                         no_gpio2 = tp->nic_sram_data_cfg &
1063                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
1064
1065                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1066                                          GRC_LCLCTRL_GPIO_OE1 |
1067                                          GRC_LCLCTRL_GPIO_OE2 |
1068                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
1069                                          GRC_LCLCTRL_GPIO_OUTPUT2;
1070                         if (no_gpio2) {
1071                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1072                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
1073                         }
1074                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1075                                                     grc_local_ctrl, 100);
1076
1077                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1078
1079                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1080                                                     grc_local_ctrl, 100);
1081
1082                         if (!no_gpio2) {
1083                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1084                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1085                                             grc_local_ctrl, 100);
1086                         }
1087                 }
1088         } else {
1089                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1090                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1091                         if (tp_peer != tp &&
1092                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1093                                 return;
1094
1095                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1096                                     (GRC_LCLCTRL_GPIO_OE1 |
1097                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1098
1099                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1100                                     GRC_LCLCTRL_GPIO_OE1, 100);
1101
1102                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1103                                     (GRC_LCLCTRL_GPIO_OE1 |
1104                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1105                 }
1106         }
1107 }
1108
1109 static int tg3_setup_phy(struct tg3 *, int);
1110
1111 #define RESET_KIND_SHUTDOWN     0
1112 #define RESET_KIND_INIT         1
1113 #define RESET_KIND_SUSPEND      2
1114
1115 static void tg3_write_sig_post_reset(struct tg3 *, int);
1116 static int tg3_halt_cpu(struct tg3 *, u32);
1117 static int tg3_nvram_lock(struct tg3 *);
1118 static void tg3_nvram_unlock(struct tg3 *);
1119
1120 static void tg3_power_down_phy(struct tg3 *tp)
1121 {
1122         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
1123                 return;
1124
1125         tg3_writephy(tp, MII_TG3_EXT_CTRL, MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1126         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1127
1128         /* The PHY should not be powered down on some chips because
1129          * of bugs.
1130          */
1131         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1132             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1133             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1134              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1135                 return;
1136         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1137 }
1138
1139 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1140 {
1141         u32 misc_host_ctrl;
1142         u16 power_control, power_caps;
1143         int pm = tp->pm_cap;
1144
1145         /* Make sure register accesses (indirect or otherwise)
1146          * will function correctly.
1147          */
1148         pci_write_config_dword(tp->pdev,
1149                                TG3PCI_MISC_HOST_CTRL,
1150                                tp->misc_host_ctrl);
1151
1152         pci_read_config_word(tp->pdev,
1153                              pm + PCI_PM_CTRL,
1154                              &power_control);
1155         power_control |= PCI_PM_CTRL_PME_STATUS;
1156         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1157         switch (state) {
1158         case PCI_D0:
1159                 power_control |= 0;
1160                 pci_write_config_word(tp->pdev,
1161                                       pm + PCI_PM_CTRL,
1162                                       power_control);
1163                 udelay(100);    /* Delay after power state change */
1164
1165                 /* Switch out of Vaux if it is not a LOM */
1166                 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
1167                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1168
1169                 return 0;
1170
1171         case PCI_D1:
1172                 power_control |= 1;
1173                 break;
1174
1175         case PCI_D2:
1176                 power_control |= 2;
1177                 break;
1178
1179         case PCI_D3hot:
1180                 power_control |= 3;
1181                 break;
1182
1183         default:
1184                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1185                        "requested.\n",
1186                        tp->dev->name, state);
1187                 return -EINVAL;
1188         };
1189
1190         power_control |= PCI_PM_CTRL_PME_ENABLE;
1191
1192         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1193         tw32(TG3PCI_MISC_HOST_CTRL,
1194              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1195
1196         if (tp->link_config.phy_is_low_power == 0) {
1197                 tp->link_config.phy_is_low_power = 1;
1198                 tp->link_config.orig_speed = tp->link_config.speed;
1199                 tp->link_config.orig_duplex = tp->link_config.duplex;
1200                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1201         }
1202
1203         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1204                 tp->link_config.speed = SPEED_10;
1205                 tp->link_config.duplex = DUPLEX_HALF;
1206                 tp->link_config.autoneg = AUTONEG_ENABLE;
1207                 tg3_setup_phy(tp, 0);
1208         }
1209
1210         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1211                 int i;
1212                 u32 val;
1213
1214                 for (i = 0; i < 200; i++) {
1215                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1216                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1217                                 break;
1218                         msleep(1);
1219                 }
1220         }
1221         tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1222                                              WOL_DRV_STATE_SHUTDOWN |
1223                                              WOL_DRV_WOL | WOL_SET_MAGIC_PKT);
1224
1225         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1226
1227         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1228                 u32 mac_mode;
1229
1230                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1231                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1232                         udelay(40);
1233
1234                         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1235                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
1236                         else
1237                                 mac_mode = MAC_MODE_PORT_MODE_MII;
1238
1239                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1240                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1241                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1242                 } else {
1243                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1244                 }
1245
1246                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1247                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1248
1249                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1250                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1251                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1252
1253                 tw32_f(MAC_MODE, mac_mode);
1254                 udelay(100);
1255
1256                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1257                 udelay(10);
1258         }
1259
1260         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1261             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1262              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1263                 u32 base_val;
1264
1265                 base_val = tp->pci_clock_ctrl;
1266                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1267                              CLOCK_CTRL_TXCLK_DISABLE);
1268
1269                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1270                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
1271         } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
1272                 /* do nothing */
1273         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1274                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1275                 u32 newbits1, newbits2;
1276
1277                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1278                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1279                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1280                                     CLOCK_CTRL_TXCLK_DISABLE |
1281                                     CLOCK_CTRL_ALTCLK);
1282                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1283                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1284                         newbits1 = CLOCK_CTRL_625_CORE;
1285                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1286                 } else {
1287                         newbits1 = CLOCK_CTRL_ALTCLK;
1288                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1289                 }
1290
1291                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1292                             40);
1293
1294                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1295                             40);
1296
1297                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1298                         u32 newbits3;
1299
1300                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1301                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1302                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1303                                             CLOCK_CTRL_TXCLK_DISABLE |
1304                                             CLOCK_CTRL_44MHZ_CORE);
1305                         } else {
1306                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1307                         }
1308
1309                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1310                                     tp->pci_clock_ctrl | newbits3, 40);
1311                 }
1312         }
1313
1314         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1315             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1316                 tg3_power_down_phy(tp);
1317
1318         tg3_frob_aux_power(tp);
1319
1320         /* Workaround for unstable PLL clock */
1321         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1322             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1323                 u32 val = tr32(0x7d00);
1324
1325                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1326                 tw32(0x7d00, val);
1327                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1328                         int err;
1329
1330                         err = tg3_nvram_lock(tp);
1331                         tg3_halt_cpu(tp, RX_CPU_BASE);
1332                         if (!err)
1333                                 tg3_nvram_unlock(tp);
1334                 }
1335         }
1336
1337         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1338
1339         /* Finally, set the new power state. */
1340         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1341         udelay(100);    /* Delay after power state change */
1342
1343         return 0;
1344 }
1345
1346 static void tg3_link_report(struct tg3 *tp)
1347 {
1348         if (!netif_carrier_ok(tp->dev)) {
1349                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1350         } else {
1351                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1352                        tp->dev->name,
1353                        (tp->link_config.active_speed == SPEED_1000 ?
1354                         1000 :
1355                         (tp->link_config.active_speed == SPEED_100 ?
1356                          100 : 10)),
1357                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1358                         "full" : "half"));
1359
1360                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1361                        "%s for RX.\n",
1362                        tp->dev->name,
1363                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1364                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1365         }
1366 }
1367
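/* Pause resolution implemented below follows the usual 802.3 autoneg rules:
 * both ends advertising symmetric pause enables RX and TX flow control;
 * local symmetric+asymmetric against an asymmetric-only partner yields RX
 * pause only; local asymmetric-only against a symmetric+asymmetric partner
 * yields TX pause only.  1000BASE-X pause bits are first translated into
 * their 1000BASE-T equivalents when running on a MII SERDES.
 */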
1368 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1369 {
1370         u32 new_tg3_flags = 0;
1371         u32 old_rx_mode = tp->rx_mode;
1372         u32 old_tx_mode = tp->tx_mode;
1373
1374         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1375
1376                 /* Convert 1000BaseX flow control bits to 1000BaseT
1377                  * bits before resolving flow control.
1378                  */
1379                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1380                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1381                                        ADVERTISE_PAUSE_ASYM);
1382                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1383
1384                         if (local_adv & ADVERTISE_1000XPAUSE)
1385                                 local_adv |= ADVERTISE_PAUSE_CAP;
1386                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1387                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1388                         if (remote_adv & LPA_1000XPAUSE)
1389                                 remote_adv |= LPA_PAUSE_CAP;
1390                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1391                                 remote_adv |= LPA_PAUSE_ASYM;
1392                 }
1393
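                /* Standard pause resolution: symmetric pause on both ends
                 * enables flow control in both directions; asymmetric
                 * pause enables it in only one direction, depending on
                 * what each end advertises.
                 */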
1394                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1395                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1396                                 if (remote_adv & LPA_PAUSE_CAP)
1397                                         new_tg3_flags |=
1398                                                 (TG3_FLAG_RX_PAUSE |
1399                                                 TG3_FLAG_TX_PAUSE);
1400                                 else if (remote_adv & LPA_PAUSE_ASYM)
1401                                         new_tg3_flags |=
1402                                                 (TG3_FLAG_RX_PAUSE);
1403                         } else {
1404                                 if (remote_adv & LPA_PAUSE_CAP)
1405                                         new_tg3_flags |=
1406                                                 (TG3_FLAG_RX_PAUSE |
1407                                                 TG3_FLAG_TX_PAUSE);
1408                         }
1409                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1410                         if ((remote_adv & LPA_PAUSE_CAP) &&
1411                         (remote_adv & LPA_PAUSE_ASYM))
1412                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1413                 }
1414
1415                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1416                 tp->tg3_flags |= new_tg3_flags;
1417         } else {
1418                 new_tg3_flags = tp->tg3_flags;
1419         }
1420
1421         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1422                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1423         else
1424                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1425
1426         if (old_rx_mode != tp->rx_mode) {
1427                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1428         }
1429
1430         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1431                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1432         else
1433                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1434
1435         if (old_tx_mode != tp->tx_mode) {
1436                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1437         }
1438 }
1439
1440 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1441 {
1442         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1443         case MII_TG3_AUX_STAT_10HALF:
1444                 *speed = SPEED_10;
1445                 *duplex = DUPLEX_HALF;
1446                 break;
1447
1448         case MII_TG3_AUX_STAT_10FULL:
1449                 *speed = SPEED_10;
1450                 *duplex = DUPLEX_FULL;
1451                 break;
1452
1453         case MII_TG3_AUX_STAT_100HALF:
1454                 *speed = SPEED_100;
1455                 *duplex = DUPLEX_HALF;
1456                 break;
1457
1458         case MII_TG3_AUX_STAT_100FULL:
1459                 *speed = SPEED_100;
1460                 *duplex = DUPLEX_FULL;
1461                 break;
1462
1463         case MII_TG3_AUX_STAT_1000HALF:
1464                 *speed = SPEED_1000;
1465                 *duplex = DUPLEX_HALF;
1466                 break;
1467
1468         case MII_TG3_AUX_STAT_1000FULL:
1469                 *speed = SPEED_1000;
1470                 *duplex = DUPLEX_FULL;
1471                 break;
1472
1473         default:
1474                 *speed = SPEED_INVALID;
1475                 *duplex = DUPLEX_INVALID;
1476                 break;
1477         }
1478 }
1479
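/* Program the copper PHY advertisement registers (and BMCR, for forced
 * modes) from tp->link_config, then either force the requested speed
 * and duplex or (re)start autonegotiation.
 */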
1480 static void tg3_phy_copper_begin(struct tg3 *tp)
1481 {
1482         u32 new_adv;
1483         int i;
1484
1485         if (tp->link_config.phy_is_low_power) {
1486                 /* Entering low power mode.  Disable gigabit and
1487                  * 100baseT advertisements.
1488                  */
1489                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1490
1491                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1492                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1493                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1494                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1495
1496                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1497         } else if (tp->link_config.speed == SPEED_INVALID) {
1498                 tp->link_config.advertising =
1499                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1500                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1501                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1502                          ADVERTISED_Autoneg | ADVERTISED_MII);
1503
1504                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1505                         tp->link_config.advertising &=
1506                                 ~(ADVERTISED_1000baseT_Half |
1507                                   ADVERTISED_1000baseT_Full);
1508
1509                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1510                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1511                         new_adv |= ADVERTISE_10HALF;
1512                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1513                         new_adv |= ADVERTISE_10FULL;
1514                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1515                         new_adv |= ADVERTISE_100HALF;
1516                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1517                         new_adv |= ADVERTISE_100FULL;
1518                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1519
1520                 if (tp->link_config.advertising &
1521                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1522                         new_adv = 0;
1523                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1524                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1525                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1526                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1527                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1528                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1529                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1530                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1531                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1532                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1533                 } else {
1534                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1535                 }
1536         } else {
1537                 /* Asking for a specific link mode. */
1538                 if (tp->link_config.speed == SPEED_1000) {
1539                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1540                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1541
1542                         if (tp->link_config.duplex == DUPLEX_FULL)
1543                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1544                         else
1545                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1546                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1547                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1548                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1549                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1550                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1551                 } else {
1552                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1553
1554                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1555                         if (tp->link_config.speed == SPEED_100) {
1556                                 if (tp->link_config.duplex == DUPLEX_FULL)
1557                                         new_adv |= ADVERTISE_100FULL;
1558                                 else
1559                                         new_adv |= ADVERTISE_100HALF;
1560                         } else {
1561                                 if (tp->link_config.duplex == DUPLEX_FULL)
1562                                         new_adv |= ADVERTISE_10FULL;
1563                                 else
1564                                         new_adv |= ADVERTISE_10HALF;
1565                         }
1566                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1567                 }
1568         }
1569
1570         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1571             tp->link_config.speed != SPEED_INVALID) {
1572                 u32 bmcr, orig_bmcr;
1573
1574                 tp->link_config.active_speed = tp->link_config.speed;
1575                 tp->link_config.active_duplex = tp->link_config.duplex;
1576
1577                 bmcr = 0;
1578                 switch (tp->link_config.speed) {
1579                 default:
1580                 case SPEED_10:
1581                         break;
1582
1583                 case SPEED_100:
1584                         bmcr |= BMCR_SPEED100;
1585                         break;
1586
1587                 case SPEED_1000:
1588                         bmcr |= TG3_BMCR_SPEED1000;
1589                         break;
1590                 }
1591
1592                 if (tp->link_config.duplex == DUPLEX_FULL)
1593                         bmcr |= BMCR_FULLDPLX;
1594
1595                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1596                     (bmcr != orig_bmcr)) {
1597                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1598                         for (i = 0; i < 1500; i++) {
1599                                 u32 tmp;
1600
1601                                 udelay(10);
1602                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1603                                     tg3_readphy(tp, MII_BMSR, &tmp))
1604                                         continue;
1605                                 if (!(tmp & BMSR_LSTATUS)) {
1606                                         udelay(40);
1607                                         break;
1608                                 }
1609                         }
1610                         tg3_writephy(tp, MII_BMCR, bmcr);
1611                         udelay(40);
1612                 }
1613         } else {
1614                 tg3_writephy(tp, MII_BMCR,
1615                              BMCR_ANENABLE | BMCR_ANRESTART);
1616         }
1617 }
1618
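/* Apply the DSP fixups the BCM5401 PHY needs.  The register/value pairs
 * are vendor-supplied and are written through the DSP address/data ports.
 */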
1619 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1620 {
1621         int err;
1622
1623         /* Turn off tap power management. */
1624         /* Set Extended packet length bit */
1625         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1626
1627         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1628         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1629
1630         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1631         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1632
1633         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1634         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1635
1636         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1637         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1638
1639         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1640         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1641
1642         udelay(40);
1643
1644         return err;
1645 }
1646
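/* Return 1 if the PHY is currently advertising all 10/100 modes (and,
 * unless the board is 10/100-only, both gigabit modes).  Used to decide
 * whether autonegotiation must be restarted, e.g. after leaving low
 * power mode.
 */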
1647 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1648 {
1649         u32 adv_reg, all_mask;
1650
1651         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1652                 return 0;
1653
1654         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1655                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1656         if ((adv_reg & all_mask) != all_mask)
1657                 return 0;
1658         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1659                 u32 tg3_ctrl;
1660
1661                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1662                         return 0;
1663
1664                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1665                             MII_TG3_CTRL_ADV_1000_FULL);
1666                 if ((tg3_ctrl & all_mask) != all_mask)
1667                         return 0;
1668         }
1669         return 1;
1670 }
1671
1672 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1673 {
1674         int current_link_up;
1675         u32 bmsr, dummy;
1676         u16 current_speed;
1677         u8 current_duplex;
1678         int i, err;
1679
1680         tw32(MAC_EVENT, 0);
1681
1682         tw32_f(MAC_STATUS,
1683              (MAC_STATUS_SYNC_CHANGED |
1684               MAC_STATUS_CFG_CHANGED |
1685               MAC_STATUS_MI_COMPLETION |
1686               MAC_STATUS_LNKSTATE_CHANGED));
1687         udelay(40);
1688
1689         tp->mi_mode = MAC_MI_MODE_BASE;
1690         tw32_f(MAC_MI_MODE, tp->mi_mode);
1691         udelay(80);
1692
1693         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1694
1695         /* Some third-party PHYs need to be reset on link going
1696          * down.
1697          */
1698         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1699              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1700              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1701             netif_carrier_ok(tp->dev)) {
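                /* The BMSR link bit is latched low, so read it twice;
                 * the second read reflects the current link state.
                 */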
1702                 tg3_readphy(tp, MII_BMSR, &bmsr);
1703                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1704                     !(bmsr & BMSR_LSTATUS))
1705                         force_reset = 1;
1706         }
1707         if (force_reset)
1708                 tg3_phy_reset(tp);
1709
1710         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1711                 tg3_readphy(tp, MII_BMSR, &bmsr);
1712                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1713                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1714                         bmsr = 0;
1715
1716                 if (!(bmsr & BMSR_LSTATUS)) {
1717                         err = tg3_init_5401phy_dsp(tp);
1718                         if (err)
1719                                 return err;
1720
1721                         tg3_readphy(tp, MII_BMSR, &bmsr);
1722                         for (i = 0; i < 1000; i++) {
1723                                 udelay(10);
1724                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1725                                     (bmsr & BMSR_LSTATUS)) {
1726                                         udelay(40);
1727                                         break;
1728                                 }
1729                         }
1730
1731                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1732                             !(bmsr & BMSR_LSTATUS) &&
1733                             tp->link_config.active_speed == SPEED_1000) {
1734                                 err = tg3_phy_reset(tp);
1735                                 if (!err)
1736                                         err = tg3_init_5401phy_dsp(tp);
1737                                 if (err)
1738                                         return err;
1739                         }
1740                 }
1741         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1742                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1743                 /* 5701 {A0,B0} CRC bug workaround */
1744                 tg3_writephy(tp, 0x15, 0x0a75);
1745                 tg3_writephy(tp, 0x1c, 0x8c68);
1746                 tg3_writephy(tp, 0x1c, 0x8d68);
1747                 tg3_writephy(tp, 0x1c, 0x8c68);
1748         }
1749
1750         /* Clear pending interrupts... */
1751         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1752         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1753
1754         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1755                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1756         else
1757                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1758
1759         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1760             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1761                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1762                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1763                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1764                 else
1765                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1766         }
1767
1768         current_link_up = 0;
1769         current_speed = SPEED_INVALID;
1770         current_duplex = DUPLEX_INVALID;
1771
1772         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1773                 u32 val;
1774
1775                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1776                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1777                 if (!(val & (1 << 10))) {
1778                         val |= (1 << 10);
1779                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1780                         goto relink;
1781                 }
1782         }
1783
1784         bmsr = 0;
1785         for (i = 0; i < 100; i++) {
1786                 tg3_readphy(tp, MII_BMSR, &bmsr);
1787                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1788                     (bmsr & BMSR_LSTATUS))
1789                         break;
1790                 udelay(40);
1791         }
1792
1793         if (bmsr & BMSR_LSTATUS) {
1794                 u32 aux_stat, bmcr;
1795
1796                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1797                 for (i = 0; i < 2000; i++) {
1798                         udelay(10);
1799                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1800                             aux_stat)
1801                                 break;
1802                 }
1803
1804                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1805                                              &current_speed,
1806                                              &current_duplex);
1807
1808                 bmcr = 0;
1809                 for (i = 0; i < 200; i++) {
1810                         tg3_readphy(tp, MII_BMCR, &bmcr);
1811                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1812                                 continue;
1813                         if (bmcr && bmcr != 0x7fff)
1814                                 break;
1815                         udelay(10);
1816                 }
1817
1818                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1819                         if (bmcr & BMCR_ANENABLE) {
1820                                 current_link_up = 1;
1821
1822                                 /* Force autoneg restart if we are exiting
1823                                  * low power mode.
1824                                  */
1825                                 if (!tg3_copper_is_advertising_all(tp))
1826                                         current_link_up = 0;
1827                         } else {
1828                                 current_link_up = 0;
1829                         }
1830                 } else {
1831                         if (!(bmcr & BMCR_ANENABLE) &&
1832                             tp->link_config.speed == current_speed &&
1833                             tp->link_config.duplex == current_duplex) {
1834                                 current_link_up = 1;
1835                         } else {
1836                                 current_link_up = 0;
1837                         }
1838                 }
1839
1840                 tp->link_config.active_speed = current_speed;
1841                 tp->link_config.active_duplex = current_duplex;
1842         }
1843
1844         if (current_link_up == 1 &&
1845             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1846             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1847                 u32 local_adv, remote_adv;
1848
1849                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1850                         local_adv = 0;
1851                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1852
1853                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1854                         remote_adv = 0;
1855
1856                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1857
1858                 /* If we are not advertising full pause capability,
1859                  * something is wrong.  Bring the link down and reconfigure.
1860                  */
1861                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1862                         current_link_up = 0;
1863                 } else {
1864                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1865                 }
1866         }
1867 relink:
1868         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
1869                 u32 tmp;
1870
1871                 tg3_phy_copper_begin(tp);
1872
1873                 tg3_readphy(tp, MII_BMSR, &tmp);
1874                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1875                     (tmp & BMSR_LSTATUS))
1876                         current_link_up = 1;
1877         }
1878
1879         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1880         if (current_link_up == 1) {
1881                 if (tp->link_config.active_speed == SPEED_100 ||
1882                     tp->link_config.active_speed == SPEED_10)
1883                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1884                 else
1885                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1886         } else
1887                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1888
1889         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1890         if (tp->link_config.active_duplex == DUPLEX_HALF)
1891                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1892
1893         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1894         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1895                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1896                     (current_link_up == 1 &&
1897                      tp->link_config.active_speed == SPEED_10))
1898                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1899         } else {
1900                 if (current_link_up == 1)
1901                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1902         }
1903
1904         /* ??? Without this setting Netgear GA302T PHY does not
1905          * ??? send/receive packets...
1906          */
1907         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1908             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1909                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1910                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1911                 udelay(80);
1912         }
1913
1914         tw32_f(MAC_MODE, tp->mac_mode);
1915         udelay(40);
1916
1917         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1918                 /* Polled via timer. */
1919                 tw32_f(MAC_EVENT, 0);
1920         } else {
1921                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1922         }
1923         udelay(40);
1924
1925         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1926             current_link_up == 1 &&
1927             tp->link_config.active_speed == SPEED_1000 &&
1928             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1929              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1930                 udelay(120);
1931                 tw32_f(MAC_STATUS,
1932                      (MAC_STATUS_SYNC_CHANGED |
1933                       MAC_STATUS_CFG_CHANGED));
1934                 udelay(40);
1935                 tg3_write_mem(tp,
1936                               NIC_SRAM_FIRMWARE_MBOX,
1937                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1938         }
1939
1940         if (current_link_up != netif_carrier_ok(tp->dev)) {
1941                 if (current_link_up)
1942                         netif_carrier_on(tp->dev);
1943                 else
1944                         netif_carrier_off(tp->dev);
1945                 tg3_link_report(tp);
1946         }
1947
1948         return 0;
1949 }
1950
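/* Software autonegotiation state machine for 1000BASE-X fiber links
 * (IEEE 802.3 clause 37 style), used by fiber_autoneg() when the
 * hardware autoneg block is not in use.
 */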
1951 struct tg3_fiber_aneginfo {
1952         int state;
1953 #define ANEG_STATE_UNKNOWN              0
1954 #define ANEG_STATE_AN_ENABLE            1
1955 #define ANEG_STATE_RESTART_INIT         2
1956 #define ANEG_STATE_RESTART              3
1957 #define ANEG_STATE_DISABLE_LINK_OK      4
1958 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1959 #define ANEG_STATE_ABILITY_DETECT       6
1960 #define ANEG_STATE_ACK_DETECT_INIT      7
1961 #define ANEG_STATE_ACK_DETECT           8
1962 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1963 #define ANEG_STATE_COMPLETE_ACK         10
1964 #define ANEG_STATE_IDLE_DETECT_INIT     11
1965 #define ANEG_STATE_IDLE_DETECT          12
1966 #define ANEG_STATE_LINK_OK              13
1967 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1968 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1969
1970         u32 flags;
1971 #define MR_AN_ENABLE            0x00000001
1972 #define MR_RESTART_AN           0x00000002
1973 #define MR_AN_COMPLETE          0x00000004
1974 #define MR_PAGE_RX              0x00000008
1975 #define MR_NP_LOADED            0x00000010
1976 #define MR_TOGGLE_TX            0x00000020
1977 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1978 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1979 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1980 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1981 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1982 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1983 #define MR_LP_ADV_NEXT_PAGE     0x00001000
1984 #define MR_TOGGLE_RX            0x00002000
1985 #define MR_NP_RX                0x00004000
1986
1987 #define MR_LINK_OK              0x80000000
1988
1989         unsigned long link_time, cur_time;
1990
1991         u32 ability_match_cfg;
1992         int ability_match_count;
1993
1994         char ability_match, idle_match, ack_match;
1995
1996         u32 txconfig, rxconfig;
1997 #define ANEG_CFG_NP             0x00000080
1998 #define ANEG_CFG_ACK            0x00000040
1999 #define ANEG_CFG_RF2            0x00000020
2000 #define ANEG_CFG_RF1            0x00000010
2001 #define ANEG_CFG_PS2            0x00000001
2002 #define ANEG_CFG_PS1            0x00008000
2003 #define ANEG_CFG_HD             0x00004000
2004 #define ANEG_CFG_FD             0x00002000
2005 #define ANEG_CFG_INVAL          0x00001f06
2006
2007 };
2008 #define ANEG_OK         0
2009 #define ANEG_DONE       1
2010 #define ANEG_TIMER_ENAB 2
2011 #define ANEG_FAILED     -1
2012
2013 #define ANEG_STATE_SETTLE_TIME  10000
2014
2015 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2016                                    struct tg3_fiber_aneginfo *ap)
2017 {
2018         unsigned long delta;
2019         u32 rx_cfg_reg;
2020         int ret;
2021
2022         if (ap->state == ANEG_STATE_UNKNOWN) {
2023                 ap->rxconfig = 0;
2024                 ap->link_time = 0;
2025                 ap->cur_time = 0;
2026                 ap->ability_match_cfg = 0;
2027                 ap->ability_match_count = 0;
2028                 ap->ability_match = 0;
2029                 ap->idle_match = 0;
2030                 ap->ack_match = 0;
2031         }
2032         ap->cur_time++;
2033
2034         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2035                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2036
2037                 if (rx_cfg_reg != ap->ability_match_cfg) {
2038                         ap->ability_match_cfg = rx_cfg_reg;
2039                         ap->ability_match = 0;
2040                         ap->ability_match_count = 0;
2041                 } else {
2042                         if (++ap->ability_match_count > 1) {
2043                                 ap->ability_match = 1;
2044                                 ap->ability_match_cfg = rx_cfg_reg;
2045                         }
2046                 }
2047                 if (rx_cfg_reg & ANEG_CFG_ACK)
2048                         ap->ack_match = 1;
2049                 else
2050                         ap->ack_match = 0;
2051
2052                 ap->idle_match = 0;
2053         } else {
2054                 ap->idle_match = 1;
2055                 ap->ability_match_cfg = 0;
2056                 ap->ability_match_count = 0;
2057                 ap->ability_match = 0;
2058                 ap->ack_match = 0;
2059
2060                 rx_cfg_reg = 0;
2061         }
2062
2063         ap->rxconfig = rx_cfg_reg;
2064         ret = ANEG_OK;
2065
2066         switch(ap->state) {
2067         case ANEG_STATE_UNKNOWN:
2068                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2069                         ap->state = ANEG_STATE_AN_ENABLE;
2070
2071                 /* fallthru */
2072         case ANEG_STATE_AN_ENABLE:
2073                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2074                 if (ap->flags & MR_AN_ENABLE) {
2075                         ap->link_time = 0;
2076                         ap->cur_time = 0;
2077                         ap->ability_match_cfg = 0;
2078                         ap->ability_match_count = 0;
2079                         ap->ability_match = 0;
2080                         ap->idle_match = 0;
2081                         ap->ack_match = 0;
2082
2083                         ap->state = ANEG_STATE_RESTART_INIT;
2084                 } else {
2085                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
2086                 }
2087                 break;
2088
2089         case ANEG_STATE_RESTART_INIT:
2090                 ap->link_time = ap->cur_time;
2091                 ap->flags &= ~(MR_NP_LOADED);
2092                 ap->txconfig = 0;
2093                 tw32(MAC_TX_AUTO_NEG, 0);
2094                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2095                 tw32_f(MAC_MODE, tp->mac_mode);
2096                 udelay(40);
2097
2098                 ret = ANEG_TIMER_ENAB;
2099                 ap->state = ANEG_STATE_RESTART;
2100
2101                 /* fallthru */
2102         case ANEG_STATE_RESTART:
2103                 delta = ap->cur_time - ap->link_time;
2104                 if (delta > ANEG_STATE_SETTLE_TIME) {
2105                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2106                 } else {
2107                         ret = ANEG_TIMER_ENAB;
2108                 }
2109                 break;
2110
2111         case ANEG_STATE_DISABLE_LINK_OK:
2112                 ret = ANEG_DONE;
2113                 break;
2114
2115         case ANEG_STATE_ABILITY_DETECT_INIT:
2116                 ap->flags &= ~(MR_TOGGLE_TX);
2117                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2118                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2119                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2120                 tw32_f(MAC_MODE, tp->mac_mode);
2121                 udelay(40);
2122
2123                 ap->state = ANEG_STATE_ABILITY_DETECT;
2124                 break;
2125
2126         case ANEG_STATE_ABILITY_DETECT:
2127                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2128                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
2129                 }
2130                 break;
2131
2132         case ANEG_STATE_ACK_DETECT_INIT:
2133                 ap->txconfig |= ANEG_CFG_ACK;
2134                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2135                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2136                 tw32_f(MAC_MODE, tp->mac_mode);
2137                 udelay(40);
2138
2139                 ap->state = ANEG_STATE_ACK_DETECT;
2140
2141                 /* fallthru */
2142         case ANEG_STATE_ACK_DETECT:
2143                 if (ap->ack_match != 0) {
2144                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2145                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2146                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2147                         } else {
2148                                 ap->state = ANEG_STATE_AN_ENABLE;
2149                         }
2150                 } else if (ap->ability_match != 0 &&
2151                            ap->rxconfig == 0) {
2152                         ap->state = ANEG_STATE_AN_ENABLE;
2153                 }
2154                 break;
2155
2156         case ANEG_STATE_COMPLETE_ACK_INIT:
2157                 if (ap->rxconfig & ANEG_CFG_INVAL) {
2158                         ret = ANEG_FAILED;
2159                         break;
2160                 }
2161                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2162                                MR_LP_ADV_HALF_DUPLEX |
2163                                MR_LP_ADV_SYM_PAUSE |
2164                                MR_LP_ADV_ASYM_PAUSE |
2165                                MR_LP_ADV_REMOTE_FAULT1 |
2166                                MR_LP_ADV_REMOTE_FAULT2 |
2167                                MR_LP_ADV_NEXT_PAGE |
2168                                MR_TOGGLE_RX |
2169                                MR_NP_RX);
2170                 if (ap->rxconfig & ANEG_CFG_FD)
2171                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2172                 if (ap->rxconfig & ANEG_CFG_HD)
2173                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2174                 if (ap->rxconfig & ANEG_CFG_PS1)
2175                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
2176                 if (ap->rxconfig & ANEG_CFG_PS2)
2177                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2178                 if (ap->rxconfig & ANEG_CFG_RF1)
2179                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2180                 if (ap->rxconfig & ANEG_CFG_RF2)
2181                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2182                 if (ap->rxconfig & ANEG_CFG_NP)
2183                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
2184
2185                 ap->link_time = ap->cur_time;
2186
2187                 ap->flags ^= (MR_TOGGLE_TX);
2188                 if (ap->rxconfig & 0x0008)
2189                         ap->flags |= MR_TOGGLE_RX;
2190                 if (ap->rxconfig & ANEG_CFG_NP)
2191                         ap->flags |= MR_NP_RX;
2192                 ap->flags |= MR_PAGE_RX;
2193
2194                 ap->state = ANEG_STATE_COMPLETE_ACK;
2195                 ret = ANEG_TIMER_ENAB;
2196                 break;
2197
2198         case ANEG_STATE_COMPLETE_ACK:
2199                 if (ap->ability_match != 0 &&
2200                     ap->rxconfig == 0) {
2201                         ap->state = ANEG_STATE_AN_ENABLE;
2202                         break;
2203                 }
2204                 delta = ap->cur_time - ap->link_time;
2205                 if (delta > ANEG_STATE_SETTLE_TIME) {
2206                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2207                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2208                         } else {
2209                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2210                                     !(ap->flags & MR_NP_RX)) {
2211                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2212                                 } else {
2213                                         ret = ANEG_FAILED;
2214                                 }
2215                         }
2216                 }
2217                 break;
2218
2219         case ANEG_STATE_IDLE_DETECT_INIT:
2220                 ap->link_time = ap->cur_time;
2221                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2222                 tw32_f(MAC_MODE, tp->mac_mode);
2223                 udelay(40);
2224
2225                 ap->state = ANEG_STATE_IDLE_DETECT;
2226                 ret = ANEG_TIMER_ENAB;
2227                 break;
2228
2229         case ANEG_STATE_IDLE_DETECT:
2230                 if (ap->ability_match != 0 &&
2231                     ap->rxconfig == 0) {
2232                         ap->state = ANEG_STATE_AN_ENABLE;
2233                         break;
2234                 }
2235                 delta = ap->cur_time - ap->link_time;
2236                 if (delta > ANEG_STATE_SETTLE_TIME) {
2237                         /* XXX another gem from the Broadcom driver :( */
2238                         ap->state = ANEG_STATE_LINK_OK;
2239                 }
2240                 break;
2241
2242         case ANEG_STATE_LINK_OK:
2243                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2244                 ret = ANEG_DONE;
2245                 break;
2246
2247         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2248                 /* ??? unimplemented */
2249                 break;
2250
2251         case ANEG_STATE_NEXT_PAGE_WAIT:
2252                 /* ??? unimplemented */
2253                 break;
2254
2255         default:
2256                 ret = ANEG_FAILED;
2257                 break;
2258         }
2259
2260         return ret;
2261 }
2262
2263 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2264 {
2265         int res = 0;
2266         struct tg3_fiber_aneginfo aninfo;
2267         int status = ANEG_FAILED;
2268         unsigned int tick;
2269         u32 tmp;
2270
2271         tw32_f(MAC_TX_AUTO_NEG, 0);
2272
2273         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2274         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2275         udelay(40);
2276
2277         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2278         udelay(40);
2279
2280         memset(&aninfo, 0, sizeof(aninfo));
2281         aninfo.flags |= MR_AN_ENABLE;
2282         aninfo.state = ANEG_STATE_UNKNOWN;
2283         aninfo.cur_time = 0;
2284         tick = 0;
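        /* Step the software autoneg state machine roughly once per
         * microsecond, for at most ~195 ms, until it reports done or
         * failed.
         */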
2285         while (++tick < 195000) {
2286                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2287                 if (status == ANEG_DONE || status == ANEG_FAILED)
2288                         break;
2289
2290                 udelay(1);
2291         }
2292
2293         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2294         tw32_f(MAC_MODE, tp->mac_mode);
2295         udelay(40);
2296
2297         *flags = aninfo.flags;
2298
2299         if (status == ANEG_DONE &&
2300             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2301                              MR_LP_ADV_FULL_DUPLEX)))
2302                 res = 1;
2303
2304         return res;
2305 }
2306
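/* Bring up the external BCM8002 PHY used on some fiber boards via its
 * vendor-specific register sequence.
 */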
2307 static void tg3_init_bcm8002(struct tg3 *tp)
2308 {
2309         u32 mac_status = tr32(MAC_STATUS);
2310         int i;
2311
2312         /* Reset when initializing for the first time or when we have a link. */
2313         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2314             !(mac_status & MAC_STATUS_PCS_SYNCED))
2315                 return;
2316
2317         /* Set PLL lock range. */
2318         tg3_writephy(tp, 0x16, 0x8007);
2319
2320         /* SW reset */
2321         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2322
2323         /* Wait for reset to complete. */
2324         /* XXX schedule_timeout() ... */
2325         for (i = 0; i < 500; i++)
2326                 udelay(10);
2327
2328         /* Config mode; select PMA/Ch 1 regs. */
2329         tg3_writephy(tp, 0x10, 0x8411);
2330
2331         /* Enable auto-lock and comdet, select txclk for tx. */
2332         tg3_writephy(tp, 0x11, 0x0a10);
2333
2334         tg3_writephy(tp, 0x18, 0x00a0);
2335         tg3_writephy(tp, 0x16, 0x41ff);
2336
2337         /* Assert and deassert POR. */
2338         tg3_writephy(tp, 0x13, 0x0400);
2339         udelay(40);
2340         tg3_writephy(tp, 0x13, 0x0000);
2341
2342         tg3_writephy(tp, 0x11, 0x0a50);
2343         udelay(40);
2344         tg3_writephy(tp, 0x11, 0x0a10);
2345
2346         /* Wait for signal to stabilize */
2347         /* XXX schedule_timeout() ... */
2348         for (i = 0; i < 15000; i++)
2349                 udelay(10);
2350
2351         /* Deselect the channel register so we can read the PHYID
2352          * later.
2353          */
2354         tg3_writephy(tp, 0x10, 0x8011);
2355 }
2356
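/* Drive the on-chip SG_DIG SerDes autonegotiation logic and return
 * nonzero when it reports a usable link.
 */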
2357 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2358 {
2359         u32 sg_dig_ctrl, sg_dig_status;
2360         u32 serdes_cfg, expected_sg_dig_ctrl;
2361         int workaround, port_a;
2362         int current_link_up;
2363
2364         serdes_cfg = 0;
2365         expected_sg_dig_ctrl = 0;
2366         workaround = 0;
2367         port_a = 1;
2368         current_link_up = 0;
2369
2370         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2371             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2372                 workaround = 1;
2373                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2374                         port_a = 0;
2375
2376                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2377                 /* preserve bits 20-23 for voltage regulator */
2378                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2379         }
2380
2381         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2382
2383         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2384                 if (sg_dig_ctrl & (1 << 31)) {
2385                         if (workaround) {
2386                                 u32 val = serdes_cfg;
2387
2388                                 if (port_a)
2389                                         val |= 0xc010000;
2390                                 else
2391                                         val |= 0x4010000;
2392                                 tw32_f(MAC_SERDES_CFG, val);
2393                         }
2394                         tw32_f(SG_DIG_CTRL, 0x01388400);
2395                 }
2396                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2397                         tg3_setup_flow_control(tp, 0, 0);
2398                         current_link_up = 1;
2399                 }
2400                 goto out;
2401         }
2402
2403         /* Want auto-negotiation.  */
2404         expected_sg_dig_ctrl = 0x81388400;
2405
2406         /* Pause capability */
2407         expected_sg_dig_ctrl |= (1 << 11);
2408
2409         /* Asymmetric pause */
2410         expected_sg_dig_ctrl |= (1 << 12);
2411
2412         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2413                 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
2414                     tp->serdes_counter &&
2415                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
2416                                     MAC_STATUS_RCVD_CFG)) ==
2417                      MAC_STATUS_PCS_SYNCED)) {
2418                         tp->serdes_counter--;
2419                         current_link_up = 1;
2420                         goto out;
2421                 }
2422 restart_autoneg:
2423                 if (workaround)
2424                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2425                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2426                 udelay(5);
2427                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2428
2429                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2430                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2431         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2432                                  MAC_STATUS_SIGNAL_DET)) {
2433                 sg_dig_status = tr32(SG_DIG_STATUS);
2434                 mac_status = tr32(MAC_STATUS);
2435
2436                 if ((sg_dig_status & (1 << 1)) &&
2437                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2438                         u32 local_adv, remote_adv;
2439
2440                         local_adv = ADVERTISE_PAUSE_CAP;
2441                         remote_adv = 0;
2442                         if (sg_dig_status & (1 << 19))
2443                                 remote_adv |= LPA_PAUSE_CAP;
2444                         if (sg_dig_status & (1 << 20))
2445                                 remote_adv |= LPA_PAUSE_ASYM;
2446
2447                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2448                         current_link_up = 1;
2449                         tp->serdes_counter = 0;
2450                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2451                 } else if (!(sg_dig_status & (1 << 1))) {
2452                         if (tp->serdes_counter)
2453                                 tp->serdes_counter--;
2454                         else {
2455                                 if (workaround) {
2456                                         u32 val = serdes_cfg;
2457
2458                                         if (port_a)
2459                                                 val |= 0xc010000;
2460                                         else
2461                                                 val |= 0x4010000;
2462
2463                                         tw32_f(MAC_SERDES_CFG, val);
2464                                 }
2465
2466                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2467                                 udelay(40);
2468
2469                                 /* Link parallel detection - link is up
2470                                  * only if we have PCS_SYNC and are not
2471                                  * receiving config code words.  */
2472                                 mac_status = tr32(MAC_STATUS);
2473                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2474                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2475                                         tg3_setup_flow_control(tp, 0, 0);
2476                                         current_link_up = 1;
2477                                         tp->tg3_flags2 |=
2478                                                 TG3_FLG2_PARALLEL_DETECT;
2479                                         tp->serdes_counter =
2480                                                 SERDES_PARALLEL_DET_TIMEOUT;
2481                                 } else
2482                                         goto restart_autoneg;
2483                         }
2484                 }
2485         } else {
2486                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2487                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2488         }
2489
2490 out:
2491         return current_link_up;
2492 }
2493
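/* Fiber link setup without the hardware autoneg block: run the software
 * autoneg state machine (or simply force 1000-full if autoneg is
 * disabled) and return whether the link is up.
 */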
2494 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2495 {
2496         int current_link_up = 0;
2497
2498         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2499                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2500                 goto out;
2501         }
2502
2503         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2504                 u32 flags;
2505                 int i;
2506
2507                 if (fiber_autoneg(tp, &flags)) {
2508                         u32 local_adv, remote_adv;
2509
2510                         local_adv = ADVERTISE_PAUSE_CAP;
2511                         remote_adv = 0;
2512                         if (flags & MR_LP_ADV_SYM_PAUSE)
2513                                 remote_adv |= LPA_PAUSE_CAP;
2514                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2515                                 remote_adv |= LPA_PAUSE_ASYM;
2516
2517                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2518
2519                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2520                         current_link_up = 1;
2521                 }
2522                 for (i = 0; i < 30; i++) {
2523                         udelay(20);
2524                         tw32_f(MAC_STATUS,
2525                                (MAC_STATUS_SYNC_CHANGED |
2526                                 MAC_STATUS_CFG_CHANGED));
2527                         udelay(40);
2528                         if ((tr32(MAC_STATUS) &
2529                              (MAC_STATUS_SYNC_CHANGED |
2530                               MAC_STATUS_CFG_CHANGED)) == 0)
2531                                 break;
2532                 }
2533
2534                 mac_status = tr32(MAC_STATUS);
2535                 if (current_link_up == 0 &&
2536                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2537                     !(mac_status & MAC_STATUS_RCVD_CFG))
2538                         current_link_up = 1;
2539         } else {
2540                 /* Forcing 1000FD link up. */
2541                 current_link_up = 1;
2542                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2543
2544                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2545                 udelay(40);
2546         }
2547
2548 out:
2549         return current_link_up;
2550 }
2551
2552 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2553 {
2554         u32 orig_pause_cfg;
2555         u16 orig_active_speed;
2556         u8 orig_active_duplex;
2557         u32 mac_status;
2558         int current_link_up;
2559         int i;
2560
2561         orig_pause_cfg =
2562                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2563                                   TG3_FLAG_TX_PAUSE));
2564         orig_active_speed = tp->link_config.active_speed;
2565         orig_active_duplex = tp->link_config.active_duplex;
2566
2567         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2568             netif_carrier_ok(tp->dev) &&
2569             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2570                 mac_status = tr32(MAC_STATUS);
2571                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2572                                MAC_STATUS_SIGNAL_DET |
2573                                MAC_STATUS_CFG_CHANGED |
2574                                MAC_STATUS_RCVD_CFG);
2575                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2576                                    MAC_STATUS_SIGNAL_DET)) {
2577                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2578                                             MAC_STATUS_CFG_CHANGED));
2579                         return 0;
2580                 }
2581         }
2582
2583         tw32_f(MAC_TX_AUTO_NEG, 0);
2584
2585         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2586         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2587         tw32_f(MAC_MODE, tp->mac_mode);
2588         udelay(40);
2589
2590         if (tp->phy_id == PHY_ID_BCM8002)
2591                 tg3_init_bcm8002(tp);
2592
2593         /* Enable link change event even when serdes polling.  */
2594         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2595         udelay(40);
2596
2597         current_link_up = 0;
2598         mac_status = tr32(MAC_STATUS);
2599
2600         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2601                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2602         else
2603                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2604
2605         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2606         tw32_f(MAC_MODE, tp->mac_mode);
2607         udelay(40);
2608
2609         tp->hw_status->status =
2610                 (SD_STATUS_UPDATED |
2611                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2612
2613         for (i = 0; i < 100; i++) {
2614                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2615                                     MAC_STATUS_CFG_CHANGED));
2616                 udelay(5);
2617                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2618                                          MAC_STATUS_CFG_CHANGED |
2619                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
2620                         break;
2621         }
2622
2623         mac_status = tr32(MAC_STATUS);
2624         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2625                 current_link_up = 0;
2626                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2627                     tp->serdes_counter == 0) {
2628                         tw32_f(MAC_MODE, (tp->mac_mode |
2629                                           MAC_MODE_SEND_CONFIGS));
2630                         udelay(1);
2631                         tw32_f(MAC_MODE, tp->mac_mode);
2632                 }
2633         }
2634
2635         if (current_link_up == 1) {
2636                 tp->link_config.active_speed = SPEED_1000;
2637                 tp->link_config.active_duplex = DUPLEX_FULL;
2638                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2639                                     LED_CTRL_LNKLED_OVERRIDE |
2640                                     LED_CTRL_1000MBPS_ON));
2641         } else {
2642                 tp->link_config.active_speed = SPEED_INVALID;
2643                 tp->link_config.active_duplex = DUPLEX_INVALID;
2644                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2645                                     LED_CTRL_LNKLED_OVERRIDE |
2646                                     LED_CTRL_TRAFFIC_OVERRIDE));
2647         }
2648
2649         if (current_link_up != netif_carrier_ok(tp->dev)) {
2650                 if (current_link_up)
2651                         netif_carrier_on(tp->dev);
2652                 else
2653                         netif_carrier_off(tp->dev);
2654                 tg3_link_report(tp);
2655         } else {
2656                 u32 now_pause_cfg =
2657                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2658                                          TG3_FLAG_TX_PAUSE);
2659                 if (orig_pause_cfg != now_pause_cfg ||
2660                     orig_active_speed != tp->link_config.active_speed ||
2661                     orig_active_duplex != tp->link_config.active_duplex)
2662                         tg3_link_report(tp);
2663         }
2664
2665         return 0;
2666 }
2667
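/* Link setup for SerDes devices whose PHY is managed through MII-style
 * registers, using the 1000BASE-X advertisement bits in MII_ADVERTISE.
 */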
2668 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2669 {
2670         int current_link_up, err = 0;
2671         u32 bmsr, bmcr;
2672         u16 current_speed;
2673         u8 current_duplex;
2674
2675         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2676         tw32_f(MAC_MODE, tp->mac_mode);
2677         udelay(40);
2678
2679         tw32(MAC_EVENT, 0);
2680
2681         tw32_f(MAC_STATUS,
2682              (MAC_STATUS_SYNC_CHANGED |
2683               MAC_STATUS_CFG_CHANGED |
2684               MAC_STATUS_MI_COMPLETION |
2685               MAC_STATUS_LNKSTATE_CHANGED));
2686         udelay(40);
2687
2688         if (force_reset)
2689                 tg3_phy_reset(tp);
2690
2691         current_link_up = 0;
2692         current_speed = SPEED_INVALID;
2693         current_duplex = DUPLEX_INVALID;
2694
2695         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2696         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2697         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2698                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2699                         bmsr |= BMSR_LSTATUS;
2700                 else
2701                         bmsr &= ~BMSR_LSTATUS;
2702         }
2703
2704         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2705
2706         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2707             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2708                 /* do nothing, just check for link up at the end */
2709         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2710                 u32 adv, new_adv;
2711
2712                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2713                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2714                                   ADVERTISE_1000XPAUSE |
2715                                   ADVERTISE_1000XPSE_ASYM |
2716                                   ADVERTISE_SLCT);
2717
2718                 /* Always advertise symmetric PAUSE just like copper */
2719                 new_adv |= ADVERTISE_1000XPAUSE;
2720
2721                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2722                         new_adv |= ADVERTISE_1000XHALF;
2723                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2724                         new_adv |= ADVERTISE_1000XFULL;
2725
2726                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2727                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2728                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2729                         tg3_writephy(tp, MII_BMCR, bmcr);
2730
2731                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2732                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
2733                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2734
2735                         return err;
2736                 }
2737         } else {
2738                 u32 new_bmcr;
2739
2740                 bmcr &= ~BMCR_SPEED1000;
2741                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
2742
2743                 if (tp->link_config.duplex == DUPLEX_FULL)
2744                         new_bmcr |= BMCR_FULLDPLX;
2745
2746                 if (new_bmcr != bmcr) {
2747                         /* BMCR_SPEED1000 is a reserved bit that needs
2748                          * to be set on write.
2749                          */
2750                         new_bmcr |= BMCR_SPEED1000;
2751
2752                         /* Force a linkdown */
2753                         if (netif_carrier_ok(tp->dev)) {
2754                                 u32 adv;
2755
2756                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2757                                 adv &= ~(ADVERTISE_1000XFULL |
2758                                          ADVERTISE_1000XHALF |
2759                                          ADVERTISE_SLCT);
2760                                 tg3_writephy(tp, MII_ADVERTISE, adv);
2761                                 tg3_writephy(tp, MII_BMCR, bmcr |
2762                                                            BMCR_ANRESTART |
2763                                                            BMCR_ANENABLE);
2764                                 udelay(10);
2765                                 netif_carrier_off(tp->dev);
2766                         }
2767                         tg3_writephy(tp, MII_BMCR, new_bmcr);
2768                         bmcr = new_bmcr;
2769                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2770                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2771                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2772                             ASIC_REV_5714) {
2773                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2774                                         bmsr |= BMSR_LSTATUS;
2775                                 else
2776                                         bmsr &= ~BMSR_LSTATUS;
2777                         }
2778                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2779                 }
2780         }
2781
2782         if (bmsr & BMSR_LSTATUS) {
2783                 current_speed = SPEED_1000;
2784                 current_link_up = 1;
2785                 if (bmcr & BMCR_FULLDPLX)
2786                         current_duplex = DUPLEX_FULL;
2787                 else
2788                         current_duplex = DUPLEX_HALF;
2789
2790                 if (bmcr & BMCR_ANENABLE) {
2791                         u32 local_adv, remote_adv, common;
2792
2793                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
2794                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
2795                         common = local_adv & remote_adv;
2796                         if (common & (ADVERTISE_1000XHALF |
2797                                       ADVERTISE_1000XFULL)) {
2798                                 if (common & ADVERTISE_1000XFULL)
2799                                         current_duplex = DUPLEX_FULL;
2800                                 else
2801                                         current_duplex = DUPLEX_HALF;
2802
2803                                 tg3_setup_flow_control(tp, local_adv,
2804                                                        remote_adv);
2805                         }
2806                         else
2807                                 current_link_up = 0;
2808                 }
2809         }
2810
2811         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2812         if (tp->link_config.active_duplex == DUPLEX_HALF)
2813                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2814
2815         tw32_f(MAC_MODE, tp->mac_mode);
2816         udelay(40);
2817
2818         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2819
2820         tp->link_config.active_speed = current_speed;
2821         tp->link_config.active_duplex = current_duplex;
2822
2823         if (current_link_up != netif_carrier_ok(tp->dev)) {
2824                 if (current_link_up)
2825                         netif_carrier_on(tp->dev);
2826                 else {
2827                         netif_carrier_off(tp->dev);
2828                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2829                 }
2830                 tg3_link_report(tp);
2831         }
2832         return err;
2833 }
2834
2835 static void tg3_serdes_parallel_detect(struct tg3 *tp)
2836 {
2837         if (tp->serdes_counter) {
2838                 /* Give autoneg time to complete. */
2839                 tp->serdes_counter--;
2840                 return;
2841         }
2842         if (!netif_carrier_ok(tp->dev) &&
2843             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2844                 u32 bmcr;
2845
2846                 tg3_readphy(tp, MII_BMCR, &bmcr);
2847                 if (bmcr & BMCR_ANENABLE) {
2848                         u32 phy1, phy2;
2849
2850                         /* Select shadow register 0x1f */
2851                         tg3_writephy(tp, 0x1c, 0x7c00);
2852                         tg3_readphy(tp, 0x1c, &phy1);
2853
2854                         /* Select expansion interrupt status register */
2855                         tg3_writephy(tp, 0x17, 0x0f01);
2856                         tg3_readphy(tp, 0x15, &phy2);
2857                         tg3_readphy(tp, 0x15, &phy2);
2858
2859                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
2860                                 /* We have signal detect and not receiving
2861                                  * config code words, link is up by parallel
2862                                  * detection.
2863                                  */
2864
2865                                 bmcr &= ~BMCR_ANENABLE;
2866                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
2867                                 tg3_writephy(tp, MII_BMCR, bmcr);
2868                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
2869                         }
2870                 }
2871         }
2872         else if (netif_carrier_ok(tp->dev) &&
2873                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
2874                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2875                 u32 phy2;
2876
2877                 /* Select expansion interrupt status register */
2878                 tg3_writephy(tp, 0x17, 0x0f01);
2879                 tg3_readphy(tp, 0x15, &phy2);
2880                 if (phy2 & 0x20) {
2881                         u32 bmcr;
2882
2883                         /* Config code words received, turn on autoneg. */
2884                         tg3_readphy(tp, MII_BMCR, &bmcr);
2885                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
2886
2887                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2888
2889                 }
2890         }
2891 }
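
/* Illustrative sketch (not used by the driver): the parallel-detect test in
 * tg3_serdes_parallel_detect() above reduces to this predicate, where bit 4
 * of the shadow register value (phy1) is "signal detect" and bit 5 of the
 * expansion interrupt status value (phy2) is "receiving config code words".
 */
static inline int tg3_parallel_detect_link_sketch(u32 phy1, u32 phy2)
{
        /* Signal present but no config code words: the link came up without
         * autoneg, so the caller forces 1000/full as done above.
         */
        return (phy1 & 0x10) && !(phy2 & 0x20);
}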
2892
2893 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2894 {
2895         int err;
2896
2897         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2898                 err = tg3_setup_fiber_phy(tp, force_reset);
2899         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2900                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2901         } else {
2902                 err = tg3_setup_copper_phy(tp, force_reset);
2903         }
2904
2905         if (tp->link_config.active_speed == SPEED_1000 &&
2906             tp->link_config.active_duplex == DUPLEX_HALF)
2907                 tw32(MAC_TX_LENGTHS,
2908                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2909                       (6 << TX_LENGTHS_IPG_SHIFT) |
2910                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2911         else
2912                 tw32(MAC_TX_LENGTHS,
2913                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2914                       (6 << TX_LENGTHS_IPG_SHIFT) |
2915                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2916
2917         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2918                 if (netif_carrier_ok(tp->dev)) {
2919                         tw32(HOSTCC_STAT_COAL_TICKS,
2920                              tp->coal.stats_block_coalesce_usecs);
2921                 } else {
2922                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2923                 }
2924         }
2925
2926         return err;
2927 }
2928
2929 /* This is called whenever we suspect that the system chipset is re-
2930  * ordering the sequence of MMIO to the tx send mailbox. The symptom
2931  * is bogus tx completions. We try to recover by setting the
2932  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
2933  * in the workqueue.
2934  */
2935 static void tg3_tx_recover(struct tg3 *tp)
2936 {
2937         BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
2938                tp->write32_tx_mbox == tg3_write_indirect_mbox);
2939
2940         printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
2941                "mapped I/O cycles to the network device, attempting to "
2942                "recover. Please report the problem to the driver maintainer "
2943                "and include system chipset information.\n", tp->dev->name);
2944
2945         spin_lock(&tp->lock);
2946         tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
2947         spin_unlock(&tp->lock);
2948 }
2949
2950 static inline u32 tg3_tx_avail(struct tg3 *tp)
2951 {
2952         smp_mb();
2953         return (tp->tx_pending -
2954                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
2955 }
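
/* Worked example (illustrative only, assuming TG3_TX_RING_SIZE == 512 and a
 * tx_pending of 200): a wrapped ring with tx_prod == 5 and tx_cons == 510
 * has (5 - 510) & 511 == 7 descriptors still in flight, so 193 slots are
 * reported free.  The mask is only correct because the ring size is a
 * power of two.
 */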
2956
2957 /* Tigon3 never reports partial packet sends.  So we do not
2958  * need special logic to handle SKBs that have not had all
2959  * of their frags sent yet, like SunGEM does.
2960  */
2961 static void tg3_tx(struct tg3 *tp)
2962 {
2963         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2964         u32 sw_idx = tp->tx_cons;
2965
2966         while (sw_idx != hw_idx) {
2967                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2968                 struct sk_buff *skb = ri->skb;
2969                 int i, tx_bug = 0;
2970
2971                 if (unlikely(skb == NULL)) {
2972                         tg3_tx_recover(tp);
2973                         return;
2974                 }
2975
2976                 pci_unmap_single(tp->pdev,
2977                                  pci_unmap_addr(ri, mapping),
2978                                  skb_headlen(skb),
2979                                  PCI_DMA_TODEVICE);
2980
2981                 ri->skb = NULL;
2982
2983                 sw_idx = NEXT_TX(sw_idx);
2984
2985                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2986                         ri = &tp->tx_buffers[sw_idx];
2987                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
2988                                 tx_bug = 1;
2989
2990                         pci_unmap_page(tp->pdev,
2991                                        pci_unmap_addr(ri, mapping),
2992                                        skb_shinfo(skb)->frags[i].size,
2993                                        PCI_DMA_TODEVICE);
2994
2995                         sw_idx = NEXT_TX(sw_idx);
2996                 }
2997
2998                 dev_kfree_skb(skb);
2999
3000                 if (unlikely(tx_bug)) {
3001                         tg3_tx_recover(tp);
3002                         return;
3003                 }
3004         }
3005
3006         tp->tx_cons = sw_idx;
3007
3008         /* Need to make the tx_cons update visible to tg3_start_xmit()
3009          * before checking for netif_queue_stopped().  Without the
3010          * memory barrier, there is a small possibility that tg3_start_xmit()
3011          * will miss it and cause the queue to be stopped forever.
3012          */
3013         smp_mb();
3014
3015         if (unlikely(netif_queue_stopped(tp->dev) &&
3016                      (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))) {
3017                 netif_tx_lock(tp->dev);
3018                 if (netif_queue_stopped(tp->dev) &&
3019                     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))
3020                         netif_wake_queue(tp->dev);
3021                 netif_tx_unlock(tp->dev);
3022         }
3023 }
3024
3025 /* Returns size of skb allocated or < 0 on error.
3026  *
3027  * We only need to fill in the address because the other members
3028  * of the RX descriptor are invariant, see tg3_init_rings.
3029  *
3030  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
3031  * posting buffers we only dirty the first cache line of the RX
3032  * descriptor (containing the address).  Whereas for the RX status
3033  * buffers the cpu only reads the last cacheline of the RX descriptor
3034  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3035  */
3036 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3037                             int src_idx, u32 dest_idx_unmasked)
3038 {
3039         struct tg3_rx_buffer_desc *desc;
3040         struct ring_info *map, *src_map;
3041         struct sk_buff *skb;
3042         dma_addr_t mapping;
3043         int skb_size, dest_idx;
3044
3045         src_map = NULL;
3046         switch (opaque_key) {
3047         case RXD_OPAQUE_RING_STD:
3048                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3049                 desc = &tp->rx_std[dest_idx];
3050                 map = &tp->rx_std_buffers[dest_idx];
3051                 if (src_idx >= 0)
3052                         src_map = &tp->rx_std_buffers[src_idx];
3053                 skb_size = tp->rx_pkt_buf_sz;
3054                 break;
3055
3056         case RXD_OPAQUE_RING_JUMBO:
3057                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3058                 desc = &tp->rx_jumbo[dest_idx];
3059                 map = &tp->rx_jumbo_buffers[dest_idx];
3060                 if (src_idx >= 0)
3061                         src_map = &tp->rx_jumbo_buffers[src_idx];
3062                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3063                 break;
3064
3065         default:
3066                 return -EINVAL;
3067         }
3068
3069         /* Do not overwrite any of the map or rp information
3070          * until we are sure we can commit to a new buffer.
3071          *
3072          * Callers depend upon this behavior and assume that
3073          * we leave everything unchanged if we fail.
3074          */
3075         skb = netdev_alloc_skb(tp->dev, skb_size);
3076         if (skb == NULL)
3077                 return -ENOMEM;
3078
3079         skb_reserve(skb, tp->rx_offset);
3080
3081         mapping = pci_map_single(tp->pdev, skb->data,
3082                                  skb_size - tp->rx_offset,
3083                                  PCI_DMA_FROMDEVICE);
3084
3085         map->skb = skb;
3086         pci_unmap_addr_set(map, mapping, mapping);
3087
3088         if (src_map != NULL)
3089                 src_map->skb = NULL;
3090
3091         desc->addr_hi = ((u64)mapping >> 32);
3092         desc->addr_lo = ((u64)mapping & 0xffffffff);
3093
3094         return skb_size;
3095 }
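
/* Worked example (illustrative): a 64-bit bus address such as
 * 0x0000000123456780 is split across the descriptor exactly as above,
 * giving addr_hi == 0x00000001 and addr_lo == 0x23456780; with a 32-bit
 * dma_addr_t the high word simply ends up zero.
 */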
3096
3097 /* We only need to move over in the address because the other
3098  * members of the RX descriptor are invariant.  See notes above
3099  * tg3_alloc_rx_skb for full details.
3100  */
3101 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3102                            int src_idx, u32 dest_idx_unmasked)
3103 {
3104         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3105         struct ring_info *src_map, *dest_map;
3106         int dest_idx;
3107
3108         switch (opaque_key) {
3109         case RXD_OPAQUE_RING_STD:
3110                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3111                 dest_desc = &tp->rx_std[dest_idx];
3112                 dest_map = &tp->rx_std_buffers[dest_idx];
3113                 src_desc = &tp->rx_std[src_idx];
3114                 src_map = &tp->rx_std_buffers[src_idx];
3115                 break;
3116
3117         case RXD_OPAQUE_RING_JUMBO:
3118                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3119                 dest_desc = &tp->rx_jumbo[dest_idx];
3120                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3121                 src_desc = &tp->rx_jumbo[src_idx];
3122                 src_map = &tp->rx_jumbo_buffers[src_idx];
3123                 break;
3124
3125         default:
3126                 return;
3127         }
3128
3129         dest_map->skb = src_map->skb;
3130         pci_unmap_addr_set(dest_map, mapping,
3131                            pci_unmap_addr(src_map, mapping));
3132         dest_desc->addr_hi = src_desc->addr_hi;
3133         dest_desc->addr_lo = src_desc->addr_lo;
3134
3135         src_map->skb = NULL;
3136 }
3137
3138 #if TG3_VLAN_TAG_USED
3139 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3140 {
3141         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3142 }
3143 #endif
3144
3145 /* The RX ring scheme is composed of multiple rings which post fresh
3146  * buffers to the chip, and one special ring the chip uses to report
3147  * status back to the host.
3148  *
3149  * The special ring reports the status of received packets to the
3150  * host.  The chip does not write into the original descriptor the
3151  * RX buffer was obtained from.  The chip simply takes the original
3152  * descriptor as provided by the host, updates the status and length
3153  * field, then writes this into the next status ring entry.
3154  *
3155  * Each ring the host uses to post buffers to the chip is described
3156  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
3157  * it is first placed into the on-chip RAM.  Once the packet's length
3158  * is known, the chip walks down the TG3_BDINFO entries to select the ring.
3159  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
3160  * whose MAXLEN covers the new packet's length is chosen.
3161  *
3162  * The "separate ring for rx status" scheme may sound queer, but it makes
3163  * sense from a cache coherency perspective.  If only the host writes
3164  * to the buffer post rings, and only the chip writes to the rx status
3165  * rings, then cache lines never move beyond shared-modified state.
3166  * If both the host and chip were to write into the same ring, cache line
3167  * eviction could occur since both entities want it in an exclusive state.
3168  */
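
/* Illustrative sketch (hypothetical MAXLEN values, nothing is read from the
 * chip here): the TG3_BDINFO walk described above amounts to picking the
 * first producer ring whose MAXLEN covers the frame, so a 1400-byte frame
 * selects the standard ring while a 5000-byte frame falls through to the
 * jumbo ring.
 */
static inline int tg3_pick_rx_ring_sketch(unsigned int pkt_len,
                                          unsigned int std_maxlen,
                                          unsigned int jumbo_maxlen)
{
        if (pkt_len <= std_maxlen)
                return RXD_OPAQUE_RING_STD;
        if (pkt_len <= jumbo_maxlen)
                return RXD_OPAQUE_RING_JUMBO;
        return -1;      /* no posted ring can hold this frame */
}
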
3169 static int tg3_rx(struct tg3 *tp, int budget)
3170 {
3171         u32 work_mask, rx_std_posted = 0;
3172         u32 sw_idx = tp->rx_rcb_ptr;
3173         u16 hw_idx;
3174         int received;
3175
3176         hw_idx = tp->hw_status->idx[0].rx_producer;
3177         /*
3178          * We need to order the read of hw_idx and the read of
3179          * the opaque cookie.
3180          */
3181         rmb();
3182         work_mask = 0;
3183         received = 0;
3184         while (sw_idx != hw_idx && budget > 0) {
3185                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3186                 unsigned int len;
3187                 struct sk_buff *skb;
3188                 dma_addr_t dma_addr;
3189                 u32 opaque_key, desc_idx, *post_ptr;
3190
3191                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3192                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3193                 if (opaque_key == RXD_OPAQUE_RING_STD) {
3194                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3195                                                   mapping);
3196                         skb = tp->rx_std_buffers[desc_idx].skb;
3197                         post_ptr = &tp->rx_std_ptr;
3198                         rx_std_posted++;
3199                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3200                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3201                                                   mapping);
3202                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
3203                         post_ptr = &tp->rx_jumbo_ptr;
3204                 }
3205                 else {
3206                         goto next_pkt_nopost;
3207                 }
3208
3209                 work_mask |= opaque_key;
3210
3211                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3212                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3213                 drop_it:
3214                         tg3_recycle_rx(tp, opaque_key,
3215                                        desc_idx, *post_ptr);
3216                 drop_it_no_recycle:
3217                         /* Other statistics kept track of by card. */
3218                         tp->net_stats.rx_dropped++;
3219                         goto next_pkt;
3220                 }
3221
3222                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3223
3224                 if (len > RX_COPY_THRESHOLD
3225                         && tp->rx_offset == 2
3226                         /* rx_offset != 2 iff this is a 5701 card running
3227                          * in PCI-X mode [see tg3_get_invariants()] */
3228                 ) {
3229                         int skb_size;
3230
3231                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3232                                                     desc_idx, *post_ptr);
3233                         if (skb_size < 0)
3234                                 goto drop_it;
3235
3236                         pci_unmap_single(tp->pdev, dma_addr,
3237                                          skb_size - tp->rx_offset,
3238                                          PCI_DMA_FROMDEVICE);
3239
3240                         skb_put(skb, len);
3241                 } else {
3242                         struct sk_buff *copy_skb;
3243
3244                         tg3_recycle_rx(tp, opaque_key,
3245                                        desc_idx, *post_ptr);
3246
3247                         copy_skb = netdev_alloc_skb(tp->dev, len + 2);
3248                         if (copy_skb == NULL)
3249                                 goto drop_it_no_recycle;
3250
3251                         skb_reserve(copy_skb, 2);
3252                         skb_put(copy_skb, len);
3253                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3254                         memcpy(copy_skb->data, skb->data, len);
3255                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3256
3257                         /* We'll reuse the original ring buffer. */
3258                         skb = copy_skb;
3259                 }
3260
3261                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3262                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3263                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3264                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
3265                         skb->ip_summed = CHECKSUM_UNNECESSARY;
3266                 else
3267                         skb->ip_summed = CHECKSUM_NONE;
3268
3269                 skb->protocol = eth_type_trans(skb, tp->dev);
3270 #if TG3_VLAN_TAG_USED
3271                 if (tp->vlgrp != NULL &&
3272                     desc->type_flags & RXD_FLAG_VLAN) {
3273                         tg3_vlan_rx(tp, skb,
3274                                     desc->err_vlan & RXD_VLAN_MASK);
3275                 } else
3276 #endif
3277                         netif_receive_skb(skb);
3278
3279                 tp->dev->last_rx = jiffies;
3280                 received++;
3281                 budget--;
3282
3283 next_pkt:
3284                 (*post_ptr)++;
3285
3286                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
3287                         u32 idx = *post_ptr % TG3_RX_RING_SIZE;
3288
3289                         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
3290                                      TG3_64BIT_REG_LOW, idx);
3291                         work_mask &= ~RXD_OPAQUE_RING_STD;
3292                         rx_std_posted = 0;
3293                 }
3294 next_pkt_nopost:
3295                 sw_idx++;
3296                 sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
3297
3298                 /* Refresh hw_idx to see if there is new work */
3299                 if (sw_idx == hw_idx) {
3300                         hw_idx = tp->hw_status->idx[0].rx_producer;
3301                         rmb();
3302                 }
3303         }
3304
3305         /* ACK the status ring. */
3306         tp->rx_rcb_ptr = sw_idx;
3307         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3308
3309         /* Refill RX ring(s). */
3310         if (work_mask & RXD_OPAQUE_RING_STD) {
3311                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3312                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3313                              sw_idx);
3314         }
3315         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3316                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3317                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3318                              sw_idx);
3319         }
3320         mmiowb();
3321
3322         return received;
3323 }
3324
3325 static int tg3_poll(struct net_device *netdev, int *budget)
3326 {
3327         struct tg3 *tp = netdev_priv(netdev);
3328         struct tg3_hw_status *sblk = tp->hw_status;
3329         int done;
3330
3331         /* handle link change and other phy events */
3332         if (!(tp->tg3_flags &
3333               (TG3_FLAG_USE_LINKCHG_REG |
3334                TG3_FLAG_POLL_SERDES))) {
3335                 if (sblk->status & SD_STATUS_LINK_CHG) {
3336                         sblk->status = SD_STATUS_UPDATED |
3337                                 (sblk->status & ~SD_STATUS_LINK_CHG);
3338                         spin_lock(&tp->lock);
3339                         tg3_setup_phy(tp, 0);
3340                         spin_unlock(&tp->lock);
3341                 }
3342         }
3343
3344         /* run TX completion thread */
3345         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3346                 tg3_tx(tp);
3347                 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
3348                         netif_rx_complete(netdev);
3349                         schedule_work(&tp->reset_task);
3350                         return 0;
3351                 }
3352         }
3353
3354         /* run RX thread, within the bounds set by NAPI.
3355          * All RX "locking" is done by ensuring outside
3356          * code synchronizes with dev->poll()
3357          */
3358         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
3359                 int orig_budget = *budget;
3360                 int work_done;
3361
3362                 if (orig_budget > netdev->quota)
3363                         orig_budget = netdev->quota;
3364
3365                 work_done = tg3_rx(tp, orig_budget);
3366
3367                 *budget -= work_done;
3368                 netdev->quota -= work_done;
3369         }
3370
3371         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3372                 tp->last_tag = sblk->status_tag;
3373                 rmb();
3374         } else
3375                 sblk->status &= ~SD_STATUS_UPDATED;
3376
3377         /* if no more work, tell net stack and NIC we're done */
3378         done = !tg3_has_work(tp);
3379         if (done) {
3380                 netif_rx_complete(netdev);
3381                 tg3_restart_ints(tp);
3382         }
3383
3384         return (done ? 0 : 1);
3385 }
3386
3387 static void tg3_irq_quiesce(struct tg3 *tp)
3388 {
3389         BUG_ON(tp->irq_sync);
3390
3391         tp->irq_sync = 1;
3392         smp_mb();
3393
3394         synchronize_irq(tp->pdev->irq);
3395 }
3396
3397 static inline int tg3_irq_sync(struct tg3 *tp)
3398 {
3399         return tp->irq_sync;
3400 }
3401
3402 /* Fully shut down all tg3 driver activity elsewhere in the system.
3403  * If irq_sync is non-zero, then the IRQ handler must be synchronized
3404  * with as well.  Most of the time, this is not necessary except when
3405  * shutting down the device.
3406  */
3407 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3408 {
3409         if (irq_sync)
3410                 tg3_irq_quiesce(tp);
3411         spin_lock_bh(&tp->lock);
3412 }
3413
3414 static inline void tg3_full_unlock(struct tg3 *tp)
3415 {
3416         spin_unlock_bh(&tp->lock);
3417 }
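
/* Typical usage (sketch mirroring tg3_change_mtu() later in this file):
 * quiesce NAPI and the IRQ handler, reconfigure under tp->lock, then
 * release the lock and restart the datapath.
 *
 *      tg3_netif_stop(tp);
 *      tg3_full_lock(tp, 1);
 *      tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 *      ... reprogram the hardware ...
 *      err = tg3_restart_hw(tp, 0);
 *      if (!err)
 *              tg3_netif_start(tp);
 *      tg3_full_unlock(tp);
 */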
3418
3419 /* One-shot MSI handler - Chip automatically disables interrupt
3420  * after sending MSI so driver doesn't have to do it.
3421  */
3422 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs)
3423 {
3424         struct net_device *dev = dev_id;
3425         struct tg3 *tp = netdev_priv(dev);
3426
3427         prefetch(tp->hw_status);
3428         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3429
3430         if (likely(!tg3_irq_sync(tp)))
3431                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3432
3433         return IRQ_HANDLED;
3434 }
3435
3436 /* MSI ISR - No need to check for interrupt sharing and no need to
3437  * flush status block and interrupt mailbox. PCI ordering rules
3438  * guarantee that MSI will arrive after the status block.
3439  */
3440 static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
3441 {
3442         struct net_device *dev = dev_id;
3443         struct tg3 *tp = netdev_priv(dev);
3444
3445         prefetch(tp->hw_status);
3446         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3447         /*
3448          * Writing any value to intr-mbox-0 clears PCI INTA# and
3449          * chip-internal interrupt pending events.
3450          * Writing non-zero to intr-mbox-0 additionally tells the
3451          * NIC to stop sending us irqs, engaging "in-intr-handler"
3452          * event coalescing.
3453          */
3454         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3455         if (likely(!tg3_irq_sync(tp)))
3456                 netif_rx_schedule(dev);         /* schedule NAPI poll */
3457
3458         return IRQ_RETVAL(1);
3459 }
3460
3461 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3462 {
3463         struct net_device *dev = dev_id;
3464         struct tg3 *tp = netdev_priv(dev);
3465         struct tg3_hw_status *sblk = tp->hw_status;
3466         unsigned int handled = 1;
3467
3468         /* In INTx mode, the interrupt can arrive at the CPU before the
3469          * status block posted just prior to it is visible in host memory.
3470          * Reading the PCI State register will confirm whether the
3471          * interrupt is ours and will flush the status block.
3472          */
3473         if ((sblk->status & SD_STATUS_UPDATED) ||
3474             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3475                 /*
3476                  * Writing any value to intr-mbox-0 clears PCI INTA# and
3477                  * chip-internal interrupt pending events.
3478                  * Writing non-zero to intr-mbox-0 additionally tells the
3479                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3480                  * event coalescing.
3481                  */
3482                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3483                              0x00000001);
3484                 if (tg3_irq_sync(tp))
3485                         goto out;
3486                 sblk->status &= ~SD_STATUS_UPDATED;
3487                 if (likely(tg3_has_work(tp))) {
3488                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3489                         netif_rx_schedule(dev);         /* schedule NAPI poll */
3490                 } else {
3491                         /* No work, shared interrupt perhaps?  re-enable
3492                          * interrupts, and flush that PCI write
3493                          */
3494                         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3495                                 0x00000000);
3496                 }
3497         } else {        /* shared interrupt */
3498                 handled = 0;
3499         }
3500 out:
3501         return IRQ_RETVAL(handled);
3502 }
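
/* Interrupt-mailbox protocol sketch (summarizing the handlers above and
 * below): any write to intr-mbox-0 acks PCI INTA#; a non-zero value also
 * tells the NIC to hold further irqs ("in-intr-handler" coalescing), and a
 * zero value re-enables them.
 *
 *      tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
 *              ack and mask while the NAPI poll runs
 *      tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
 *              unmask and flush the posted write (no work / shared irq case)
 */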
3503
3504 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
3505 {
3506         struct net_device *dev = dev_id;
3507         struct tg3 *tp = netdev_priv(dev);
3508         struct tg3_hw_status *sblk = tp->hw_status;
3509         unsigned int handled = 1;
3510
3511         /* In INTx mode, the interrupt can arrive at the CPU before the
3512          * status block posted just prior to it is visible in host memory.
3513          * Reading the PCI State register will confirm whether the
3514          * interrupt is ours and will flush the status block.
3515          */
3516         if ((sblk->status_tag != tp->last_tag) ||
3517             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3518                 /*
3519                  * writing any value to intr-mbox-0 clears PCI INTA# and
3520                  * chip-internal interrupt pending events.
3521                  * writing non-zero to intr-mbox-0 additionally tells the
3522                  * NIC to stop sending us irqs, engaging "in-intr-handler"
3523                  * event coalescing.
3524                  */
3525                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3526                              0x00000001);
3527                 if (tg3_irq_sync(tp))
3528                         goto out;
3529                 if (netif_rx_schedule_prep(dev)) {
3530                         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3531                         /* Update last_tag to mark that this status has been
3532                          * seen. Because interrupt may be shared, we may be
3533                          * racing with tg3_poll(), so only update last_tag
3534                          * if tg3_poll() is not scheduled.
3535                          */
3536                         tp->last_tag = sblk->status_tag;
3537                         __netif_rx_schedule(dev);
3538                 }
3539         } else {        /* shared interrupt */
3540                 handled = 0;
3541         }
3542 out:
3543         return IRQ_RETVAL(handled);
3544 }
3545
3546 /* ISR for interrupt test */
3547 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3548                 struct pt_regs *regs)
3549 {
3550         struct net_device *dev = dev_id;
3551         struct tg3 *tp = netdev_priv(dev);
3552         struct tg3_hw_status *sblk = tp->hw_status;
3553
3554         if ((sblk->status & SD_STATUS_UPDATED) ||
3555             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3556                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3557                              0x00000001);
3558                 return IRQ_RETVAL(1);
3559         }
3560         return IRQ_RETVAL(0);
3561 }
3562
3563 static int tg3_init_hw(struct tg3 *, int);
3564 static int tg3_halt(struct tg3 *, int, int);
3565
3566 /* Restart hardware after configuration changes, self-test, etc.
3567  * Invoked with tp->lock held.
3568  */
3569 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
3570 {
3571         int err;
3572
3573         err = tg3_init_hw(tp, reset_phy);
3574         if (err) {
3575                 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
3576                        "aborting.\n", tp->dev->name);
3577                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3578                 tg3_full_unlock(tp);
3579                 del_timer_sync(&tp->timer);
3580                 tp->irq_sync = 0;
3581                 netif_poll_enable(tp->dev);
3582                 dev_close(tp->dev);
3583                 tg3_full_lock(tp, 0);
3584         }
3585         return err;
3586 }
3587
3588 #ifdef CONFIG_NET_POLL_CONTROLLER
3589 static void tg3_poll_controller(struct net_device *dev)
3590 {
3591         struct tg3 *tp = netdev_priv(dev);
3592
3593         tg3_interrupt(tp->pdev->irq, dev, NULL);
3594 }
3595 #endif
3596
3597 static void tg3_reset_task(void *_data)
3598 {
3599         struct tg3 *tp = _data;
3600         unsigned int restart_timer;
3601
3602         tg3_full_lock(tp, 0);
3603         tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
3604
3605         if (!netif_running(tp->dev)) {
3606                 tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3607                 tg3_full_unlock(tp);
3608                 return;
3609         }
3610
3611         tg3_full_unlock(tp);
3612
3613         tg3_netif_stop(tp);
3614
3615         tg3_full_lock(tp, 1);
3616
3617         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3618         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3619
3620         if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
3621                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
3622                 tp->write32_rx_mbox = tg3_write_flush_reg32;
3623                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
3624                 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
3625         }
3626
3627         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3628         if (tg3_init_hw(tp, 1))
3629                 goto out;
3630
3631         tg3_netif_start(tp);
3632
3633         if (restart_timer)
3634                 mod_timer(&tp->timer, jiffies + 1);
3635
3636 out:
3637         tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
3638
3639         tg3_full_unlock(tp);
3640 }
3641
3642 static void tg3_tx_timeout(struct net_device *dev)
3643 {
3644         struct tg3 *tp = netdev_priv(dev);
3645
3646         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3647                dev->name);
3648
3649         schedule_work(&tp->reset_task);
3650 }
3651
3652 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
3653 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3654 {
3655         u32 base = (u32) mapping & 0xffffffff;
3656
3657         return ((base > 0xffffdcc0) &&
3658                 (base + len + 8 < base));
3659 }
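
/* Worked example (illustrative): a mapping at 0xffffff00 with len == 0x200
 * passes the coarse "base > 0xffffdcc0" screen, and base + len + 8 wraps the
 * low 32 bits around to 0x00000108 < base, so the buffer is reported as
 * crossing a 4GB boundary and the workaround path below is taken.
 */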
3660
3661 /* Test for DMA addresses > 40-bit */
3662 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3663                                           int len)
3664 {
3665 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3666         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
3667                 return (((u64) mapping + len) > DMA_40BIT_MASK);
3668         return 0;
3669 #else
3670         return 0;
3671 #endif
3672 }
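
/* Worked example (illustrative, only relevant when TG3_FLAG_40BIT_DMA_BUG is
 * set on a 64-bit highmem configuration): a mapping at 0xfffffff000 with
 * len == 0x2000 ends at 0x10000001000, which exceeds DMA_40BIT_MASK
 * (0xffffffffff), so the packet is steered through the workaround as well.
 */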
3673
3674 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3675
3676 /* Work around 4GB and 40-bit hardware DMA bugs. */
3677 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3678                                        u32 last_plus_one, u32 *start,
3679                                        u32 base_flags, u32 mss)
3680 {
3681         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3682         dma_addr_t new_addr = 0;
3683         u32 entry = *start;
3684         int i, ret = 0;
3685
3686         if (!new_skb) {
3687                 ret = -1;
3688         } else {
3689                 /* New SKB is guaranteed to be linear. */
3690                 entry = *start;
3691                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3692                                           PCI_DMA_TODEVICE);
3693                 /* Make sure new skb does not cross any 4G boundaries.
3694                  * Drop the packet if it does.
3695                  */
3696                 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3697                         ret = -1;
3698                         dev_kfree_skb(new_skb);
3699                         new_skb = NULL;
3700                 } else {
3701                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3702                                     base_flags, 1 | (mss << 1));
3703                         *start = NEXT_TX(entry);
3704                 }
3705         }
3706
3707         /* Now clean up the sw ring entries. */
3708         i = 0;
3709         while (entry != last_plus_one) {
3710                 int len;
3711
3712                 if (i == 0)
3713                         len = skb_headlen(skb);
3714                 else
3715                         len = skb_shinfo(skb)->frags[i-1].size;
3716                 pci_unmap_single(tp->pdev,
3717                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3718                                  len, PCI_DMA_TODEVICE);
3719                 if (i == 0) {
3720                         tp->tx_buffers[entry].skb = new_skb;
3721                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3722                 } else {
3723                         tp->tx_buffers[entry].skb = NULL;
3724                 }
3725                 entry = NEXT_TX(entry);
3726                 i++;
3727         }
3728
3729         dev_kfree_skb(skb);
3730
3731         return ret;
3732 }
3733
3734 static void tg3_set_txd(struct tg3 *tp, int entry,
3735                         dma_addr_t mapping, int len, u32 flags,
3736                         u32 mss_and_is_end)
3737 {
3738         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3739         int is_end = (mss_and_is_end & 0x1);
3740         u32 mss = (mss_and_is_end >> 1);
3741         u32 vlan_tag = 0;
3742
3743         if (is_end)
3744                 flags |= TXD_FLAG_END;
3745         if (flags & TXD_FLAG_VLAN) {
3746                 vlan_tag = flags >> 16;
3747                 flags &= 0xffff;
3748         }
3749         vlan_tag |= (mss << TXD_MSS_SHIFT);
3750
3751         txd->addr_hi = ((u64) mapping >> 32);
3752         txd->addr_lo = ((u64) mapping & 0xffffffff);
3753         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3754         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3755 }
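
/* Worked example (illustrative): callers pack "is this the last fragment?"
 * into bit 0 and the MSS into the remaining bits, e.g. the final fragment of
 * a TSO packet with mss == 1448 is queued with 1 | (1448 << 1) == 0x0b51,
 * which tg3_set_txd() above unpacks again.
 */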
3756
3757 /* hard_start_xmit for devices that don't have any bugs and
3758  * support TG3_FLG2_HW_TSO_2 only.
3759  */
3760 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3761 {
3762         struct tg3 *tp = netdev_priv(dev);
3763         dma_addr_t mapping;
3764         u32 len, entry, base_flags, mss;
3765
3766         len = skb_headlen(skb);
3767
3768         /* We are running in BH disabled context with netif_tx_lock
3769          * and TX reclaim runs via tp->poll inside of a software
3770          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3771          * no IRQ context deadlocks to worry about either.  Rejoice!
3772          */
3773         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3774                 if (!netif_queue_stopped(dev)) {
3775                         netif_stop_queue(dev);
3776
3777                         /* This is a hard error, log it. */
3778                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3779                                "queue awake!\n", dev->name);
3780                 }
3781                 return NETDEV_TX_BUSY;
3782         }
3783
3784         entry = tp->tx_prod;
3785         base_flags = 0;
3786 #if TG3_TSO_SUPPORT != 0
3787         mss = 0;
3788         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3789             (mss = skb_shinfo(skb)->gso_size) != 0) {
3790                 int tcp_opt_len, ip_tcp_len;
3791
3792                 if (skb_header_cloned(skb) &&
3793                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3794                         dev_kfree_skb(skb);
3795                         goto out_unlock;
3796                 }
3797
3798                 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
3799                         mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
3800                 else {
3801                         tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3802                         ip_tcp_len = (skb->nh.iph->ihl * 4) +
3803                                      sizeof(struct tcphdr);
3804
3805                         skb->nh.iph->check = 0;
3806                         skb->nh.iph->tot_len = htons(mss + ip_tcp_len +
3807                                                      tcp_opt_len);
3808                         mss |= (ip_tcp_len + tcp_opt_len) << 9;
3809                 }
3810
3811                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3812                                TXD_FLAG_CPU_POST_DMA);
3813
3814                 skb->h.th->check = 0;
3815
3816         }
3817         else if (skb->ip_summed == CHECKSUM_PARTIAL)
3818                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3819 #else
3820         mss = 0;
3821         if (skb->ip_summed == CHECKSUM_PARTIAL)
3822                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3823 #endif
3824 #if TG3_VLAN_TAG_USED
3825         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3826                 base_flags |= (TXD_FLAG_VLAN |
3827                                (vlan_tx_tag_get(skb) << 16));
3828 #endif
3829
3830         /* Queue skb data, a.k.a. the main skb fragment. */
3831         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3832
3833         tp->tx_buffers[entry].skb = skb;
3834         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3835
3836         tg3_set_txd(tp, entry, mapping, len, base_flags,
3837                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3838
3839         entry = NEXT_TX(entry);
3840
3841         /* Now loop through additional data fragments, and queue them. */
3842         if (skb_shinfo(skb)->nr_frags > 0) {
3843                 unsigned int i, last;
3844
3845                 last = skb_shinfo(skb)->nr_frags - 1;
3846                 for (i = 0; i <= last; i++) {
3847                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3848
3849                         len = frag->size;
3850                         mapping = pci_map_page(tp->pdev,
3851                                                frag->page,
3852                                                frag->page_offset,
3853                                                len, PCI_DMA_TODEVICE);
3854
3855                         tp->tx_buffers[entry].skb = NULL;
3856                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3857
3858                         tg3_set_txd(tp, entry, mapping, len,
3859                                     base_flags, (i == last) | (mss << 1));
3860
3861                         entry = NEXT_TX(entry);
3862                 }
3863         }
3864
3865         /* Packets are ready, update Tx producer idx local and on card. */
3866         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3867
3868         tp->tx_prod = entry;
3869         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
3870                 netif_stop_queue(dev);
3871                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
3872                         netif_wake_queue(tp->dev);
3873         }
3874
3875 out_unlock:
3876         mmiowb();
3877
3878         dev->trans_start = jiffies;
3879
3880         return NETDEV_TX_OK;
3881 }
3882
3883 #if TG3_TSO_SUPPORT != 0
3884 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
3885
3886 /* Use GSO to work around a rare TSO bug that may be triggered when the
3887  * TSO header is greater than 80 bytes.
3888  */
3889 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
3890 {
3891         struct sk_buff *segs, *nskb;
3892
3893         /* Estimate the number of fragments in the worst case */
3894         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
3895                 netif_stop_queue(tp->dev);
3896                 return NETDEV_TX_BUSY;
3897         }
3898
3899         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
3900         if (unlikely(IS_ERR(segs)))
3901                 goto tg3_tso_bug_end;
3902
3903         do {
3904                 nskb = segs;
3905                 segs = segs->next;
3906                 nskb->next = NULL;
3907                 tg3_start_xmit_dma_bug(nskb, tp->dev);
3908         } while (segs);
3909
3910 tg3_tso_bug_end:
3911         dev_kfree_skb(skb);
3912
3913         return NETDEV_TX_OK;
3914 }
3915 #endif
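
/* Worked example (illustrative): with a 20-byte IP header and 28 bytes of
 * TCP options, hdr_len == 20 + 20 + 28 == 68, so ETH_HLEN + hdr_len == 82
 * exceeds the 80-byte limit and tg3_start_xmit_dma_bug() below hands the
 * packet to tg3_tso_bug() on chips with TG3_FLG2_HW_TSO_1_BUG set.
 */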
3916
3917 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
3918  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
3919  */
3920 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
3921 {
3922         struct tg3 *tp = netdev_priv(dev);
3923         dma_addr_t mapping;
3924         u32 len, entry, base_flags, mss;
3925         int would_hit_hwbug;
3926
3927         len = skb_headlen(skb);
3928
3929         /* We are running in BH disabled context with netif_tx_lock
3930          * and TX reclaim runs via tp->poll inside of a software
3931          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3932          * no IRQ context deadlocks to worry about either.  Rejoice!
3933          */
3934         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3935                 if (!netif_queue_stopped(dev)) {
3936                         netif_stop_queue(dev);
3937
3938                         /* This is a hard error, log it. */
3939                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3940                                "queue awake!\n", dev->name);
3941                 }
3942                 return NETDEV_TX_BUSY;
3943         }
3944
3945         entry = tp->tx_prod;
3946         base_flags = 0;
3947         if (skb->ip_summed == CHECKSUM_PARTIAL)
3948                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3949 #if TG3_TSO_SUPPORT != 0
3950         mss = 0;
3951         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3952             (mss = skb_shinfo(skb)->gso_size) != 0) {
3953                 int tcp_opt_len, ip_tcp_len, hdr_len;
3954
3955                 if (skb_header_cloned(skb) &&
3956                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3957                         dev_kfree_skb(skb);
3958                         goto out_unlock;
3959                 }
3960
3961                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3962                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3963
3964                 hdr_len = ip_tcp_len + tcp_opt_len;
3965                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
3966                              (tp->tg3_flags2 & TG3_FLG2_HW_TSO_1_BUG))
3967                         return (tg3_tso_bug(tp, skb));
3968
3969                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3970                                TXD_FLAG_CPU_POST_DMA);
3971
3972                 skb->nh.iph->check = 0;
3973                 skb->nh.iph->tot_len = htons(mss + hdr_len);
3974                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3975                         skb->h.th->check = 0;
3976                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3977                 }
3978                 else {
3979                         skb->h.th->check =
3980                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3981                                                    skb->nh.iph->daddr,
3982                                                    0, IPPROTO_TCP, 0);
3983                 }
3984
3985                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3986                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3987                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3988                                 int tsflags;
3989
3990                                 tsflags = ((skb->nh.iph->ihl - 5) +
3991                                            (tcp_opt_len >> 2));
3992                                 mss |= (tsflags << 11);
3993                         }
3994                 } else {
3995                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3996                                 int tsflags;
3997
3998                                 tsflags = ((skb->nh.iph->ihl - 5) +
3999                                            (tcp_opt_len >> 2));
4000                                 base_flags |= tsflags << 12;
4001                         }
4002                 }
4003         }
4004 #else
4005         mss = 0;
4006 #endif
4007 #if TG3_VLAN_TAG_USED
4008         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4009                 base_flags |= (TXD_FLAG_VLAN |
4010                                (vlan_tx_tag_get(skb) << 16));
4011 #endif
4012
4013         /* Queue skb data, a.k.a. the main skb fragment. */
4014         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4015
4016         tp->tx_buffers[entry].skb = skb;
4017         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4018
4019         would_hit_hwbug = 0;
4020
4021         if (tg3_4g_overflow_test(mapping, len))
4022                 would_hit_hwbug = 1;
4023
4024         tg3_set_txd(tp, entry, mapping, len, base_flags,
4025                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4026
4027         entry = NEXT_TX(entry);
4028
4029         /* Now loop through additional data fragments, and queue them. */
4030         if (skb_shinfo(skb)->nr_frags > 0) {
4031                 unsigned int i, last;
4032
4033                 last = skb_shinfo(skb)->nr_frags - 1;
4034                 for (i = 0; i <= last; i++) {
4035                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4036
4037                         len = frag->size;
4038                         mapping = pci_map_page(tp->pdev,
4039                                                frag->page,
4040                                                frag->page_offset,
4041                                                len, PCI_DMA_TODEVICE);
4042
4043                         tp->tx_buffers[entry].skb = NULL;
4044                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4045
4046                         if (tg3_4g_overflow_test(mapping, len))
4047                                 would_hit_hwbug = 1;
4048
4049                         if (tg3_40bit_overflow_test(tp, mapping, len))
4050                                 would_hit_hwbug = 1;
4051
4052                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4053                                 tg3_set_txd(tp, entry, mapping, len,
4054                                             base_flags, (i == last)|(mss << 1));
4055                         else
4056                                 tg3_set_txd(tp, entry, mapping, len,
4057                                             base_flags, (i == last));
4058
4059                         entry = NEXT_TX(entry);
4060                 }
4061         }
4062
4063         if (would_hit_hwbug) {
4064                 u32 last_plus_one = entry;
4065                 u32 start;
4066
4067                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4068                 start &= (TG3_TX_RING_SIZE - 1);
4069
4070                 /* If the workaround fails due to memory/mapping
4071                  * failure, silently drop this packet.
4072                  */
4073                 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
4074                                                 &start, base_flags, mss))
4075                         goto out_unlock;
4076
4077                 entry = start;
4078         }
4079
4080         /* Packets are ready, update Tx producer idx local and on card. */
4081         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4082
4083         tp->tx_prod = entry;
4084         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4085                 netif_stop_queue(dev);
4086                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
4087                         netif_wake_queue(tp->dev);
4088         }
4089
4090 out_unlock:
4091         mmiowb();
4092
4093         dev->trans_start = jiffies;
4094
4095         return NETDEV_TX_OK;
4096 }
4097
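     /* Record the new MTU and adjust related state: 5780-class chips turn
      * TSO off for jumbo MTUs, while other chips enable the dedicated
      * jumbo RX ring once the MTU exceeds ETH_DATA_LEN.
      */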
4098 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4099                                int new_mtu)
4100 {
4101         dev->mtu = new_mtu;
4102
4103         if (new_mtu > ETH_DATA_LEN) {
4104                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4105                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4106                         ethtool_op_set_tso(dev, 0);
4107                 }
4108                 else
4109                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4110         } else {
4111                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4112                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4113                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4114         }
4115 }
4116
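     /* Changing the MTU on a running interface requires a full halt and
      * re-init of the hardware so the RX rings are rebuilt with buffers
      * of the appropriate size.
      */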
4117 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4118 {
4119         struct tg3 *tp = netdev_priv(dev);
4120         int err;
4121
4122         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4123                 return -EINVAL;
4124
4125         if (!netif_running(dev)) {
4126                 /* We'll just catch it later when the
4127                  * device is brought up.
4128                  */
4129                 tg3_set_mtu(dev, tp, new_mtu);
4130                 return 0;
4131         }
4132
4133         tg3_netif_stop(tp);
4134
4135         tg3_full_lock(tp, 1);
4136
4137         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4138
4139         tg3_set_mtu(dev, tp, new_mtu);
4140
4141         err = tg3_restart_hw(tp, 0);
4142
4143         if (!err)
4144                 tg3_netif_start(tp);
4145
4146         tg3_full_unlock(tp);
4147
4148         return err;
4149 }
4150
4151 /* Free up pending packets in all rx/tx rings.
4152  *
4153  * The chip has been shut down and the driver detached from
4154  * the networking stack, so no interrupts or new tx packets will
4155  * end up in the driver.  tp->{tx,}lock is not held and we are not
4156  * in an interrupt context and thus may sleep.
4157  */
4158 static void tg3_free_rings(struct tg3 *tp)
4159 {
4160         struct ring_info *rxp;
4161         int i;
4162
4163         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4164                 rxp = &tp->rx_std_buffers[i];
4165
4166                 if (rxp->skb == NULL)
4167                         continue;
4168                 pci_unmap_single(tp->pdev,
4169                                  pci_unmap_addr(rxp, mapping),
4170                                  tp->rx_pkt_buf_sz - tp->rx_offset,
4171                                  PCI_DMA_FROMDEVICE);
4172                 dev_kfree_skb_any(rxp->skb);
4173                 rxp->skb = NULL;
4174         }
4175
4176         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4177                 rxp = &tp->rx_jumbo_buffers[i];
4178
4179                 if (rxp->skb == NULL)
4180                         continue;
4181                 pci_unmap_single(tp->pdev,
4182                                  pci_unmap_addr(rxp, mapping),
4183                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4184                                  PCI_DMA_FROMDEVICE);
4185                 dev_kfree_skb_any(rxp->skb);
4186                 rxp->skb = NULL;
4187         }
4188
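             /* Only the first descriptor of a TX skb chain holds the skb
              * pointer: unmap the head with pci_unmap_single() and each
              * following fragment with pci_unmap_page().
              */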
4189         for (i = 0; i < TG3_TX_RING_SIZE; ) {
4190                 struct tx_ring_info *txp;
4191                 struct sk_buff *skb;
4192                 int j;
4193
4194                 txp = &tp->tx_buffers[i];
4195                 skb = txp->skb;
4196
4197                 if (skb == NULL) {
4198                         i++;
4199                         continue;
4200                 }
4201
4202                 pci_unmap_single(tp->pdev,
4203                                  pci_unmap_addr(txp, mapping),
4204                                  skb_headlen(skb),
4205                                  PCI_DMA_TODEVICE);
4206                 txp->skb = NULL;
4207
4208                 i++;
4209
4210                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4211                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4212                         pci_unmap_page(tp->pdev,
4213                                        pci_unmap_addr(txp, mapping),
4214                                        skb_shinfo(skb)->frags[j].size,
4215                                        PCI_DMA_TODEVICE);
4216                         i++;
4217                 }
4218
4219                 dev_kfree_skb_any(skb);
4220         }
4221 }
4222
4223 /* Initialize tx/rx rings for packet processing.
4224  *
4225  * The chip has been shut down and the driver detached from
4226  * the networking stack, so no interrupts or new tx packets will
4227  * end up in the driver.  tp->{tx,}lock are held and thus
4228  * we may not sleep.
4229  */
4230 static int tg3_init_rings(struct tg3 *tp)
4231 {
4232         u32 i;
4233
4234         /* Free up all the SKBs. */
4235         tg3_free_rings(tp);
4236
4237         /* Zero out all descriptors. */
4238         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4239         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4240         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4241         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
4242
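             /* 5780-class chips do not use the separate jumbo ring; jumbo
              * MTUs are handled by enlarging the standard ring buffers.
              */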
4243         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
4244         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
4245             (tp->dev->mtu > ETH_DATA_LEN))
4246                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4247
4248         /* Initialize invariants of the rings; we only set this
4249          * stuff once.  This works because the card does not
4250          * write into the rx buffer posting rings.
4251          */
4252         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4253                 struct tg3_rx_buffer_desc *rxd;
4254
4255                 rxd = &tp->rx_std[i];
4256                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
4257                         << RXD_LEN_SHIFT;
4258                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4259                 rxd->opaque = (RXD_OPAQUE_RING_STD |
4260                                (i << RXD_OPAQUE_INDEX_SHIFT));
4261         }
4262
4263         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4264                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4265                         struct tg3_rx_buffer_desc *rxd;
4266
4267                         rxd = &tp->rx_jumbo[i];
4268                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4269                                 << RXD_LEN_SHIFT;
4270                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4271                                 RXD_FLAG_JUMBO;
4272                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4273                                (i << RXD_OPAQUE_INDEX_SHIFT));
4274                 }
4275         }
4276
4277         /* Now allocate fresh SKBs for each rx ring. */
4278         for (i = 0; i < tp->rx_pending; i++) {
4279                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
4280                         printk(KERN_WARNING PFX
4281                                "%s: Using a smaller RX standard ring, "
4282                                "only %d out of %d buffers were allocated "
4283                                "successfully.\n",
4284                                tp->dev->name, i, tp->rx_pending);
4285                         if (i == 0)
4286                                 return -ENOMEM;
4287                         tp->rx_pending = i;
4288                         break;
4289                 }
4290         }
4291
4292         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4293                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4294                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4295                                              -1, i) < 0) {
4296                                 printk(KERN_WARNING PFX
4297                                        "%s: Using a smaller RX jumbo ring, "
4298                                        "only %d out of %d buffers were "
4299                                        "allocated successfully.\n",
4300                                        tp->dev->name, i, tp->rx_jumbo_pending);
4301                                 if (i == 0) {
4302                                         tg3_free_rings(tp);
4303                                         return -ENOMEM;
4304                                 }
4305                                 tp->rx_jumbo_pending = i;
4306                                 break;
4307                         }
4308                 }
4309         }
4310         return 0;
4311 }
4312
4313 /*
4314  * Must not be invoked with interrupt sources disabled and
4315  * the hardware shut down.
4316  */
4317 static void tg3_free_consistent(struct tg3 *tp)
4318 {
4319         kfree(tp->rx_std_buffers);
4320         tp->rx_std_buffers = NULL;
4321         if (tp->rx_std) {
4322                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4323                                     tp->rx_std, tp->rx_std_mapping);
4324                 tp->rx_std = NULL;
4325         }
4326         if (tp->rx_jumbo) {
4327                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4328                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
4329                 tp->rx_jumbo = NULL;
4330         }
4331         if (tp->rx_rcb) {
4332                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4333                                     tp->rx_rcb, tp->rx_rcb_mapping);
4334                 tp->rx_rcb = NULL;
4335         }
4336         if (tp->tx_ring) {
4337                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4338                         tp->tx_ring, tp->tx_desc_mapping);
4339                 tp->tx_ring = NULL;
4340         }
4341         if (tp->hw_status) {
4342                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4343                                     tp->hw_status, tp->status_mapping);
4344                 tp->hw_status = NULL;
4345         }
4346         if (tp->hw_stats) {
4347                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4348                                     tp->hw_stats, tp->stats_mapping);
4349                 tp->hw_stats = NULL;
4350         }
4351 }
4352
4353 /*
4354  * Must not be invoked with interrupt sources disabled and
4355  * the hardware shut down.  Can sleep.
4356  */
4357 static int tg3_alloc_consistent(struct tg3 *tp)
4358 {
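             /* One allocation backs the std, jumbo and tx ring_info arrays;
              * it is carved up by the pointer assignments below.
              */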
4359         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
4360                                       (TG3_RX_RING_SIZE +
4361                                        TG3_RX_JUMBO_RING_SIZE)) +
4362                                      (sizeof(struct tx_ring_info) *
4363                                       TG3_TX_RING_SIZE),
4364                                      GFP_KERNEL);
4365         if (!tp->rx_std_buffers)
4366                 return -ENOMEM;
4367
4368         memset(tp->rx_std_buffers, 0,
4369                (sizeof(struct ring_info) *
4370                 (TG3_RX_RING_SIZE +
4371                  TG3_RX_JUMBO_RING_SIZE)) +
4372                (sizeof(struct tx_ring_info) *
4373                 TG3_TX_RING_SIZE));
4374
4375         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4376         tp->tx_buffers = (struct tx_ring_info *)
4377                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4378
4379         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4380                                           &tp->rx_std_mapping);
4381         if (!tp->rx_std)
4382                 goto err_out;
4383
4384         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4385                                             &tp->rx_jumbo_mapping);
4386
4387         if (!tp->rx_jumbo)
4388                 goto err_out;
4389
4390         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4391                                           &tp->rx_rcb_mapping);
4392         if (!tp->rx_rcb)
4393                 goto err_out;
4394
4395         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4396                                            &tp->tx_desc_mapping);
4397         if (!tp->tx_ring)
4398                 goto err_out;
4399
4400         tp->hw_status = pci_alloc_consistent(tp->pdev,
4401                                              TG3_HW_STATUS_SIZE,
4402                                              &tp->status_mapping);
4403         if (!tp->hw_status)
4404                 goto err_out;
4405
4406         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4407                                             sizeof(struct tg3_hw_stats),
4408                                             &tp->stats_mapping);
4409         if (!tp->hw_stats)
4410                 goto err_out;
4411
4412         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4413         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4414
4415         return 0;
4416
4417 err_out:
4418         tg3_free_consistent(tp);
4419         return -ENOMEM;
4420 }
4421
4422 #define MAX_WAIT_CNT 1000
4423
4424 /* To stop a block, clear the enable bit and poll till it
4425  * clears.  tp->lock is held.
4426  */
4427 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4428 {
4429         unsigned int i;
4430         u32 val;
4431
4432         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4433                 switch (ofs) {
4434                 case RCVLSC_MODE:
4435                 case DMAC_MODE:
4436                 case MBFREE_MODE:
4437                 case BUFMGR_MODE:
4438                 case MEMARB_MODE:
4439                         /* We can't enable/disable these bits of the
4440                          * 5705/5750, just say success.
4441                          */
4442                         return 0;
4443
4444                 default:
4445                         break;
4446                 }
4447         }
4448
4449         val = tr32(ofs);
4450         val &= ~enable_bit;
4451         tw32_f(ofs, val);
4452
4453         for (i = 0; i < MAX_WAIT_CNT; i++) {
4454                 udelay(100);
4455                 val = tr32(ofs);
4456                 if ((val & enable_bit) == 0)
4457                         break;
4458         }
4459
4460         if (i == MAX_WAIT_CNT && !silent) {
4461                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4462                        "ofs=%lx enable_bit=%x\n",
4463                        ofs, enable_bit);
4464                 return -ENODEV;
4465         }
4466
4467         return 0;
4468 }
4469
4470 /* tp->lock is held. */
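     /* Quiesce the chip before a reset: disable interrupts, stop the
      * receive and transmit engines block by block, reset the FTQs, and
      * clear the host status and statistics areas.
      */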
4471 static int tg3_abort_hw(struct tg3 *tp, int silent)
4472 {
4473         int i, err;
4474
4475         tg3_disable_ints(tp);
4476
4477         tp->rx_mode &= ~RX_MODE_ENABLE;
4478         tw32_f(MAC_RX_MODE, tp->rx_mode);
4479         udelay(10);
4480
4481         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4482         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4483         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4484         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4485         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4486         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4487
4488         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4489         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4490         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4491         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4492         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4493         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4494         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4495
4496         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4497         tw32_f(MAC_MODE, tp->mac_mode);
4498         udelay(40);
4499
4500         tp->tx_mode &= ~TX_MODE_ENABLE;
4501         tw32_f(MAC_TX_MODE, tp->tx_mode);
4502
4503         for (i = 0; i < MAX_WAIT_CNT; i++) {
4504                 udelay(100);
4505                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4506                         break;
4507         }
4508         if (i >= MAX_WAIT_CNT) {
4509                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4510                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4511                        tp->dev->name, tr32(MAC_TX_MODE));
4512                 err |= -ENODEV;
4513         }
4514
4515         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4516         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4517         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4518
4519         tw32(FTQ_RESET, 0xffffffff);
4520         tw32(FTQ_RESET, 0x00000000);
4521
4522         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4523         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4524
4525         if (tp->hw_status)
4526                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4527         if (tp->hw_stats)
4528                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4529
4530         return err;
4531 }
4532
4533 /* tp->lock is held. */
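     /* Acquire the NVRAM software arbitration semaphore by setting
      * SWARB_REQ_SET1 and polling for SWARB_GNT1.  nvram_lock_cnt lets
      * lock/unlock calls nest.
      */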
4534 static int tg3_nvram_lock(struct tg3 *tp)
4535 {
4536         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4537                 int i;
4538
4539                 if (tp->nvram_lock_cnt == 0) {
4540                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4541                         for (i = 0; i < 8000; i++) {
4542                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4543                                         break;
4544                                 udelay(20);
4545                         }
4546                         if (i == 8000) {
4547                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4548                                 return -ENODEV;
4549                         }
4550                 }
4551                 tp->nvram_lock_cnt++;
4552         }
4553         return 0;
4554 }
4555
4556 /* tp->lock is held. */
4557 static void tg3_nvram_unlock(struct tg3 *tp)
4558 {
4559         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4560                 if (tp->nvram_lock_cnt > 0)
4561                         tp->nvram_lock_cnt--;
4562                 if (tp->nvram_lock_cnt == 0)
4563                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4564         }
4565 }
4566
4567 /* tp->lock is held. */
4568 static void tg3_enable_nvram_access(struct tg3 *tp)
4569 {
4570         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4571             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4572                 u32 nvaccess = tr32(NVRAM_ACCESS);
4573
4574                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4575         }
4576 }
4577
4578 /* tp->lock is held. */
4579 static void tg3_disable_nvram_access(struct tg3 *tp)
4580 {
4581         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4582             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4583                 u32 nvaccess = tr32(NVRAM_ACCESS);
4584
4585                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4586         }
4587 }
4588
4589 /* tp->lock is held. */
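     /* The tg3_write_sig_*() helpers post the driver's state into NIC
      * SRAM mailboxes so ASF/management firmware can track the driver
      * through init, shutdown and suspend across chip resets.
      */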
4590 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4591 {
4592         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4593                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4594
4595         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4596                 switch (kind) {
4597                 case RESET_KIND_INIT:
4598                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4599                                       DRV_STATE_START);
4600                         break;
4601
4602                 case RESET_KIND_SHUTDOWN:
4603                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4604                                       DRV_STATE_UNLOAD);
4605                         break;
4606
4607                 case RESET_KIND_SUSPEND:
4608                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4609                                       DRV_STATE_SUSPEND);
4610                         break;
4611
4612                 default:
4613                         break;
4614                 }
4615         }
4616 }
4617
4618 /* tp->lock is held. */
4619 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4620 {
4621         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4622                 switch (kind) {
4623                 case RESET_KIND_INIT:
4624                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4625                                       DRV_STATE_START_DONE);
4626                         break;
4627
4628                 case RESET_KIND_SHUTDOWN:
4629                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4630                                       DRV_STATE_UNLOAD_DONE);
4631                         break;
4632
4633                 default:
4634                         break;
4635                 }
4636         }
4637 }
4638
4639 /* tp->lock is held. */
4640 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4641 {
4642         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4643                 switch (kind) {
4644                 case RESET_KIND_INIT:
4645                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4646                                       DRV_STATE_START);
4647                         break;
4648
4649                 case RESET_KIND_SHUTDOWN:
4650                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4651                                       DRV_STATE_UNLOAD);
4652                         break;
4653
4654                 case RESET_KIND_SUSPEND:
4655                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4656                                       DRV_STATE_SUSPEND);
4657                         break;
4658
4659                 default:
4660                         break;
4661                 }
4662         }
4663 }
4664
4665 static void tg3_stop_fw(struct tg3 *);
4666
4667 /* tp->lock is held. */
4668 static int tg3_chip_reset(struct tg3 *tp)
4669 {
4670         u32 val;
4671         void (*write_op)(struct tg3 *, u32, u32);
4672         int i;
4673
4674         tg3_nvram_lock(tp);
4675
4676         /* No matching tg3_nvram_unlock() after this because
4677          * chip reset below will undo the nvram lock.
4678          */
4679         tp->nvram_lock_cnt = 0;
4680
4681         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
4682             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
4683             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
4684                 tw32(GRC_FASTBOOT_PC, 0);
4685
4686         /*
4687          * We must avoid the readl() that normally takes place.
4688          * It locks machines, causes machine checks, and other
4689          * fun things.  So, temporarily disable the 5701
4690          * hardware workaround, while we do the reset.
4691          */
4692         write_op = tp->write32;
4693         if (write_op == tg3_write_flush_reg32)
4694                 tp->write32 = tg3_write32;
4695
4696         /* do the reset */
4697         val = GRC_MISC_CFG_CORECLK_RESET;
4698
4699         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4700                 if (tr32(0x7e2c) == 0x60) {
4701                         tw32(0x7e2c, 0x20);
4702                 }
4703                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4704                         tw32(GRC_MISC_CFG, (1 << 29));
4705                         val |= (1 << 29);
4706                 }
4707         }
4708
4709         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4710                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
4711         tw32(GRC_MISC_CFG, val);
4712
4713         /* restore 5701 hardware bug workaround write method */
4714         tp->write32 = write_op;
4715
4716         /* Unfortunately, we have to delay before the PCI read back.
4717          * Some 575X chips will not even respond to a PCI cfg access
4718          * when the reset command is given to the chip.
4719          *
4720          * How do these hardware designers expect things to work
4721          * properly if the PCI write is posted for a long period
4722          * of time?  It is always necessary to have some method by
4723          * which a register read back can occur to push out the
4724          * write that performs the reset.
4725          *
4726          * For most tg3 variants the trick below was working.
4727          * Ho hum...
4728          */
4729         udelay(120);
4730
4731         /* Flush PCI posted writes.  The normal MMIO registers
4732          * are inaccessible at this time so this is the only
4733          * way to do this reliably (actually, this is no longer
4734          * the case, see above).  I tried to use indirect
4735          * register read/write but this upset some 5701 variants.
4736          */
4737         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
4738
4739         udelay(120);
4740
4741         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
4742                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
4743                         int i;
4744                         u32 cfg_val;
4745
4746                         /* Wait for link training to complete.  */
4747                         for (i = 0; i < 5000; i++)
4748                                 udelay(100);
4749
4750                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
4751                         pci_write_config_dword(tp->pdev, 0xc4,
4752                                                cfg_val | (1 << 15));
4753                 }
4754                 /* Set PCIE max payload size and clear error status.  */
4755                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
4756         }
4757
4758         /* Re-enable indirect register accesses. */
4759         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
4760                                tp->misc_host_ctrl);
4761
4762         /* Set MAX PCI retry to zero. */
4763         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
4764         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4765             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
4766                 val |= PCISTATE_RETRY_SAME_DMA;
4767         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
4768
4769         pci_restore_state(tp->pdev);
4770
4771         /* Make sure PCI-X relaxed ordering bit is clear. */
4772         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
4773         val &= ~PCIX_CAPS_RELAXED_ORDERING;
4774         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
4775
4776         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4777                 u32 val;
4778
4779                 /* Chip reset on 5780 will reset MSI enable bit,
4780                  * so need to restore it.
4781                  */
4782                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
4783                         u16 ctrl;
4784
4785                         pci_read_config_word(tp->pdev,
4786                                              tp->msi_cap + PCI_MSI_FLAGS,
4787                                              &ctrl);
4788                         pci_write_config_word(tp->pdev,
4789                                               tp->msi_cap + PCI_MSI_FLAGS,
4790                                               ctrl | PCI_MSI_FLAGS_ENABLE);
4791                         val = tr32(MSGINT_MODE);
4792                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
4793                 }
4794
4795                 val = tr32(MEMARB_MODE);
4796                 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
4797
4798         } else
4799                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
4800
4801         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
4802                 tg3_stop_fw(tp);
4803                 tw32(0x5000, 0x400);
4804         }
4805
4806         tw32(GRC_MODE, tp->grc_mode);
4807
4808         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
4809                 u32 val = tr32(0xc4);
4810
4811                 tw32(0xc4, val | (1 << 15));
4812         }
4813
4814         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
4815             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4816                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
4817                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
4818                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
4819                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4820         }
4821
4822         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4823                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4824                 tw32_f(MAC_MODE, tp->mac_mode);
4825         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4826                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
4827                 tw32_f(MAC_MODE, tp->mac_mode);
4828         } else
4829                 tw32_f(MAC_MODE, 0);
4830         udelay(40);
4831
4832         /* Wait for firmware initialization to complete. */
4833         for (i = 0; i < 100000; i++) {
4834                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4835                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4836                         break;
4837                 udelay(10);
4838         }
4839
4840         /* Chip might not be fitted with firmware.  Some Sun onboard
4841          * parts are configured like that.  So don't signal the timeout
4842          * of the above loop as an error, but do report the lack of
4843          * running firmware once.
4844          */
4845         if (i >= 100000 &&
4846             !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
4847                 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
4848
4849                 printk(KERN_INFO PFX "%s: No firmware running.\n",
4850                        tp->dev->name);
4851         }
4852
4853         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
4854             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
4855                 u32 val = tr32(0x7c00);
4856
4857                 tw32(0x7c00, val | (1 << 25));
4858         }
4859
4860         /* Reprobe ASF enable state.  */
4861         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
4862         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
4863         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
4864         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
4865                 u32 nic_cfg;
4866
4867                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
4868                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
4869                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
4870                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
4871                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
4872                 }
4873         }
4874
4875         return 0;
4876 }
4877
4878 /* tp->lock is held. */
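     /* Ask the ASF firmware to pause: post FWCMD_NICDRV_PAUSE_FW in the
      * firmware command mailbox, raise the RX CPU event, and briefly poll
      * for the firmware to acknowledge by clearing it.
      */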
4879 static void tg3_stop_fw(struct tg3 *tp)
4880 {
4881         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4882                 u32 val;
4883                 int i;
4884
4885                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4886                 val = tr32(GRC_RX_CPU_EVENT);
4887                 val |= (1 << 14);
4888                 tw32(GRC_RX_CPU_EVENT, val);
4889
4890                 /* Wait for RX cpu to ACK the event.  */
4891                 for (i = 0; i < 100; i++) {
4892                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4893                                 break;
4894                         udelay(1);
4895                 }
4896         }
4897 }
4898
4899 /* tp->lock is held. */
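     /* Full controller shutdown: pause the firmware, post the pre-reset
      * signature, quiesce and reset the chip, then post the legacy and
      * post-reset signatures for the requested reset kind.
      */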
4900 static int tg3_halt(struct tg3 *tp, int kind, int silent)
4901 {
4902         int err;
4903
4904         tg3_stop_fw(tp);
4905
4906         tg3_write_sig_pre_reset(tp, kind);
4907
4908         tg3_abort_hw(tp, silent);
4909         err = tg3_chip_reset(tp);
4910
4911         tg3_write_sig_legacy(tp, kind);
4912         tg3_write_sig_post_reset(tp, kind);
4913
4914         if (err)
4915                 return err;
4916
4917         return 0;
4918 }
4919
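     /* Section layout of the on-chip CPU firmware image loaded by
      * tg3_load_5701_a0_firmware_fix() below; the text and rodata words
      * live in tg3FwText[] and tg3FwRodata[], and the data section is
      * all zeros.
      */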
4920 #define TG3_FW_RELEASE_MAJOR    0x0
4921 #define TG3_FW_RELASE_MINOR     0x0
4922 #define TG3_FW_RELEASE_FIX      0x0
4923 #define TG3_FW_START_ADDR       0x08000000
4924 #define TG3_FW_TEXT_ADDR        0x08000000
4925 #define TG3_FW_TEXT_LEN         0x9c0
4926 #define TG3_FW_RODATA_ADDR      0x080009c0
4927 #define TG3_FW_RODATA_LEN       0x60
4928 #define TG3_FW_DATA_ADDR        0x08000a40
4929 #define TG3_FW_DATA_LEN         0x20
4930 #define TG3_FW_SBSS_ADDR        0x08000a60
4931 #define TG3_FW_SBSS_LEN         0xc
4932 #define TG3_FW_BSS_ADDR         0x08000a70
4933 #define TG3_FW_BSS_LEN          0x10
4934
4935 static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4936         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4937         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4938         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4939         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4940         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4941         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4942         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4943         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4944         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4945         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4946         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4947         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4948         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4949         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4950         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4951         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4952         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4953         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4954         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4955         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4956         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4957         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4958         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4959         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4960         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4961         0, 0, 0, 0, 0, 0,
4962         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4963         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4964         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4965         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4966         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4967         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4968         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4969         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4970         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4971         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4972         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4973         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4974         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4975         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4976         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4977         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4978         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4979         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4980         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4981         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4982         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4983         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4984         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4985         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4986         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4987         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4988         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4989         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4990         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4991         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4992         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4993         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4994         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4995         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4996         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4997         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4998         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4999         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
5000         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
5001         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
5002         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
5003         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
5004         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
5005         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
5006         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
5007         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
5008         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
5009         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
5010         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
5011         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
5012         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
5013         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
5014         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
5015         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
5016         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
5017         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
5018         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
5019         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
5020         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
5021         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
5022         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
5023         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
5024         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
5025         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
5026         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
5027 };
5028
5029 static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
5030         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
5031         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
5032         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5033         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
5034         0x00000000
5035 };
5036
5037 #if 0 /* All zeros, don't eat up space with it. */
5038 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
5039         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5040         0x00000000, 0x00000000, 0x00000000, 0x00000000
5041 };
5042 #endif
5043
5044 #define RX_CPU_SCRATCH_BASE     0x30000
5045 #define RX_CPU_SCRATCH_SIZE     0x04000
5046 #define TX_CPU_SCRATCH_BASE     0x34000
5047 #define TX_CPU_SCRATCH_SIZE     0x04000
5048
5049 /* tp->lock is held. */
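     /* Halt the requested on-chip (RX or TX) CPU by writing CPU_MODE_HALT
      * until the mode register reflects it, then clear any NVRAM
      * arbitration request the halted firmware may have left pending.
      */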
5050 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5051 {
5052         int i;
5053
5054         BUG_ON(offset == TX_CPU_BASE &&
5055             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5056
5057         if (offset == RX_CPU_BASE) {
5058                 for (i = 0; i < 10000; i++) {
5059                         tw32(offset + CPU_STATE, 0xffffffff);
5060                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5061                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5062                                 break;
5063                 }
5064
5065                 tw32(offset + CPU_STATE, 0xffffffff);
5066                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
5067                 udelay(10);
5068         } else {
5069                 for (i = 0; i < 10000; i++) {
5070                         tw32(offset + CPU_STATE, 0xffffffff);
5071                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5072                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5073                                 break;
5074                 }
5075         }
5076
5077         if (i >= 10000) {
5078                 printk(KERN_ERR PFX "tg3_halt_cpu timed out for %s, "
5079                        "%s CPU\n",
5080                        tp->dev->name,
5081                        (offset == RX_CPU_BASE ? "RX" : "TX"));
5082                 return -ENODEV;
5083         }
5084
5085         /* Clear firmware's nvram arbitration. */
5086         if (tp->tg3_flags & TG3_FLAG_NVRAM)
5087                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
5088         return 0;
5089 }
5090
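     /* Describes one firmware image (text/rodata/data sections) for
      * tg3_load_firmware_cpu(); a NULL *_data pointer means the section
      * should simply be zero-filled.
      */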
5091 struct fw_info {
5092         unsigned int text_base;
5093         unsigned int text_len;
5094         const u32 *text_data;
5095         unsigned int rodata_base;
5096         unsigned int rodata_len;
5097         const u32 *rodata_data;
5098         unsigned int data_base;
5099         unsigned int data_len;
5100         const u32 *data_data;
5101 };
5102
5103 /* tp->lock is held. */
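     /* Halt the target CPU, zero its scratch memory, then copy each
      * firmware section to (section base & 0xffff) within the scratch
      * area.  Starting the CPU is left to the caller.
      */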
5104 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5105                                  int cpu_scratch_size, struct fw_info *info)
5106 {
5107         int err, lock_err, i;
5108         void (*write_op)(struct tg3 *, u32, u32);
5109
5110         if (cpu_base == TX_CPU_BASE &&
5111             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5112                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5113                        "TX cpu firmware on %s which is 5705.\n",
5114                        tp->dev->name);
5115                 return -EINVAL;
5116         }
5117
5118         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5119                 write_op = tg3_write_mem;
5120         else
5121                 write_op = tg3_write_indirect_reg32;
5122
5123         /* It is possible that bootcode is still loading at this point.
5124          * Get the nvram lock first before halting the cpu.
5125          */
5126         lock_err = tg3_nvram_lock(tp);
5127         err = tg3_halt_cpu(tp, cpu_base);
5128         if (!lock_err)
5129                 tg3_nvram_unlock(tp);
5130         if (err)
5131                 goto out;
5132
5133         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5134                 write_op(tp, cpu_scratch_base + i, 0);
5135         tw32(cpu_base + CPU_STATE, 0xffffffff);
5136         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5137         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5138                 write_op(tp, (cpu_scratch_base +
5139                               (info->text_base & 0xffff) +
5140                               (i * sizeof(u32))),
5141                          (info->text_data ?
5142                           info->text_data[i] : 0));
5143         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5144                 write_op(tp, (cpu_scratch_base +
5145                               (info->rodata_base & 0xffff) +
5146                               (i * sizeof(u32))),
5147                          (info->rodata_data ?
5148                           info->rodata_data[i] : 0));
5149         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5150                 write_op(tp, (cpu_scratch_base +
5151                               (info->data_base & 0xffff) +
5152                               (i * sizeof(u32))),
5153                          (info->data_data ?
5154                           info->data_data[i] : 0));
5155
5156         err = 0;
5157
5158 out:
5159         return err;
5160 }
5161
5162 /* tp->lock is held. */
5163 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5164 {
5165         struct fw_info info;
5166         int err, i;
5167
5168         info.text_base = TG3_FW_TEXT_ADDR;
5169         info.text_len = TG3_FW_TEXT_LEN;
5170         info.text_data = &tg3FwText[0];
5171         info.rodata_base = TG3_FW_RODATA_ADDR;
5172         info.rodata_len = TG3_FW_RODATA_LEN;
5173         info.rodata_data = &tg3FwRodata[0];
5174         info.data_base = TG3_FW_DATA_ADDR;
5175         info.data_len = TG3_FW_DATA_LEN;
5176         info.data_data = NULL;
5177
5178         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5179                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5180                                     &info);
5181         if (err)
5182                 return err;
5183
5184         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5185                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5186                                     &info);
5187         if (err)
5188                 return err;
5189
5190         /* Now startup only the RX cpu. */
5191         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5192         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5193
5194         for (i = 0; i < 5; i++) {
5195                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5196                         break;
5197                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5198                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5199                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5200                 udelay(1000);
5201         }
5202         if (i >= 5) {
5203                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5204                        "to set RX CPU PC, is %08x should be %08x\n",
5205                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5206                        TG3_FW_TEXT_ADDR);
5207                 return -ENODEV;
5208         }
5209         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5210         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5211
5212         return 0;
5213 }
5214
5215 #if TG3_TSO_SUPPORT != 0
5216
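     /* Section layout of the firmware-assisted TSO image (tg3TsoFwText[]
      * and friends below); only compiled in when TG3_TSO_SUPPORT is set.
      */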
5217 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
5218 #define TG3_TSO_FW_RELASE_MINOR         0x6
5219 #define TG3_TSO_FW_RELEASE_FIX          0x0
5220 #define TG3_TSO_FW_START_ADDR           0x08000000
5221 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
5222 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
5223 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
5224 #define TG3_TSO_FW_RODATA_LEN           0x60
5225 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
5226 #define TG3_TSO_FW_DATA_LEN             0x30
5227 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
5228 #define TG3_TSO_FW_SBSS_LEN             0x2c
5229 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
5230 #define TG3_TSO_FW_BSS_LEN              0x894
5231
5232 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5233         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5234         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5235         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5236         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5237         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5238         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5239         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5240         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5241         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5242         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5243         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5244         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5245         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5246         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5247         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5248         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5249         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5250         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5251         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5252         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5253         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5254         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5255         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5256         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5257         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5258         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5259         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5260         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5261         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5262         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5263         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5264         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5265         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5266         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5267         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5268         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5269         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5270         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5271         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5272         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5273         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5274         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5275         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5276         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5277         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5278         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5279         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5280         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5281         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5282         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5283         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5284         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5285         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5286         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5287         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5288         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5289         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5290         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5291         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5292         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5293         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5294         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5295         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5296         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5297         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5298         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5299         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5300         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5301         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5302         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5303         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5304         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5305         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5306         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5307         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5308         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5309         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5310         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5311         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5312         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5313         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5314         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5315         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5316         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5317         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5318         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5319         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5320         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5321         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5322         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5323         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5324         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5325         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5326         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5327         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5328         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5329         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5330         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5331         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5332         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5333         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5334         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5335         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5336         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5337         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5338         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5339         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5340         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5341         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5342         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5343         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5344         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5345         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5346         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5347         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5348         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5349         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5350         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5351         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5352         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5353         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5354         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5355         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5356         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5357         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5358         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5359         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5360         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5361         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5362         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5363         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5364         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5365         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5366         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5367         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5368         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5369         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5370         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5371         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5372         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5373         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5374         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5375         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5376         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5377         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5378         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5379         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5380         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5381         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5382         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5383         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5384         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5385         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5386         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5387         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5388         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5389         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5390         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5391         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5392         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5393         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5394         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5395         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5396         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5397         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5398         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5399         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5400         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5401         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5402         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5403         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5404         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5405         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5406         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5407         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5408         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5409         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5410         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5411         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5412         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5413         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5414         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5415         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5416         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5417         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5418         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5419         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5420         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5421         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5422         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5423         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5424         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5425         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5426         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5427         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5428         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5429         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5430         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5431         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5432         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5433         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5434         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5435         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5436         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5437         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5438         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5439         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5440         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5441         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5442         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5443         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5444         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5445         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5446         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5447         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5448         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5449         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5450         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5451         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5452         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5453         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5454         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5455         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5456         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5457         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5458         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5459         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5460         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5461         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5462         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5463         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5464         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5465         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5466         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5467         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5468         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5469         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5470         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5471         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5472         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5473         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5474         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5475         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5476         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5477         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5478         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5479         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5480         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5481         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5482         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5483         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5484         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5485         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5486         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5487         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5488         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5489         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5490         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5491         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5492         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5493         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5494         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5495         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5496         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5497         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5498         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5499         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5500         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5501         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5502         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5503         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5504         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5505         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5506         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5507         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5508         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5509         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5510         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5511         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5512         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5513         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5514         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5515         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5516         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5517 };
5518
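/* TSO firmware read-only data.  The words decode to the firmware's ASCII
 * tags, e.g. "MainCpuB", "MainCpuA", "stkoffldIn", "SwEvent0" and "fatalErr".
 */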
5519 static const u32 tg3TsoFwRodata[] = {
5520         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5521         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5522         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5523         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5524         0x00000000,
5525 };
5526
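/* TSO firmware initialized data; carries the version tag "stkoffld_v1.6.0". */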
5527 static const u32 tg3TsoFwData[] = {
5528         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5529         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5530         0x00000000,
5531 };
5532
5533 /* 5705 needs a special version of the TSO firmware.  */
5534 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
5535 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
5536 #define TG3_TSO5_FW_RELEASE_FIX         0x0
5537 #define TG3_TSO5_FW_START_ADDR          0x00010000
5538 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
5539 #define TG3_TSO5_FW_TEXT_LEN            0xe90
5540 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
5541 #define TG3_TSO5_FW_RODATA_LEN          0x50
5542 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
5543 #define TG3_TSO5_FW_DATA_LEN            0x20
5544 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
5545 #define TG3_TSO5_FW_SBSS_LEN            0x28
5546 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
5547 #define TG3_TSO5_FW_BSS_LEN             0x88
5548
5549 static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5550         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5551         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5552         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5553         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5554         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5555         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5556         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5557         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5558         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5559         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5560         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5561         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5562         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5563         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5564         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5565         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5566         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5567         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5568         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5569         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5570         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5571         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5572         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5573         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5574         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5575         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5576         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5577         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5578         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5579         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5580         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5581         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5582         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5583         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5584         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5585         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5586         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5587         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5588         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5589         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5590         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5591         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5592         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5593         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5594         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5595         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5596         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5597         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5598         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5599         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5600         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5601         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5602         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5603         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5604         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5605         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5606         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5607         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5608         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5609         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5610         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5611         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5612         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5613         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5614         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5615         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5616         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5617         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5618         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5619         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5620         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5621         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5622         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5623         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5624         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5625         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5626         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5627         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5628         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5629         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5630         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5631         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5632         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5633         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5634         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5635         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5636         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5637         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5638         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5639         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5640         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5641         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5642         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5643         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5644         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5645         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5646         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5647         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5648         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5649         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5650         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5651         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5652         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5653         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5654         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5655         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5656         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5657         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5658         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5659         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5660         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5661         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5662         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5663         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5664         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5665         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5666         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5667         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5668         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5669         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5670         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5671         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5672         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5673         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5674         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5675         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5676         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5677         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5678         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5679         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5680         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5681         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5682         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5683         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5684         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5685         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5686         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5687         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5688         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5689         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5690         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5691         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5692         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5693         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5694         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5695         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5696         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5697         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5698         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5699         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5700         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5701         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5702         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5703         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5704         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5705         0x00000000, 0x00000000, 0x00000000,
5706 };
5707
5708 static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
5709         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5710         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5711         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5712         0x00000000, 0x00000000, 0x00000000,
5713 };
5714
5715 static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
5716         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5717         0x00000000, 0x00000000, 0x00000000,
5718 };
5719
5720 /* tp->lock is held. */
5721 static int tg3_load_tso_firmware(struct tg3 *tp)
5722 {
5723         struct fw_info info;
5724         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
5725         int err, i;
5726
5727         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5728                 return 0;
5729
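        /* On the 5705 the TSO firmware runs on the RX CPU and its scratch
         * area is carved out of the MBUF pool SRAM; every other TSO-capable
         * chip loads the firmware into the TX CPU scratch space instead.
         */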
5730         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5731                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
5732                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
5733                 info.text_data = &tg3Tso5FwText[0];
5734                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
5735                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
5736                 info.rodata_data = &tg3Tso5FwRodata[0];
5737                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
5738                 info.data_len = TG3_TSO5_FW_DATA_LEN;
5739                 info.data_data = &tg3Tso5FwData[0];
5740                 cpu_base = RX_CPU_BASE;
5741                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
5742                 cpu_scratch_size = (info.text_len +
5743                                     info.rodata_len +
5744                                     info.data_len +
5745                                     TG3_TSO5_FW_SBSS_LEN +
5746                                     TG3_TSO5_FW_BSS_LEN);
5747         } else {
5748                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
5749                 info.text_len = TG3_TSO_FW_TEXT_LEN;
5750                 info.text_data = &tg3TsoFwText[0];
5751                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
5752                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
5753                 info.rodata_data = &tg3TsoFwRodata[0];
5754                 info.data_base = TG3_TSO_FW_DATA_ADDR;
5755                 info.data_len = TG3_TSO_FW_DATA_LEN;
5756                 info.data_data = &tg3TsoFwData[0];
5757                 cpu_base = TX_CPU_BASE;
5758                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
5759                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
5760         }
5761
5762         err = tg3_load_firmware_cpu(tp, cpu_base,
5763                                     cpu_scratch_base, cpu_scratch_size,
5764                                     &info);
5765         if (err)
5766                 return err;
5767
5768         /* Now startup the cpu. */
5769         tw32(cpu_base + CPU_STATE, 0xffffffff);
5770         tw32_f(cpu_base + CPU_PC,    info.text_base);
5771
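        /* Make sure the boot PC took hold; retry a few times with the CPU
         * halted before giving up.
         */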
5772         for (i = 0; i < 5; i++) {
5773                 if (tr32(cpu_base + CPU_PC) == info.text_base)
5774                         break;
5775                 tw32(cpu_base + CPU_STATE, 0xffffffff);
5776                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
5777                 tw32_f(cpu_base + CPU_PC,    info.text_base);
5778                 udelay(1000);
5779         }
5780         if (i >= 5) {
5781                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
5782                        "to set CPU PC, is %08x should be %08x\n",
5783                        tp->dev->name, tr32(cpu_base + CPU_PC),
5784                        info.text_base);
5785                 return -ENODEV;
5786         }
5787         tw32(cpu_base + CPU_STATE, 0xffffffff);
5788         tw32_f(cpu_base + CPU_MODE,  0x00000000);
5789         return 0;
5790 }
5791
5792 #endif /* TG3_TSO_SUPPORT != 0 */
5793
5794 /* tp->lock is held. */
5795 static void __tg3_set_mac_addr(struct tg3 *tp)
5796 {
5797         u32 addr_high, addr_low;
5798         int i;
5799
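        /* The station address is programmed as a 16-bit high half and a
         * 32-bit low half; the same value is written to all four MAC
         * address register pairs.
         */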
5800         addr_high = ((tp->dev->dev_addr[0] << 8) |
5801                      tp->dev->dev_addr[1]);
5802         addr_low = ((tp->dev->dev_addr[2] << 24) |
5803                     (tp->dev->dev_addr[3] << 16) |
5804                     (tp->dev->dev_addr[4] <<  8) |
5805                     (tp->dev->dev_addr[5] <<  0));
5806         for (i = 0; i < 4; i++) {
5807                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5808                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5809         }
5810
5811         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5812             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5813                 for (i = 0; i < 12; i++) {
5814                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5815                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5816                 }
5817         }
5818
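        /* Seed the transmit backoff generator from the byte sum of the
         * station address.
         */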
5819         addr_high = (tp->dev->dev_addr[0] +
5820                      tp->dev->dev_addr[1] +
5821                      tp->dev->dev_addr[2] +
5822                      tp->dev->dev_addr[3] +
5823                      tp->dev->dev_addr[4] +
5824                      tp->dev->dev_addr[5]) &
5825                 TX_BACKOFF_SEED_MASK;
5826         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5827 }
5828
5829 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5830 {
5831         struct tg3 *tp = netdev_priv(dev);
5832         struct sockaddr *addr = p;
5833         int err = 0;
5834
5835         if (!is_valid_ether_addr(addr->sa_data))
5836                 return -EINVAL;
5837
5838         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5839
5840         if (!netif_running(dev))
5841                 return 0;
5842
5843         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5844                 /* Reset chip so that ASF can re-init any MAC addresses it
5845                  * needs.
5846                  */
5847                 tg3_netif_stop(tp);
5848                 tg3_full_lock(tp, 1);
5849
5850                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5851                 err = tg3_restart_hw(tp, 0);
5852                 if (!err)
5853                         tg3_netif_start(tp);
5854                 tg3_full_unlock(tp);
5855         } else {
5856                 spin_lock_bh(&tp->lock);
5857                 __tg3_set_mac_addr(tp);
5858                 spin_unlock_bh(&tp->lock);
5859         }
5860
5861         return err;
5862 }
5863
5864 /* tp->lock is held. */
5865 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5866                            dma_addr_t mapping, u32 maxlen_flags,
5867                            u32 nic_addr)
5868 {
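        /* Write one ring control block into NIC SRAM: the 64-bit host DMA
         * address, the maxlen/flags word and, on pre-5705 chips, the ring's
         * location in NIC memory.
         */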
5869         tg3_write_mem(tp,
5870                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5871                       ((u64) mapping >> 32));
5872         tg3_write_mem(tp,
5873                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5874                       ((u64) mapping & 0xffffffff));
5875         tg3_write_mem(tp,
5876                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5877                        maxlen_flags);
5878
5879         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5880                 tg3_write_mem(tp,
5881                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5882                               nic_addr);
5883 }
5884
5885 static void __tg3_set_rx_mode(struct net_device *);
5886 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5887 {
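        /* Program the host coalescing engine from the ethtool parameters.
         * The per-interrupt tick values and the statistics block tick are
         * only written on pre-5705 chips.
         */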
5888         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5889         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5890         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5891         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5892         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5893                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5894                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5895         }
5896         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5897         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5898         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5899                 u32 val = ec->stats_block_coalesce_usecs;
5900
5901                 if (!netif_carrier_ok(tp->dev))
5902                         val = 0;
5903
5904                 tw32(HOSTCC_STAT_COAL_TICKS, val);
5905         }
5906 }
5907
5908 /* tp->lock is held. */
5909 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
5910 {
5911         u32 val, rdmac_mode;
5912         int i, err, limit;
5913
5914         tg3_disable_ints(tp);
5915
5916         tg3_stop_fw(tp);
5917
5918         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5919
5920         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5921                 tg3_abort_hw(tp, 1);
5922         }
5923
5924         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && reset_phy)
5925                 tg3_phy_reset(tp);
5926
5927         err = tg3_chip_reset(tp);
5928         if (err)
5929                 return err;
5930
5931         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5932
5933         /* This works around an issue with Athlon chipsets on
5934          * B3 tigon3 silicon.  This bit has no effect on any
5935          * other revision.  But do not set this on PCI Express
5936          * chips.
5937          */
5938         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5939                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5940         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5941
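        /* 5704 A0 chips in PCI-X mode need the retry-same-DMA bit set in
         * the PCI state register.
         */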
5942         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5943             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5944                 val = tr32(TG3PCI_PCISTATE);
5945                 val |= PCISTATE_RETRY_SAME_DMA;
5946                 tw32(TG3PCI_PCISTATE, val);
5947         }
5948
5949         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5950                 /* Enable some hw fixes.  */
5951                 val = tr32(TG3PCI_MSI_DATA);
5952                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5953                 tw32(TG3PCI_MSI_DATA, val);
5954         }
5955
5956         /* Descriptor ring init may make accesses to the
5957          * NIC SRAM area to setup the TX descriptors, so we
5958          * can only do this after the hardware has been
5959          * successfully reset.
5960          */
5961         err = tg3_init_rings(tp);
5962         if (err)
5963                 return err;
5964
5965         /* This value is determined during the probe time DMA
5966          * engine test, tg3_test_dma.
5967          */
5968         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5969
5970         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5971                           GRC_MODE_4X_NIC_SEND_RINGS |
5972                           GRC_MODE_NO_TX_PHDR_CSUM |
5973                           GRC_MODE_NO_RX_PHDR_CSUM);
5974         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5975
5976         /* Pseudo-header checksum is done by hardware logic and not
5977          * the offload processors, so make the chip do the pseudo-
5978          * header checksums on receive.  For transmit it is more
5979          * convenient to do the pseudo-header checksum in software
5980          * as Linux does that on transmit for us in all cases.
5981          */
5982         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5983
5984         tw32(GRC_MODE,
5985              tp->grc_mode |
5986              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5987
5988         /* Setup the timer prescaler register.  Clock is always 66 MHz. */
5989         val = tr32(GRC_MISC_CFG);
5990         val &= ~0xff;
5991         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5992         tw32(GRC_MISC_CFG, val);
5993
5994         /* Initialize MBUF/DESC pool. */
5995         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5996                 /* Do nothing.  */
5997         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5998                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5999                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6000                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6001                 else
6002                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6003                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6004                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6005         }
6006 #if TG3_TSO_SUPPORT != 0
6007         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6008                 int fw_len;
6009
6010                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6011                           TG3_TSO5_FW_RODATA_LEN +
6012                           TG3_TSO5_FW_DATA_LEN +
6013                           TG3_TSO5_FW_SBSS_LEN +
6014                           TG3_TSO5_FW_BSS_LEN);
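                /* Round the firmware footprint up to a 128-byte boundary
                 * and reserve that much at the start of the 5705 MBUF pool.
                 */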
6015                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6016                 tw32(BUFMGR_MB_POOL_ADDR,
6017                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6018                 tw32(BUFMGR_MB_POOL_SIZE,
6019                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6020         }
6021 #endif
6022
6023         if (tp->dev->mtu <= ETH_DATA_LEN) {
6024                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6025                      tp->bufmgr_config.mbuf_read_dma_low_water);
6026                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6027                      tp->bufmgr_config.mbuf_mac_rx_low_water);
6028                 tw32(BUFMGR_MB_HIGH_WATER,
6029                      tp->bufmgr_config.mbuf_high_water);
6030         } else {
6031                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6032                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6033                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6034                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6035                 tw32(BUFMGR_MB_HIGH_WATER,
6036                      tp->bufmgr_config.mbuf_high_water_jumbo);
6037         }
6038         tw32(BUFMGR_DMA_LOW_WATER,
6039              tp->bufmgr_config.dma_low_water);
6040         tw32(BUFMGR_DMA_HIGH_WATER,
6041              tp->bufmgr_config.dma_high_water);
6042
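        /* Enable the buffer manager and poll for up to ~20ms (2000 x 10us)
         * for the enable bit to read back as set.
         */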
6043         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6044         for (i = 0; i < 2000; i++) {
6045                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6046                         break;
6047                 udelay(10);
6048         }
6049         if (i >= 2000) {
6050                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6051                        tp->dev->name);
6052                 return -ENODEV;
6053         }
6054
6055         /* Setup replenish threshold. */
6056         val = tp->rx_pending / 8;
6057         if (val == 0)
6058                 val = 1;
6059         else if (val > tp->rx_std_max_post)
6060                 val = tp->rx_std_max_post;
6061
6062         tw32(RCVBDI_STD_THRESH, val);
6063
6064         /* Initialize TG3_BDINFOs at:
6065          *  RCVDBDI_STD_BD:     standard eth size rx ring
6066          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
6067          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
6068          *
6069          * like so:
6070          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
6071          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
6072          *                              ring attribute flags
6073          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
6074          *
6075          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6076          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6077          *
6078          * The size of each ring is fixed in the firmware, but the location is
6079          * configurable.
6080          */
6081         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6082              ((u64) tp->rx_std_mapping >> 32));
6083         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6084              ((u64) tp->rx_std_mapping & 0xffffffff));
6085         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6086              NIC_SRAM_RX_BUFFER_DESC);
6087
6088         /* Don't even try to program the JUMBO/MINI buffer descriptor
6089          * configs on 5705.
6090          */
6091         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6092                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6093                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6094         } else {
6095                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6096                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6097
6098                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6099                      BDINFO_FLAGS_DISABLED);
6100
6101                 /* Setup replenish threshold. */
6102                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6103
6104                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6105                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6106                              ((u64) tp->rx_jumbo_mapping >> 32));
6107                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6108                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6109                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6110                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6111                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6112                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6113                 } else {
6114                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6115                              BDINFO_FLAGS_DISABLED);
6116                 }
6117
6118         }
6119
6120         /* There is only one send ring on 5705/5750, no need to explicitly
6121          * disable the others.
6122          */
6123         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6124                 /* Clear out send RCB ring in SRAM. */
6125                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6126                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6127                                       BDINFO_FLAGS_DISABLED);
6128         }
6129
6130         tp->tx_prod = 0;
6131         tp->tx_cons = 0;
6132         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6133         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6134
6135         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6136                        tp->tx_desc_mapping,
6137                        (TG3_TX_RING_SIZE <<
6138                         BDINFO_FLAGS_MAXLEN_SHIFT),
6139                        NIC_SRAM_TX_BUFFER_DESC);
6140
6141         /* There is only one receive return ring on 5705/5750, no need
6142          * to explicitly disable the others.
6143          */
6144         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6145                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6146                      i += TG3_BDINFO_SIZE) {
6147                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6148                                       BDINFO_FLAGS_DISABLED);
6149                 }
6150         }
6151
6152         tp->rx_rcb_ptr = 0;
6153         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6154
6155         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6156                        tp->rx_rcb_mapping,
6157                        (TG3_RX_RCB_RING_SIZE(tp) <<
6158                         BDINFO_FLAGS_MAXLEN_SHIFT),
6159                        0);
6160
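        /* Post the initial receive buffers by advancing the standard (and,
         * if enabled, jumbo) producer index mailboxes to the pending counts.
         */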
6161         tp->rx_std_ptr = tp->rx_pending;
6162         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6163                      tp->rx_std_ptr);
6164
6165         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6166                                                 tp->rx_jumbo_pending : 0;
6167         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6168                      tp->rx_jumbo_ptr);
6169
6170         /* Initialize MAC address and backoff seed. */
6171         __tg3_set_mac_addr(tp);
6172
6173         /* MTU + ethernet header + FCS + optional VLAN tag */
6174         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6175
6176         /* The slot time is changed by tg3_setup_phy if we
6177          * run at gigabit with half duplex.
6178          */
6179         tw32(MAC_TX_LENGTHS,
6180              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6181              (6 << TX_LENGTHS_IPG_SHIFT) |
6182              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6183
6184         /* Receive rules. */
6185         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6186         tw32(RCVLPC_CONFIG, 0x0181);
6187
6188         /* Calculate RDMAC_MODE setting early, we need it to determine
6189          * the RCVLPC_STATE_ENABLE mask.
6190          */
6191         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6192                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6193                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6194                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6195                       RDMAC_MODE_LNGREAD_ENAB);
6196         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6197                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
6198
6199         /* If statement applies to 5705 and 5750 PCI devices only */
6200         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6201              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6202             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6203                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6204                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6205                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6206                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6207                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6208                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6209                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6210                 }
6211         }
6212
6213         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6214                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6215
6216 #if TG3_TSO_SUPPORT != 0
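        /* Hardware TSO appears to need this extra read DMA mode bit set. */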
6217         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6218                 rdmac_mode |= (1 << 27);
6219 #endif
6220
6221         /* Receive/send statistics. */
6222         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6223                 val = tr32(RCVLPC_STATS_ENABLE);
6224                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6225                 tw32(RCVLPC_STATS_ENABLE, val);
6226         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6227                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6228                 val = tr32(RCVLPC_STATS_ENABLE);
6229                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6230                 tw32(RCVLPC_STATS_ENABLE, val);
6231         } else {
6232                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6233         }
6234         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6235         tw32(SNDDATAI_STATSENAB, 0xffffff);
6236         tw32(SNDDATAI_STATSCTRL,
6237              (SNDDATAI_SCTRL_ENABLE |
6238               SNDDATAI_SCTRL_FASTUPD));
6239
6240         /* Setup host coalescing engine. */
6241         tw32(HOSTCC_MODE, 0);
6242         for (i = 0; i < 2000; i++) {
6243                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6244                         break;
6245                 udelay(10);
6246         }
6247
6248         __tg3_set_coalesce(tp, &tp->coal);
6249
6250         /* set status block DMA address */
6251         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6252              ((u64) tp->status_mapping >> 32));
6253         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6254              ((u64) tp->status_mapping & 0xffffffff));
6255
6256         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6257                 /* Status/statistics block address.  See tg3_timer,
6258                  * the tg3_periodic_fetch_stats call there, and
6259                  * tg3_get_stats to see how this works for 5705/5750 chips.
6260                  */
6261                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6262                      ((u64) tp->stats_mapping >> 32));
6263                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6264                      ((u64) tp->stats_mapping & 0xffffffff));
6265                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6266                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6267         }
6268
6269         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6270
6271         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6272         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6273         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6274                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6275
6276         /* Clear statistics/status block in chip, and status block in ram. */
6277         for (i = NIC_SRAM_STATS_BLK;
6278              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6279              i += sizeof(u32)) {
6280                 tg3_write_mem(tp, i, 0);
6281                 udelay(40);
6282         }
6283         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6284
6285         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6286                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6287                 /* reset to prevent losing 1st rx packet intermittently */
6288                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6289                 udelay(10);
6290         }
6291
6292         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6293                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6294         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6295         udelay(40);
6296
6297         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6298          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
6299          * register to preserve the GPIO settings for LOMs. The GPIOs,
6300          * whether used as inputs or outputs, are set by boot code after
6301          * reset.
6302          */
6303         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
6304                 u32 gpio_mask;
6305
6306                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
6307                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
6308
6309                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6310                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6311                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6312
6313                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6314                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6315
6316                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6317
6318                 /* GPIO1 must be driven high for eeprom write protect */
6319                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6320                                        GRC_LCLCTRL_GPIO_OUTPUT1);
6321         }
6322         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6323         udelay(100);
6324
6325         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6326         tp->last_tag = 0;
6327
6328         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6329                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6330                 udelay(40);
6331         }
6332
6333         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6334                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6335                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6336                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6337                WDMAC_MODE_LNGREAD_ENAB);
6338
6339         /* If statement applies to 5705 and 5750 PCI devices only */
6340         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6341              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6342             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6343                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
6344                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6345                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6346                         /* nothing */
6347                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6348                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6349                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6350                         val |= WDMAC_MODE_RX_ACCEL;
6351                 }
6352         }
6353
6354         /* Enable host coalescing bug fix */
6355         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6356             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
6357                 val |= (1 << 29);
6358
6359         tw32_f(WDMAC_MODE, val);
6360         udelay(40);
6361
6362         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6363                 val = tr32(TG3PCI_X_CAPS);
6364                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6365                         val &= ~PCIX_CAPS_BURST_MASK;
6366                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6367                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6368                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6369                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6370                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6371                                 val |= (tp->split_mode_max_reqs <<
6372                                         PCIX_CAPS_SPLIT_SHIFT);
6373                 }
6374                 tw32(TG3PCI_X_CAPS, val);
6375         }
6376
6377         tw32_f(RDMAC_MODE, rdmac_mode);
6378         udelay(40);
6379
6380         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6381         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6382                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6383         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6384         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6385         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6386         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6387         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6388 #if TG3_TSO_SUPPORT != 0
6389         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6390                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6391 #endif
6392         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6393         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6394
6395         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6396                 err = tg3_load_5701_a0_firmware_fix(tp);
6397                 if (err)
6398                         return err;
6399         }
6400
6401 #if TG3_TSO_SUPPORT != 0
6402         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6403                 err = tg3_load_tso_firmware(tp);
6404                 if (err)
6405                         return err;
6406         }
6407 #endif
6408
6409         tp->tx_mode = TX_MODE_ENABLE;
6410         tw32_f(MAC_TX_MODE, tp->tx_mode);
6411         udelay(100);
6412
6413         tp->rx_mode = RX_MODE_ENABLE;
6414         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6415                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6416
6417         tw32_f(MAC_RX_MODE, tp->rx_mode);
6418         udelay(10);
6419
6420         if (tp->link_config.phy_is_low_power) {
6421                 tp->link_config.phy_is_low_power = 0;
6422                 tp->link_config.speed = tp->link_config.orig_speed;
6423                 tp->link_config.duplex = tp->link_config.orig_duplex;
6424                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6425         }
6426
6427         tp->mi_mode = MAC_MI_MODE_BASE;
6428         tw32_f(MAC_MI_MODE, tp->mi_mode);
6429         udelay(80);
6430
6431         tw32(MAC_LED_CTRL, tp->led_ctrl);
6432
6433         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6434         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6435                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6436                 udelay(10);
6437         }
6438         tw32_f(MAC_RX_MODE, tp->rx_mode);
6439         udelay(10);
6440
6441         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6442                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6443                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6444                         /* Set drive transmission level to 1.2V, but only
6445                          * if the signal pre-emphasis bit is not set.  */
6446                         val = tr32(MAC_SERDES_CFG);
6447                         val &= 0xfffff000;
6448                         val |= 0x880;
6449                         tw32(MAC_SERDES_CFG, val);
6450                 }
6451                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6452                         tw32(MAC_SERDES_CFG, 0x616000);
6453         }
6454
6455         /* Prevent chip from dropping frames when flow control
6456          * is enabled.
6457          */
6458         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6459
6460         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6461             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6462                 /* Use hardware link auto-negotiation */
6463                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6464         }
6465
6466         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6467             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6468                 u32 tmp;
6469
6470                 tmp = tr32(SERDES_RX_CTRL);
6471                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6472                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6473                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6474                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6475         }
6476
6477         err = tg3_setup_phy(tp, reset_phy);
6478         if (err)
6479                 return err;
6480
6481         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6482                 u32 tmp;
6483
6484                 /* Clear CRC stats. */
6485                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6486                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
6487                         tg3_readphy(tp, 0x14, &tmp);
6488                 }
6489         }
6490
6491         __tg3_set_rx_mode(tp->dev);
6492
6493         /* Initialize receive rules. */
6494         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6495         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6496         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6497         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6498
6499         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6500             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6501                 limit = 8;
6502         else
6503                 limit = 16;
6504         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6505                 limit -= 4;
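        /* Zero every receive rule/value pair above the supported limit;
         * each case below intentionally falls through to the next.
         */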
6506         switch (limit) {
6507         case 16:
6508                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6509         case 15:
6510                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6511         case 14:
6512                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6513         case 13:
6514                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6515         case 12:
6516                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6517         case 11:
6518                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6519         case 10:
6520                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6521         case 9:
6522                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6523         case 8:
6524                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6525         case 7:
6526                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6527         case 6:
6528                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6529         case 5:
6530                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6531         case 4:
6532                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6533         case 3:
6534                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6535         case 2:
6536         case 1:
6537
6538         default:
6539                 break;
6540         }
6541
6542         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6543
6544         return 0;
6545 }
6546
6547 /* Called at device open time to get the chip ready for
6548  * packet processing.  Invoked with tp->lock held.
6549  */
6550 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
6551 {
6552         int err;
6553
6554         /* Force the chip into D0. */
6555         err = tg3_set_power_state(tp, PCI_D0);
6556         if (err)
6557                 goto out;
6558
6559         tg3_switch_clocks(tp);
6560
6561         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6562
6563         err = tg3_reset_hw(tp, reset_phy);
6564
6565 out:
6566         return err;
6567 }
6568
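/* Accumulate a 32-bit hardware counter into a 64-bit software statistic,
 * carrying into the high word whenever the low word wraps.
 */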
6569 #define TG3_STAT_ADD32(PSTAT, REG) \
6570 do {    u32 __val = tr32(REG); \
6571         (PSTAT)->low += __val; \
6572         if ((PSTAT)->low < __val) \
6573                 (PSTAT)->high += 1; \
6574 } while (0)
6575
6576 static void tg3_periodic_fetch_stats(struct tg3 *tp)
6577 {
6578         struct tg3_hw_stats *sp = tp->hw_stats;
6579
6580         if (!netif_carrier_ok(tp->dev))
6581                 return;
6582
6583         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
6584         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
6585         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
6586         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
6587         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
6588         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
6589         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
6590         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
6591         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
6592         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
6593         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
6594         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
6595         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
6596
6597         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
6598         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
6599         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
6600         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
6601         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
6602         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
6603         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
6604         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
6605         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
6606         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
6607         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
6608         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
6609         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
6610         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
6611
6612         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
6613         TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
6614         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
6615 }
6616
6617 static void tg3_timer(unsigned long __opaque)
6618 {
6619         struct tg3 *tp = (struct tg3 *) __opaque;
6620
6621         if (tp->irq_sync)
6622                 goto restart_timer;
6623
6624         spin_lock(&tp->lock);
6625
6626         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6627                 /* All of this is needed because, when using non-tagged
6628                  * IRQ status, the mailbox/status_block protocol the chip
6629                  * uses with the CPU is race prone.
6630                  */
6631                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
6632                         tw32(GRC_LOCAL_CTRL,
6633                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
6634                 } else {
6635                         tw32(HOSTCC_MODE, tp->coalesce_mode |
6636                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
6637                 }
6638
6639                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
6640                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
6641                         spin_unlock(&tp->lock);
6642                         schedule_work(&tp->reset_task);
6643                         return;
6644                 }
6645         }
6646
6647         /* This part only runs once per second. */
6648         if (!--tp->timer_counter) {
6649                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6650                         tg3_periodic_fetch_stats(tp);
6651
6652                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
6653                         u32 mac_stat;
6654                         int phy_event;
6655
6656                         mac_stat = tr32(MAC_STATUS);
6657
6658                         phy_event = 0;
6659                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
6660                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
6661                                         phy_event = 1;
6662                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
6663                                 phy_event = 1;
6664
6665                         if (phy_event)
6666                                 tg3_setup_phy(tp, 0);
6667                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
6668                         u32 mac_stat = tr32(MAC_STATUS);
6669                         int need_setup = 0;
6670
6671                         if (netif_carrier_ok(tp->dev) &&
6672                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
6673                                 need_setup = 1;
6674                         }
6675                         if (!netif_carrier_ok(tp->dev) &&
6676                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
6677                                          MAC_STATUS_SIGNAL_DET))) {
6678                                 need_setup = 1;
6679                         }
6680                         if (need_setup) {
6681                                 if (!tp->serdes_counter) {
6682                                         tw32_f(MAC_MODE,
6683                                              (tp->mac_mode &
6684                                               ~MAC_MODE_PORT_MODE_MASK));
6685                                         udelay(40);
6686                                         tw32_f(MAC_MODE, tp->mac_mode);
6687                                         udelay(40);
6688                                 }
6689                                 tg3_setup_phy(tp, 0);
6690                         }
6691                 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
6692                         tg3_serdes_parallel_detect(tp);
6693
6694                 tp->timer_counter = tp->timer_multiplier;
6695         }
6696
6697         /* Heartbeat is only sent once every 2 seconds.
6698          *
6699          * The heartbeat is to tell the ASF firmware that the host
6700          * driver is still alive.  In the event that the OS crashes,
6701          * ASF needs to reset the hardware to free up the FIFO space
6702          * that may be filled with rx packets destined for the host.
6703          * If the FIFO is full, ASF will no longer function properly.
6704          *
6705          * Unintended resets have been reported on real-time kernels
6706          * where the timer doesn't run on time.  Netpoll will have the
6707          * same problem.
6708          *
6709          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
6710          * to check the ring condition when the heartbeat is expiring
6711          * before doing the reset.  This will prevent most unintended
6712          * resets.
6713          */
6714         if (!--tp->asf_counter) {
6715                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6716                         u32 val;
6717
6718                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
6719                                       FWCMD_NICDRV_ALIVE3);
6720                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
6721                         /* 5 seconds timeout */
6722                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
6723                         val = tr32(GRC_RX_CPU_EVENT);
6724                         val |= (1 << 14);
6725                         tw32(GRC_RX_CPU_EVENT, val);
6726                 }
6727                 tp->asf_counter = tp->asf_multiplier;
6728         }
6729
6730         spin_unlock(&tp->lock);
6731
6732 restart_timer:
6733         tp->timer.expires = jiffies + tp->timer_offset;
6734         add_timer(&tp->timer);
6735 }
6736
6737 static int tg3_request_irq(struct tg3 *tp)
6738 {
6739         irqreturn_t (*fn)(int, void *, struct pt_regs *);
6740         unsigned long flags;
6741         struct net_device *dev = tp->dev;
6742
6743         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6744                 fn = tg3_msi;
6745                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
6746                         fn = tg3_msi_1shot;
6747                 flags = IRQF_SAMPLE_RANDOM;
6748         } else {
6749                 fn = tg3_interrupt;
6750                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6751                         fn = tg3_interrupt_tagged;
6752                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
6753         }
6754         return request_irq(tp->pdev->irq, fn, flags, dev->name, dev);
6755 }
6756
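/* Verify that the chip can generate an interrupt at all: temporarily install
 * tg3_test_isr, force a coalescing-now event, and poll the interrupt mailbox
 * for up to ~50 ms before restoring the normal handler.
 */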
6757 static int tg3_test_interrupt(struct tg3 *tp)
6758 {
6759         struct net_device *dev = tp->dev;
6760         int err, i;
6761         u32 int_mbox = 0;
6762
6763         if (!netif_running(dev))
6764                 return -ENODEV;
6765
6766         tg3_disable_ints(tp);
6767
6768         free_irq(tp->pdev->irq, dev);
6769
6770         err = request_irq(tp->pdev->irq, tg3_test_isr,
6771                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
6772         if (err)
6773                 return err;
6774
6775         tp->hw_status->status &= ~SD_STATUS_UPDATED;
6776         tg3_enable_ints(tp);
6777
6778         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
6779                HOSTCC_MODE_NOW);
6780
6781         for (i = 0; i < 5; i++) {
6782                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
6783                                         TG3_64BIT_REG_LOW);
6784                 if (int_mbox != 0)
6785                         break;
6786                 msleep(10);
6787         }
6788
6789         tg3_disable_ints(tp);
6790
6791         free_irq(tp->pdev->irq, dev);
6792
6793         err = tg3_request_irq(tp);
6794
6795         if (err)
6796                 return err;
6797
6798         if (int_mbox != 0)
6799                 return 0;
6800
6801         return -EIO;
6802 }
6803
6804 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
6805  * INTx mode is successfully restored.
6806  */
6807 static int tg3_test_msi(struct tg3 *tp)
6808 {
6809         struct net_device *dev = tp->dev;
6810         int err;
6811         u16 pci_cmd;
6812
6813         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
6814                 return 0;
6815
6816         /* Turn off SERR reporting in case MSI terminates with Master
6817          * Abort.
6818          */
6819         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6820         pci_write_config_word(tp->pdev, PCI_COMMAND,
6821                               pci_cmd & ~PCI_COMMAND_SERR);
6822
6823         err = tg3_test_interrupt(tp);
6824
6825         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6826
6827         if (!err)
6828                 return 0;
6829
6830         /* other failures */
6831         if (err != -EIO)
6832                 return err;
6833
6834         /* MSI test failed, go back to INTx mode */
6835         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
6836                "switching to INTx mode. Please report this failure to "
6837                "the PCI maintainer and include system chipset information.\n",
6838                        tp->dev->name);
6839
6840         free_irq(tp->pdev->irq, dev);
6841         pci_disable_msi(tp->pdev);
6842
6843         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6844
6845         err = tg3_request_irq(tp);
6846         if (err)
6847                 return err;
6848
6849         /* Need to reset the chip because the MSI cycle may have terminated
6850          * with Master Abort.
6851          */
6852         tg3_full_lock(tp, 1);
6853
6854         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6855         err = tg3_init_hw(tp, 1);
6856
6857         tg3_full_unlock(tp);
6858
6859         if (err)
6860                 free_irq(tp->pdev->irq, dev);
6861
6862         return err;
6863 }
6864
6865 static int tg3_open(struct net_device *dev)
6866 {
6867         struct tg3 *tp = netdev_priv(dev);
6868         int err;
6869
6870         tg3_full_lock(tp, 0);
6871
6872         err = tg3_set_power_state(tp, PCI_D0);
        if (err) {
                tg3_full_unlock(tp);
                return err;
        }
6875
6876         tg3_disable_ints(tp);
6877         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6878
6879         tg3_full_unlock(tp);
6880
6881         /* The placement of this call is tied
6882          * to the setup and use of Host TX descriptors.
6883          */
6884         err = tg3_alloc_consistent(tp);
6885         if (err)
6886                 return err;
6887
6888         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6889             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6890             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
6891             !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
6892               (tp->pdev_peer == tp->pdev))) {
6893                 /* All MSI-supporting chips should support tagged
6894                  * status.  Assert that this is the case.
6895                  */
6896                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6897                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6898                                "Not using MSI.\n", tp->dev->name);
6899                 } else if (pci_enable_msi(tp->pdev) == 0) {
6900                         u32 msi_mode;
6901
6902                         msi_mode = tr32(MSGINT_MODE);
6903                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6904                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6905                 }
6906         }
6907         err = tg3_request_irq(tp);
6908
6909         if (err) {
6910                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6911                         pci_disable_msi(tp->pdev);
6912                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6913                 }
6914                 tg3_free_consistent(tp);
6915                 return err;
6916         }
6917
6918         tg3_full_lock(tp, 0);
6919
6920         err = tg3_init_hw(tp, 1);
6921         if (err) {
6922                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6923                 tg3_free_rings(tp);
6924         } else {
6925                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6926                         tp->timer_offset = HZ;
6927                 else
6928                         tp->timer_offset = HZ / 10;
6929
6930                 BUG_ON(tp->timer_offset > HZ);
6931                 tp->timer_counter = tp->timer_multiplier =
6932                         (HZ / tp->timer_offset);
6933                 tp->asf_counter = tp->asf_multiplier =
6934                         ((HZ / tp->timer_offset) * 2);
6935
6936                 init_timer(&tp->timer);
6937                 tp->timer.expires = jiffies + tp->timer_offset;
6938                 tp->timer.data = (unsigned long) tp;
6939                 tp->timer.function = tg3_timer;
6940         }
6941
6942         tg3_full_unlock(tp);
6943
6944         if (err) {
6945                 free_irq(tp->pdev->irq, dev);
6946                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6947                         pci_disable_msi(tp->pdev);
6948                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6949                 }
6950                 tg3_free_consistent(tp);
6951                 return err;
6952         }
6953
6954         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6955                 err = tg3_test_msi(tp);
6956
6957                 if (err) {
6958                         tg3_full_lock(tp, 0);
6959
6960                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6961                                 pci_disable_msi(tp->pdev);
6962                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6963                         }
6964                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6965                         tg3_free_rings(tp);
6966                         tg3_free_consistent(tp);
6967
6968                         tg3_full_unlock(tp);
6969
6970                         return err;
6971                 }
6972
6973                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6974                         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
6975                                 u32 val = tr32(0x7c04);
6976
6977                                 tw32(0x7c04, val | (1 << 29));
6978                         }
6979                 }
6980         }
6981
6982         tg3_full_lock(tp, 0);
6983
6984         add_timer(&tp->timer);
6985         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6986         tg3_enable_ints(tp);
6987
6988         tg3_full_unlock(tp);
6989
6990         netif_start_queue(dev);
6991
6992         return 0;
6993 }
6994
6995 #if 0
6996 /*static*/ void tg3_dump_state(struct tg3 *tp)
6997 {
6998         u32 val32, val32_2, val32_3, val32_4, val32_5;
6999         u16 val16;
7000         int i;
7001
7002         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
7003         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
7004         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
7005                val16, val32);
7006
7007         /* MAC block */
7008         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
7009                tr32(MAC_MODE), tr32(MAC_STATUS));
7010         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
7011                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
7012         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
7013                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
7014         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
7015                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
7016
7017         /* Send data initiator control block */
7018         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
7019                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
7020         printk("       SNDDATAI_STATSCTRL[%08x]\n",
7021                tr32(SNDDATAI_STATSCTRL));
7022
7023         /* Send data completion control block */
7024         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
7025
7026         /* Send BD ring selector block */
7027         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
7028                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
7029
7030         /* Send BD initiator control block */
7031         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
7032                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
7033
7034         /* Send BD completion control block */
7035         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
7036
7037         /* Receive list placement control block */
7038         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
7039                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
7040         printk("       RCVLPC_STATSCTRL[%08x]\n",
7041                tr32(RCVLPC_STATSCTRL));
7042
7043         /* Receive data and receive BD initiator control block */
7044         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
7045                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
7046
7047         /* Receive data completion control block */
7048         printk("DEBUG: RCVDCC_MODE[%08x]\n",
7049                tr32(RCVDCC_MODE));
7050
7051         /* Receive BD initiator control block */
7052         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
7053                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
7054
7055         /* Receive BD completion control block */
7056         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
7057                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
7058
7059         /* Receive list selector control block */
7060         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
7061                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
7062
7063         /* Mbuf cluster free block */
7064         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
7065                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
7066
7067         /* Host coalescing control block */
7068         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
7069                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
7070         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
7071                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7072                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7073         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
7074                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7075                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7076         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
7077                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
7078         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
7079                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
7080
7081         /* Memory arbiter control block */
7082         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
7083                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
7084
7085         /* Buffer manager control block */
7086         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
7087                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
7088         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
7089                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
7090         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
7091                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
7092                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
7093                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
7094
7095         /* Read DMA control block */
7096         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
7097                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
7098
7099         /* Write DMA control block */
7100         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
7101                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
7102
7103         /* DMA completion block */
7104         printk("DEBUG: DMAC_MODE[%08x]\n",
7105                tr32(DMAC_MODE));
7106
7107         /* GRC block */
7108         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
7109                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
7110         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
7111                tr32(GRC_LOCAL_CTRL));
7112
7113         /* TG3_BDINFOs */
7114         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
7115                tr32(RCVDBDI_JUMBO_BD + 0x0),
7116                tr32(RCVDBDI_JUMBO_BD + 0x4),
7117                tr32(RCVDBDI_JUMBO_BD + 0x8),
7118                tr32(RCVDBDI_JUMBO_BD + 0xc));
7119         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
7120                tr32(RCVDBDI_STD_BD + 0x0),
7121                tr32(RCVDBDI_STD_BD + 0x4),
7122                tr32(RCVDBDI_STD_BD + 0x8),
7123                tr32(RCVDBDI_STD_BD + 0xc));
7124         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
7125                tr32(RCVDBDI_MINI_BD + 0x0),
7126                tr32(RCVDBDI_MINI_BD + 0x4),
7127                tr32(RCVDBDI_MINI_BD + 0x8),
7128                tr32(RCVDBDI_MINI_BD + 0xc));
7129
7130         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
7131         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
7132         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
7133         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
7134         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
7135                val32, val32_2, val32_3, val32_4);
7136
7137         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
7138         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
7139         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
7140         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
7141         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
7142                val32, val32_2, val32_3, val32_4);
7143
7144         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
7145         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
7146         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
7147         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
7148         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
7149         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
7150                val32, val32_2, val32_3, val32_4, val32_5);
7151
7152         /* SW status block */
7153         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
7154                tp->hw_status->status,
7155                tp->hw_status->status_tag,
7156                tp->hw_status->rx_jumbo_consumer,
7157                tp->hw_status->rx_consumer,
7158                tp->hw_status->rx_mini_consumer,
7159                tp->hw_status->idx[0].rx_producer,
7160                tp->hw_status->idx[0].tx_consumer);
7161
7162         /* SW statistics block */
7163         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
7164                ((u32 *)tp->hw_stats)[0],
7165                ((u32 *)tp->hw_stats)[1],
7166                ((u32 *)tp->hw_stats)[2],
7167                ((u32 *)tp->hw_stats)[3]);
7168
7169         /* Mailboxes */
7170         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
7171                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
7172                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7173                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7174                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
7175
7176         /* NIC side send descriptors. */
7177         for (i = 0; i < 6; i++) {
7178                 unsigned long txd;
7179
7180                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7181                         + (i * sizeof(struct tg3_tx_buffer_desc));
7182                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7183                        i,
7184                        readl(txd + 0x0), readl(txd + 0x4),
7185                        readl(txd + 0x8), readl(txd + 0xc));
7186         }
7187
7188         /* NIC side RX descriptors. */
7189         for (i = 0; i < 6; i++) {
7190                 unsigned long rxd;
7191
7192                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7193                         + (i * sizeof(struct tg3_rx_buffer_desc));
7194                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7195                        i,
7196                        readl(rxd + 0x0), readl(rxd + 0x4),
7197                        readl(rxd + 0x8), readl(rxd + 0xc));
7198                 rxd += (4 * sizeof(u32));
7199                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7200                        i,
7201                        readl(rxd + 0x0), readl(rxd + 0x4),
7202                        readl(rxd + 0x8), readl(rxd + 0xc));
7203         }
7204
7205         for (i = 0; i < 6; i++) {
7206                 unsigned long rxd;
7207
7208                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7209                         + (i * sizeof(struct tg3_rx_buffer_desc));
7210                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7211                        i,
7212                        readl(rxd + 0x0), readl(rxd + 0x4),
7213                        readl(rxd + 0x8), readl(rxd + 0xc));
7214                 rxd += (4 * sizeof(u32));
7215                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7216                        i,
7217                        readl(rxd + 0x0), readl(rxd + 0x4),
7218                        readl(rxd + 0x8), readl(rxd + 0xc));
7219         }
7220 }
7221 #endif
7222
7223 static struct net_device_stats *tg3_get_stats(struct net_device *);
7224 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7225
7226 static int tg3_close(struct net_device *dev)
7227 {
7228         struct tg3 *tp = netdev_priv(dev);
7229
7230         /* Calling flush_scheduled_work() may deadlock because
7231          * linkwatch_event() may be on the workqueue and it will try to get
7232          * the rtnl_lock which we are holding.
7233          */
7234         while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
7235                 msleep(1);
7236
7237         netif_stop_queue(dev);
7238
7239         del_timer_sync(&tp->timer);
7240
7241         tg3_full_lock(tp, 1);
7242 #if 0
7243         tg3_dump_state(tp);
7244 #endif
7245
7246         tg3_disable_ints(tp);
7247
7248         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7249         tg3_free_rings(tp);
7250         tp->tg3_flags &=
7251                 ~(TG3_FLAG_INIT_COMPLETE |
7252                   TG3_FLAG_GOT_SERDES_FLOWCTL);
7253
7254         tg3_full_unlock(tp);
7255
7256         free_irq(tp->pdev->irq, dev);
7257         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7258                 pci_disable_msi(tp->pdev);
7259                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7260         }
7261
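        /* Snapshot the counters before the DMA memory is freed below;
         * tg3_get_stats() and tg3_get_estats() add fresh hardware counts
         * on top of these saved values.
         */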
7262         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
7263                sizeof(tp->net_stats_prev));
7264         memcpy(&tp->estats_prev, tg3_get_estats(tp),
7265                sizeof(tp->estats_prev));
7266
7267         tg3_free_consistent(tp);
7268
7269         tg3_set_power_state(tp, PCI_D3hot);
7270
7271         netif_carrier_off(tp->dev);
7272
7273         return 0;
7274 }
7275
7276 static inline unsigned long get_stat64(tg3_stat64_t *val)
7277 {
7278         unsigned long ret;
7279
7280 #if (BITS_PER_LONG == 32)
7281         ret = val->low;
7282 #else
7283         ret = ((u64)val->high << 32) | ((u64)val->low);
7284 #endif
7285         return ret;
7286 }
7287
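/* On 5700/5701 with a copper PHY, read the CRC error counter from the PHY
 * (the same 0x1e/0x14 register sequence tg3_reset_hw() uses to clear it) and
 * accumulate it in software; other chips report it in the statistics block.
 */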
7288 static unsigned long calc_crc_errors(struct tg3 *tp)
7289 {
7290         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7291
7292         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7293             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7294              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
7295                 u32 val;
7296
7297                 spin_lock_bh(&tp->lock);
7298                 if (!tg3_readphy(tp, 0x1e, &val)) {
7299                         tg3_writephy(tp, 0x1e, val | 0x8000);
7300                         tg3_readphy(tp, 0x14, &val);
7301                 } else
7302                         val = 0;
7303                 spin_unlock_bh(&tp->lock);
7304
7305                 tp->phy_crc_errors += val;
7306
7307                 return tp->phy_crc_errors;
7308         }
7309
7310         return get_stat64(&hw_stats->rx_fcs_errors);
7311 }
7312
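/* Each ethtool counter is the snapshot saved at the last close (estats_prev)
 * plus the current hardware counter.
 */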
7313 #define ESTAT_ADD(member) \
7314         estats->member =        old_estats->member + \
7315                                 get_stat64(&hw_stats->member)
7316
7317 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
7318 {
7319         struct tg3_ethtool_stats *estats = &tp->estats;
7320         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
7321         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7322
7323         if (!hw_stats)
7324                 return old_estats;
7325
7326         ESTAT_ADD(rx_octets);
7327         ESTAT_ADD(rx_fragments);
7328         ESTAT_ADD(rx_ucast_packets);
7329         ESTAT_ADD(rx_mcast_packets);
7330         ESTAT_ADD(rx_bcast_packets);
7331         ESTAT_ADD(rx_fcs_errors);
7332         ESTAT_ADD(rx_align_errors);
7333         ESTAT_ADD(rx_xon_pause_rcvd);
7334         ESTAT_ADD(rx_xoff_pause_rcvd);
7335         ESTAT_ADD(rx_mac_ctrl_rcvd);
7336         ESTAT_ADD(rx_xoff_entered);
7337         ESTAT_ADD(rx_frame_too_long_errors);
7338         ESTAT_ADD(rx_jabbers);
7339         ESTAT_ADD(rx_undersize_packets);
7340         ESTAT_ADD(rx_in_length_errors);
7341         ESTAT_ADD(rx_out_length_errors);
7342         ESTAT_ADD(rx_64_or_less_octet_packets);
7343         ESTAT_ADD(rx_65_to_127_octet_packets);
7344         ESTAT_ADD(rx_128_to_255_octet_packets);
7345         ESTAT_ADD(rx_256_to_511_octet_packets);
7346         ESTAT_ADD(rx_512_to_1023_octet_packets);
7347         ESTAT_ADD(rx_1024_to_1522_octet_packets);
7348         ESTAT_ADD(rx_1523_to_2047_octet_packets);
7349         ESTAT_ADD(rx_2048_to_4095_octet_packets);
7350         ESTAT_ADD(rx_4096_to_8191_octet_packets);
7351         ESTAT_ADD(rx_8192_to_9022_octet_packets);
7352
7353         ESTAT_ADD(tx_octets);
7354         ESTAT_ADD(tx_collisions);
7355         ESTAT_ADD(tx_xon_sent);
7356         ESTAT_ADD(tx_xoff_sent);
7357         ESTAT_ADD(tx_flow_control);
7358         ESTAT_ADD(tx_mac_errors);
7359         ESTAT_ADD(tx_single_collisions);
7360         ESTAT_ADD(tx_mult_collisions);
7361         ESTAT_ADD(tx_deferred);
7362         ESTAT_ADD(tx_excessive_collisions);
7363         ESTAT_ADD(tx_late_collisions);
7364         ESTAT_ADD(tx_collide_2times);
7365         ESTAT_ADD(tx_collide_3times);
7366         ESTAT_ADD(tx_collide_4times);
7367         ESTAT_ADD(tx_collide_5times);
7368         ESTAT_ADD(tx_collide_6times);
7369         ESTAT_ADD(tx_collide_7times);
7370         ESTAT_ADD(tx_collide_8times);
7371         ESTAT_ADD(tx_collide_9times);
7372         ESTAT_ADD(tx_collide_10times);
7373         ESTAT_ADD(tx_collide_11times);
7374         ESTAT_ADD(tx_collide_12times);
7375         ESTAT_ADD(tx_collide_13times);
7376         ESTAT_ADD(tx_collide_14times);
7377         ESTAT_ADD(tx_collide_15times);
7378         ESTAT_ADD(tx_ucast_packets);
7379         ESTAT_ADD(tx_mcast_packets);
7380         ESTAT_ADD(tx_bcast_packets);
7381         ESTAT_ADD(tx_carrier_sense_errors);
7382         ESTAT_ADD(tx_discards);
7383         ESTAT_ADD(tx_errors);
7384
7385         ESTAT_ADD(dma_writeq_full);
7386         ESTAT_ADD(dma_write_prioq_full);
7387         ESTAT_ADD(rxbds_empty);
7388         ESTAT_ADD(rx_discards);
7389         ESTAT_ADD(rx_errors);
7390         ESTAT_ADD(rx_threshold_hit);
7391
7392         ESTAT_ADD(dma_readq_full);
7393         ESTAT_ADD(dma_read_prioq_full);
7394         ESTAT_ADD(tx_comp_queue_full);
7395
7396         ESTAT_ADD(ring_set_send_prod_index);
7397         ESTAT_ADD(ring_status_update);
7398         ESTAT_ADD(nic_irqs);
7399         ESTAT_ADD(nic_avoided_irqs);
7400         ESTAT_ADD(nic_tx_threshold_hit);
7401
7402         return estats;
7403 }
7404
7405 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
7406 {
7407         struct tg3 *tp = netdev_priv(dev);
7408         struct net_device_stats *stats = &tp->net_stats;
7409         struct net_device_stats *old_stats = &tp->net_stats_prev;
7410         struct tg3_hw_stats *hw_stats = tp->hw_stats;
7411
7412         if (!hw_stats)
7413                 return old_stats;
7414
7415         stats->rx_packets = old_stats->rx_packets +
7416                 get_stat64(&hw_stats->rx_ucast_packets) +
7417                 get_stat64(&hw_stats->rx_mcast_packets) +
7418                 get_stat64(&hw_stats->rx_bcast_packets);
7419
7420         stats->tx_packets = old_stats->tx_packets +
7421                 get_stat64(&hw_stats->tx_ucast_packets) +
7422                 get_stat64(&hw_stats->tx_mcast_packets) +
7423                 get_stat64(&hw_stats->tx_bcast_packets);
7424
7425         stats->rx_bytes = old_stats->rx_bytes +
7426                 get_stat64(&hw_stats->rx_octets);
7427         stats->tx_bytes = old_stats->tx_bytes +
7428                 get_stat64(&hw_stats->tx_octets);
7429
7430         stats->rx_errors = old_stats->rx_errors +
7431                 get_stat64(&hw_stats->rx_errors);
7432         stats->tx_errors = old_stats->tx_errors +
7433                 get_stat64(&hw_stats->tx_errors) +
7434                 get_stat64(&hw_stats->tx_mac_errors) +
7435                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
7436                 get_stat64(&hw_stats->tx_discards);
7437
7438         stats->multicast = old_stats->multicast +
7439                 get_stat64(&hw_stats->rx_mcast_packets);
7440         stats->collisions = old_stats->collisions +
7441                 get_stat64(&hw_stats->tx_collisions);
7442
7443         stats->rx_length_errors = old_stats->rx_length_errors +
7444                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
7445                 get_stat64(&hw_stats->rx_undersize_packets);
7446
7447         stats->rx_over_errors = old_stats->rx_over_errors +
7448                 get_stat64(&hw_stats->rxbds_empty);
7449         stats->rx_frame_errors = old_stats->rx_frame_errors +
7450                 get_stat64(&hw_stats->rx_align_errors);
7451         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
7452                 get_stat64(&hw_stats->tx_discards);
7453         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
7454                 get_stat64(&hw_stats->tx_carrier_sense_errors);
7455
7456         stats->rx_crc_errors = old_stats->rx_crc_errors +
7457                 calc_crc_errors(tp);
7458
7459         stats->rx_missed_errors = old_stats->rx_missed_errors +
7460                 get_stat64(&hw_stats->rx_discards);
7461
7462         return stats;
7463 }
7464
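/* Bit-by-bit Ethernet CRC-32 (reflected, polynomial 0xedb88320), used to
 * hash multicast addresses into the MAC hash registers below.
 */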
7465 static inline u32 calc_crc(unsigned char *buf, int len)
7466 {
7467         u32 reg;
7468         u32 tmp;
7469         int j, k;
7470
7471         reg = 0xffffffff;
7472
7473         for (j = 0; j < len; j++) {
7474                 reg ^= buf[j];
7475
7476                 for (k = 0; k < 8; k++) {
7477                         tmp = reg & 0x01;
7478
7479                         reg >>= 1;
7480
7481                         if (tmp) {
7482                                 reg ^= 0xedb88320;
7483                         }
7484                 }
7485         }
7486
7487         return ~reg;
7488 }
7489
7490 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7491 {
7492         /* accept or reject all multicast frames */
7493         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7494         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7495         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7496         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7497 }
7498
7499 static void __tg3_set_rx_mode(struct net_device *dev)
7500 {
7501         struct tg3 *tp = netdev_priv(dev);
7502         u32 rx_mode;
7503
7504         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
7505                                   RX_MODE_KEEP_VLAN_TAG);
7506
7507         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
7508          * flag clear.
7509          */
7510 #if TG3_VLAN_TAG_USED
7511         if (!tp->vlgrp &&
7512             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7513                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7514 #else
7515         /* By definition, VLAN is always disabled in this
7516          * case.
7517          */
7518         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
7519                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
7520 #endif
7521
7522         if (dev->flags & IFF_PROMISC) {
7523                 /* Promiscuous mode. */
7524                 rx_mode |= RX_MODE_PROMISC;
7525         } else if (dev->flags & IFF_ALLMULTI) {
7526                 /* Accept all multicast. */
7527                 tg3_set_multi(tp, 1);
7528         } else if (dev->mc_count < 1) {
7529                 /* Reject all multicast. */
7530                 tg3_set_multi(tp, 0);
7531         } else {
7532                 /* Accept one or more multicast(s). */
7533                 struct dev_mc_list *mclist;
7534                 unsigned int i;
7535                 u32 mc_filter[4] = { 0, };
7536                 u32 regidx;
7537                 u32 bit;
7538                 u32 crc;
7539
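                /* The low 7 bits of the inverted CRC select one of 128
                 * filter bits spread across the four MAC_HASH registers.
                 */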
7540                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
7541                      i++, mclist = mclist->next) {
7542
7543                         crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
7544                         bit = ~crc & 0x7f;
7545                         regidx = (bit & 0x60) >> 5;
7546                         bit &= 0x1f;
7547                         mc_filter[regidx] |= (1 << bit);
7548                 }
7549
7550                 tw32(MAC_HASH_REG_0, mc_filter[0]);
7551                 tw32(MAC_HASH_REG_1, mc_filter[1]);
7552                 tw32(MAC_HASH_REG_2, mc_filter[2]);
7553                 tw32(MAC_HASH_REG_3, mc_filter[3]);
7554         }
7555
7556         if (rx_mode != tp->rx_mode) {
7557                 tp->rx_mode = rx_mode;
7558                 tw32_f(MAC_RX_MODE, rx_mode);
7559                 udelay(10);
7560         }
7561 }
7562
7563 static void tg3_set_rx_mode(struct net_device *dev)
7564 {
7565         struct tg3 *tp = netdev_priv(dev);
7566
7567         if (!netif_running(dev))
7568                 return;
7569
7570         tg3_full_lock(tp, 0);
7571         __tg3_set_rx_mode(dev);
7572         tg3_full_unlock(tp);
7573 }
7574
7575 #define TG3_REGDUMP_LEN         (32 * 1024)
7576
7577 static int tg3_get_regs_len(struct net_device *dev)
7578 {
7579         return TG3_REGDUMP_LEN;
7580 }
7581
7582 static void tg3_get_regs(struct net_device *dev,
7583                 struct ethtool_regs *regs, void *_p)
7584 {
7585         u32 *p = _p;
7586         struct tg3 *tp = netdev_priv(dev);
7587         u8 *orig_p = _p;
7588         int i;
7589
7590         regs->version = 0;
7591
7592         memset(p, 0, TG3_REGDUMP_LEN);
7593
7594         if (tp->link_config.phy_is_low_power)
7595                 return;
7596
7597         tg3_full_lock(tp, 0);
7598
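/* Each helper copies a register block into the dump buffer at the same offset
 * it occupies in the register map, so unread gaps stay zeroed by the memset
 * above.
 */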
7599 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
7600 #define GET_REG32_LOOP(base,len)                \
7601 do {    p = (u32 *)(orig_p + (base));           \
7602         for (i = 0; i < len; i += 4)            \
7603                 __GET_REG32((base) + i);        \
7604 } while (0)
7605 #define GET_REG32_1(reg)                        \
7606 do {    p = (u32 *)(orig_p + (reg));            \
7607         __GET_REG32((reg));                     \
7608 } while (0)
7609
7610         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
7611         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
7612         GET_REG32_LOOP(MAC_MODE, 0x4f0);
7613         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
7614         GET_REG32_1(SNDDATAC_MODE);
7615         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
7616         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
7617         GET_REG32_1(SNDBDC_MODE);
7618         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
7619         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
7620         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
7621         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
7622         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
7623         GET_REG32_1(RCVDCC_MODE);
7624         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
7625         GET_REG32_LOOP(RCVCC_MODE, 0x14);
7626         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
7627         GET_REG32_1(MBFREE_MODE);
7628         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
7629         GET_REG32_LOOP(MEMARB_MODE, 0x10);
7630         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
7631         GET_REG32_LOOP(RDMAC_MODE, 0x08);
7632         GET_REG32_LOOP(WDMAC_MODE, 0x08);
7633         GET_REG32_1(RX_CPU_MODE);
7634         GET_REG32_1(RX_CPU_STATE);
7635         GET_REG32_1(RX_CPU_PGMCTR);
7636         GET_REG32_1(RX_CPU_HWBKPT);
7637         GET_REG32_1(TX_CPU_MODE);
7638         GET_REG32_1(TX_CPU_STATE);
7639         GET_REG32_1(TX_CPU_PGMCTR);
7640         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
7641         GET_REG32_LOOP(FTQ_RESET, 0x120);
7642         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
7643         GET_REG32_1(DMAC_MODE);
7644         GET_REG32_LOOP(GRC_MODE, 0x4c);
7645         if (tp->tg3_flags & TG3_FLAG_NVRAM)
7646                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
7647
7648 #undef __GET_REG32
7649 #undef GET_REG32_LOOP
7650 #undef GET_REG32_1
7651
7652         tg3_full_unlock(tp);
7653 }
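
/* Illustrative sketch only, not used by the driver: the dump built by
 * tg3_get_regs() is indexed by register offset.  GET_REG32_LOOP()
 * repositions 'p' to orig_p + base before copying, so register REG
 * lands at byte offset REG inside the TG3_REGDUMP_LEN buffer and the
 * ranges that are never read stay zero from the memset().  A consumer
 * of the ethtool register dump could therefore look a value up with a
 * hypothetical helper like this:
 */
static inline u32 tg3_example_regdump_lookup(const void *dump, u32 reg_off)
{
        const u32 *words = dump;

        return words[reg_off / sizeof(u32)];    /* e.g. reg_off = MAC_MODE */
}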
7654
7655 static int tg3_get_eeprom_len(struct net_device *dev)
7656 {
7657         struct tg3 *tp = netdev_priv(dev);
7658
7659         return tp->nvram_size;
7660 }
7661
7662 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7663 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
7664
7665 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7666 {
7667         struct tg3 *tp = netdev_priv(dev);
7668         int ret;
7669         u8  *pd;
7670         u32 i, offset, len, val, b_offset, b_count;
7671
7672         if (tp->link_config.phy_is_low_power)
7673                 return -EAGAIN;
7674
7675         offset = eeprom->offset;
7676         len = eeprom->len;
7677         eeprom->len = 0;
7678
7679         eeprom->magic = TG3_EEPROM_MAGIC;
7680
7681         if (offset & 3) {
7682                 /* adjustments to start on required 4 byte boundary */
7683                 b_offset = offset & 3;
7684                 b_count = 4 - b_offset;
7685                 if (b_count > len) {
7686                         /* i.e. offset=1 len=2 */
7687                         b_count = len;
7688                 }
7689                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
7690                 if (ret)
7691                         return ret;
7692                 val = cpu_to_le32(val);
7693                 memcpy(data, ((char*)&val) + b_offset, b_count);
7694                 len -= b_count;
7695                 offset += b_count;
7696                 eeprom->len += b_count;
7697         }
7698
7699         /* read bytes up to the last 4 byte boundary */
7700         pd = &data[eeprom->len];
7701         for (i = 0; i < (len - (len & 3)); i += 4) {
7702                 ret = tg3_nvram_read(tp, offset + i, &val);
7703                 if (ret) {
7704                         eeprom->len += i;
7705                         return ret;
7706                 }
7707                 val = cpu_to_le32(val);
7708                 memcpy(pd + i, &val, 4);
7709         }
7710         eeprom->len += i;
7711
7712         if (len & 3) {
7713                 /* read last bytes not ending on 4 byte boundary */
7714                 pd = &data[eeprom->len];
7715                 b_count = len & 3;
7716                 b_offset = offset + len - b_count;
7717                 ret = tg3_nvram_read(tp, b_offset, &val);
7718                 if (ret)
7719                         return ret;
7720                 val = cpu_to_le32(val);
7721                 memcpy(pd, ((char*)&val), b_count);
7722                 eeprom->len += b_count;
7723         }
7724         return 0;
7725 }
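
/* Illustrative sketch, not called by the driver: how tg3_get_eeprom()
 * above carves an arbitrary (offset, len) request into NVRAM word
 * accesses -- an unaligned head inside the first 32-bit word, a run of
 * whole words, and an unaligned tail.  The struct and helper names are
 * hypothetical and exist only for explanation.
 */
struct tg3_example_nvram_split {
        u32 head;       /* bytes served from the first, partly covered word */
        u32 middle;     /* bytes covered by whole 32-bit words */
        u32 tail;       /* bytes left over after the last whole word */
};

static inline void tg3_example_split_nvram_req(u32 offset, u32 len,
                                               struct tg3_example_nvram_split *s)
{
        s->head = 0;
        if (offset & 3) {
                s->head = 4 - (offset & 3);
                if (s->head > len)
                        s->head = len;
        }
        len -= s->head;
        s->tail = len & 3;
        s->middle = len - s->tail;
}
/* Example: offset = 1, len = 2 gives head = 2, middle = 0, tail = 0,
 * matching the "offset=1 len=2" case noted in the code above.
 */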
7726
7727 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
7728
7729 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7730 {
7731         struct tg3 *tp = netdev_priv(dev);
7732         int ret;
7733         u32 offset, len, b_offset, odd_len, start, end;
7734         u8 *buf;
7735
7736         if (tp->link_config.phy_is_low_power)
7737                 return -EAGAIN;
7738
7739         if (eeprom->magic != TG3_EEPROM_MAGIC)
7740                 return -EINVAL;
7741
7742         offset = eeprom->offset;
7743         len = eeprom->len;
7744
7745         if ((b_offset = (offset & 3))) {
7746                 /* adjustments to start on required 4 byte boundary */
7747                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
7748                 if (ret)
7749                         return ret;
7750                 start = cpu_to_le32(start);
7751                 len += b_offset;
7752                 offset &= ~3;
7753                 if (len < 4)
7754                         len = 4;
7755         }
7756
7757         odd_len = 0;
7758         if (len & 3) {
7759                 /* adjustments to end on required 4 byte boundary */
7760                 odd_len = 1;
7761                 len = (len + 3) & ~3;
7762                 ret = tg3_nvram_read(tp, offset+len-4, &end);
7763                 if (ret)
7764                         return ret;
7765                 end = cpu_to_le32(end);
7766         }
7767
7768         buf = data;
7769         if (b_offset || odd_len) {
7770                 buf = kmalloc(len, GFP_KERNEL);
7771                 if (!buf)
7772                         return -ENOMEM;
7773                 if (b_offset)
7774                         memcpy(buf, &start, 4);
7775                 if (odd_len)
7776                         memcpy(buf+len-4, &end, 4);
7777                 memcpy(buf + b_offset, data, eeprom->len);
7778         }
7779
7780         ret = tg3_nvram_write_block(tp, offset, len, buf);
7781
7782         if (buf != data)
7783                 kfree(buf);
7784
7785         return ret;
7786 }
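
/* Illustrative sketch, not driver code: tg3_set_eeprom() widens an
 * unaligned write to whole NVRAM words by read-modify-write of the
 * boundary words ('start' and 'end' above).  The hypothetical helper
 * below only mirrors that arithmetic.  For offset = 5, len = 6 it
 * yields offset = 4, len = 8: the block actually programmed covers
 * bytes 4..11, with byte 4 preserved from 'start' and byte 11
 * preserved from 'end'.
 */
static inline void tg3_example_widen_nvram_write(u32 *offset, u32 *len)
{
        u32 b_offset = *offset & 3;

        if (b_offset) {
                *offset -= b_offset;            /* back up to a word start */
                *len += b_offset;
                if (*len < 4)
                        *len = 4;
        }
        *len = (*len + 3) & ~3;                 /* round the end up to a word */
}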
7787
7788 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7789 {
7790         struct tg3 *tp = netdev_priv(dev);
7791
7792         cmd->supported = (SUPPORTED_Autoneg);
7793
7794         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7795                 cmd->supported |= (SUPPORTED_1000baseT_Half |
7796                                    SUPPORTED_1000baseT_Full);
7797
7798         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
7799                 cmd->supported |= (SUPPORTED_100baseT_Half |
7800                                   SUPPORTED_100baseT_Full |
7801                                   SUPPORTED_10baseT_Half |
7802                                   SUPPORTED_10baseT_Full |
7803                                   SUPPORTED_MII);
7804                 cmd->port = PORT_TP;
7805         } else {
7806                 cmd->supported |= SUPPORTED_FIBRE;
7807                 cmd->port = PORT_FIBRE;
7808         }
7809
7810         cmd->advertising = tp->link_config.advertising;
7811         if (netif_running(dev)) {
7812                 cmd->speed = tp->link_config.active_speed;
7813                 cmd->duplex = tp->link_config.active_duplex;
7814         }
7815         cmd->phy_address = PHY_ADDR;
7816         cmd->transceiver = 0;
7817         cmd->autoneg = tp->link_config.autoneg;
7818         cmd->maxtxpkt = 0;
7819         cmd->maxrxpkt = 0;
7820         return 0;
7821 }
7822
7823 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7824 {
7825         struct tg3 *tp = netdev_priv(dev);
7826
7827         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
7828                 /* These are the only advertisement bits allowed.  */
7829                 if (cmd->autoneg == AUTONEG_ENABLE &&
7830                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7831                                           ADVERTISED_1000baseT_Full |
7832                                           ADVERTISED_Autoneg |
7833                                           ADVERTISED_FIBRE)))
7834                         return -EINVAL;
7835                 /* Fiber can only do SPEED_1000.  */
7836                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7837                          (cmd->speed != SPEED_1000))
7838                         return -EINVAL;
7839         /* Copper cannot force SPEED_1000.  */
7840         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7841                    (cmd->speed == SPEED_1000))
7842                 return -EINVAL;
7843         else if ((cmd->speed == SPEED_1000) &&
7844                  (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7845                 return -EINVAL;
7846
7847         tg3_full_lock(tp, 0);
7848
7849         tp->link_config.autoneg = cmd->autoneg;
7850         if (cmd->autoneg == AUTONEG_ENABLE) {
7851                 tp->link_config.advertising = cmd->advertising;
7852                 tp->link_config.speed = SPEED_INVALID;
7853                 tp->link_config.duplex = DUPLEX_INVALID;
7854         } else {
7855                 tp->link_config.advertising = 0;
7856                 tp->link_config.speed = cmd->speed;
7857                 tp->link_config.duplex = cmd->duplex;
7858         }
7859
7860         if (netif_running(dev))
7861                 tg3_setup_phy(tp, 1);
7862
7863         tg3_full_unlock(tp);
7864
7865         return 0;
7866 }
7867
7868 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7869 {
7870         struct tg3 *tp = netdev_priv(dev);
7871
7872         strcpy(info->driver, DRV_MODULE_NAME);
7873         strcpy(info->version, DRV_MODULE_VERSION);
7874         strcpy(info->fw_version, tp->fw_ver);
7875         strcpy(info->bus_info, pci_name(tp->pdev));
7876 }
7877
7878 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7879 {
7880         struct tg3 *tp = netdev_priv(dev);
7881
7882         wol->supported = WAKE_MAGIC;
7883         wol->wolopts = 0;
7884         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7885                 wol->wolopts = WAKE_MAGIC;
7886         memset(&wol->sopass, 0, sizeof(wol->sopass));
7887 }
7888
7889 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7890 {
7891         struct tg3 *tp = netdev_priv(dev);
7892
7893         if (wol->wolopts & ~WAKE_MAGIC)
7894                 return -EINVAL;
7895         if ((wol->wolopts & WAKE_MAGIC) &&
7896             tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
7897             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7898                 return -EINVAL;
7899
7900         spin_lock_bh(&tp->lock);
7901         if (wol->wolopts & WAKE_MAGIC)
7902                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7903         else
7904                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7905         spin_unlock_bh(&tp->lock);
7906
7907         return 0;
7908 }
7909
7910 static u32 tg3_get_msglevel(struct net_device *dev)
7911 {
7912         struct tg3 *tp = netdev_priv(dev);
7913         return tp->msg_enable;
7914 }
7915
7916 static void tg3_set_msglevel(struct net_device *dev, u32 value)
7917 {
7918         struct tg3 *tp = netdev_priv(dev);
7919         tp->msg_enable = value;
7920 }
7921
7922 #if TG3_TSO_SUPPORT != 0
7923 static int tg3_set_tso(struct net_device *dev, u32 value)
7924 {
7925         struct tg3 *tp = netdev_priv(dev);
7926
7927         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7928                 if (value)
7929                         return -EINVAL;
7930                 return 0;
7931         }
7932         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) {
7933                 if (value)
7934                         dev->features |= NETIF_F_TSO6;
7935                 else
7936                         dev->features &= ~NETIF_F_TSO6;
7937         }
7938         return ethtool_op_set_tso(dev, value);
7939 }
7940 #endif
7941
7942 static int tg3_nway_reset(struct net_device *dev)
7943 {
7944         struct tg3 *tp = netdev_priv(dev);
7945         u32 bmcr;
7946         int r;
7947
7948         if (!netif_running(dev))
7949                 return -EAGAIN;
7950
7951         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7952                 return -EINVAL;
7953
7954         spin_lock_bh(&tp->lock);
7955         r = -EINVAL;
7956         tg3_readphy(tp, MII_BMCR, &bmcr);
7957         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
7958             ((bmcr & BMCR_ANENABLE) ||
7959              (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
7960                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
7961                                            BMCR_ANENABLE);
7962                 r = 0;
7963         }
7964         spin_unlock_bh(&tp->lock);
7965
7966         return r;
7967 }
7968
7969 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7970 {
7971         struct tg3 *tp = netdev_priv(dev);
7972
7973         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7974         ering->rx_mini_max_pending = 0;
7975         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7976                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7977         else
7978                 ering->rx_jumbo_max_pending = 0;
7979
7980         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
7981
7982         ering->rx_pending = tp->rx_pending;
7983         ering->rx_mini_pending = 0;
7984         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7985                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7986         else
7987                 ering->rx_jumbo_pending = 0;
7988
7989         ering->tx_pending = tp->tx_pending;
7990 }
7991
7992 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7993 {
7994         struct tg3 *tp = netdev_priv(dev);
7995         int irq_sync = 0, err = 0;
7996
7997         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7998             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7999             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
8000                 return -EINVAL;
8001
8002         if (netif_running(dev)) {
8003                 tg3_netif_stop(tp);
8004                 irq_sync = 1;
8005         }
8006
8007         tg3_full_lock(tp, irq_sync);
8008
8009         tp->rx_pending = ering->rx_pending;
8010
8011         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
8012             tp->rx_pending > 63)
8013                 tp->rx_pending = 63;
8014         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
8015         tp->tx_pending = ering->tx_pending;
8016
8017         if (netif_running(dev)) {
8018                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8019                 err = tg3_restart_hw(tp, 1);
8020                 if (!err)
8021                         tg3_netif_start(tp);
8022         }
8023
8024         tg3_full_unlock(tp);
8025
8026         return err;
8027 }
8028
8029 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8030 {
8031         struct tg3 *tp = netdev_priv(dev);
8032
8033         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8034         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
8035         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
8036 }
8037
8038 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8039 {
8040         struct tg3 *tp = netdev_priv(dev);
8041         int irq_sync = 0, err = 0;
8042
8043         if (netif_running(dev)) {
8044                 tg3_netif_stop(tp);
8045                 irq_sync = 1;
8046         }
8047
8048         tg3_full_lock(tp, irq_sync);
8049
8050         if (epause->autoneg)
8051                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8052         else
8053                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8054         if (epause->rx_pause)
8055                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
8056         else
8057                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
8058         if (epause->tx_pause)
8059                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
8060         else
8061                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
8062
8063         if (netif_running(dev)) {
8064                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8065                 err = tg3_restart_hw(tp, 1);
8066                 if (!err)
8067                         tg3_netif_start(tp);
8068         }
8069
8070         tg3_full_unlock(tp);
8071
8072         return err;
8073 }
8074
8075 static u32 tg3_get_rx_csum(struct net_device *dev)
8076 {
8077         struct tg3 *tp = netdev_priv(dev);
8078         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8079 }
8080
8081 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8082 {
8083         struct tg3 *tp = netdev_priv(dev);
8084
8085         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8086                 if (data != 0)
8087                         return -EINVAL;
8088                 return 0;
8089         }
8090
8091         spin_lock_bh(&tp->lock);
8092         if (data)
8093                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8094         else
8095                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8096         spin_unlock_bh(&tp->lock);
8097
8098         return 0;
8099 }
8100
8101 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8102 {
8103         struct tg3 *tp = netdev_priv(dev);
8104
8105         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8106                 if (data != 0)
8107                         return -EINVAL;
8108                 return 0;
8109         }
8110
8111         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8112             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8113                 ethtool_op_set_tx_hw_csum(dev, data);
8114         else
8115                 ethtool_op_set_tx_csum(dev, data);
8116
8117         return 0;
8118 }
8119
8120 static int tg3_get_stats_count (struct net_device *dev)
8121 {
8122         return TG3_NUM_STATS;
8123 }
8124
8125 static int tg3_get_test_count (struct net_device *dev)
8126 {
8127         return TG3_NUM_TEST;
8128 }
8129
8130 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8131 {
8132         switch (stringset) {
8133         case ETH_SS_STATS:
8134                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8135                 break;
8136         case ETH_SS_TEST:
8137                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8138                 break;
8139         default:
8140                 WARN_ON(1);     /* we need a WARN() */
8141                 break;
8142         }
8143 }
8144
8145 static int tg3_phys_id(struct net_device *dev, u32 data)
8146 {
8147         struct tg3 *tp = netdev_priv(dev);
8148         int i;
8149
8150         if (!netif_running(tp->dev))
8151                 return -EAGAIN;
8152
8153         if (data == 0)
8154                 data = 2;
8155
8156         for (i = 0; i < (data * 2); i++) {
8157                 if ((i % 2) == 0)
8158                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8159                                            LED_CTRL_1000MBPS_ON |
8160                                            LED_CTRL_100MBPS_ON |
8161                                            LED_CTRL_10MBPS_ON |
8162                                            LED_CTRL_TRAFFIC_OVERRIDE |
8163                                            LED_CTRL_TRAFFIC_BLINK |
8164                                            LED_CTRL_TRAFFIC_LED);
8165
8166                 else
8167                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8168                                            LED_CTRL_TRAFFIC_OVERRIDE);
8169
8170                 if (msleep_interruptible(500))
8171                         break;
8172         }
8173         tw32(MAC_LED_CTRL, tp->led_ctrl);
8174         return 0;
8175 }
8176
8177 static void tg3_get_ethtool_stats (struct net_device *dev,
8178                                    struct ethtool_stats *estats, u64 *tmp_stats)
8179 {
8180         struct tg3 *tp = netdev_priv(dev);
8181         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8182 }
8183
8184 #define NVRAM_TEST_SIZE 0x100
8185 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
8186
8187 static int tg3_test_nvram(struct tg3 *tp)
8188 {
8189         u32 *buf, csum, magic;
8190         int i, j, err = 0, size;
8191
8192         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8193                 return -EIO;
8194
8195         if (magic == TG3_EEPROM_MAGIC)
8196                 size = NVRAM_TEST_SIZE;
8197         else if ((magic & 0xff000000) == 0xa5000000) {
8198                 if ((magic & 0xe00000) == 0x200000)
8199                         size = NVRAM_SELFBOOT_FORMAT1_SIZE;
8200                 else
8201                         return 0;
8202         } else
8203                 return -EIO;
8204
8205         buf = kmalloc(size, GFP_KERNEL);
8206         if (buf == NULL)
8207                 return -ENOMEM;
8208
8209         err = -EIO;
8210         for (i = 0, j = 0; i < size; i += 4, j++) {
8211                 u32 val;
8212
8213                 if ((err = tg3_nvram_read(tp, i, &val)) != 0)
8214                         break;
8215                 buf[j] = cpu_to_le32(val);
8216         }
8217         if (i < size)
8218                 goto out;
8219
8220         /* Selfboot format */
8221         if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) {
8222                 u8 *buf8 = (u8 *) buf, csum8 = 0;
8223
8224                 for (i = 0; i < size; i++)
8225                         csum8 += buf8[i];
8226
8227                 if (csum8 == 0) {
8228                         err = 0;
8229                         goto out;
8230                 }
8231
8232                 err = -EIO;
8233                 goto out;
8234         }
8235
8236         /* Bootstrap checksum at offset 0x10 */
8237         csum = calc_crc((unsigned char *) buf, 0x10);
8238         if (csum != cpu_to_le32(buf[0x10/4]))
8239                 goto out;
8240
8241         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
8242         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
8243         if (csum != cpu_to_le32(buf[0xfc/4]))
8244                 goto out;
8245
8246         err = 0;
8247
8248 out:
8249         kfree(buf);
8250         return err;
8251 }
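
/* Illustrative sketch, not referenced by the driver: the self-boot
 * image check in tg3_test_nvram() is a plain byte sum -- the image
 * carries a checksum byte chosen so that all 'size' bytes sum to zero
 * modulo 256.  A hypothetical stand-alone verifier:
 */
static inline int tg3_example_selfboot_csum_ok(const u8 *img, u32 size)
{
        u8 sum = 0;
        u32 i;

        for (i = 0; i < size; i++)
                sum += img[i];

        return sum == 0;        /* any non-zero remainder means corruption */
}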
8252
8253 #define TG3_SERDES_TIMEOUT_SEC  2
8254 #define TG3_COPPER_TIMEOUT_SEC  6
8255
8256 static int tg3_test_link(struct tg3 *tp)
8257 {
8258         int i, max;
8259
8260         if (!netif_running(tp->dev))
8261                 return -ENODEV;
8262
8263         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8264                 max = TG3_SERDES_TIMEOUT_SEC;
8265         else
8266                 max = TG3_COPPER_TIMEOUT_SEC;
8267
8268         for (i = 0; i < max; i++) {
8269                 if (netif_carrier_ok(tp->dev))
8270                         return 0;
8271
8272                 if (msleep_interruptible(1000))
8273                         break;
8274         }
8275
8276         return -EIO;
8277 }
8278
8279 /* Only test the commonly used registers */
8280 static int tg3_test_registers(struct tg3 *tp)
8281 {
8282         int i, is_5705;
8283         u32 offset, read_mask, write_mask, val, save_val, read_val;
8284         static struct {
8285                 u16 offset;
8286                 u16 flags;
8287 #define TG3_FL_5705     0x1
8288 #define TG3_FL_NOT_5705 0x2
8289 #define TG3_FL_NOT_5788 0x4
8290                 u32 read_mask;
8291                 u32 write_mask;
8292         } reg_tbl[] = {
8293                 /* MAC Control Registers */
8294                 { MAC_MODE, TG3_FL_NOT_5705,
8295                         0x00000000, 0x00ef6f8c },
8296                 { MAC_MODE, TG3_FL_5705,
8297                         0x00000000, 0x01ef6b8c },
8298                 { MAC_STATUS, TG3_FL_NOT_5705,
8299                         0x03800107, 0x00000000 },
8300                 { MAC_STATUS, TG3_FL_5705,
8301                         0x03800100, 0x00000000 },
8302                 { MAC_ADDR_0_HIGH, 0x0000,
8303                         0x00000000, 0x0000ffff },
8304                 { MAC_ADDR_0_LOW, 0x0000,
8305                         0x00000000, 0xffffffff },
8306                 { MAC_RX_MTU_SIZE, 0x0000,
8307                         0x00000000, 0x0000ffff },
8308                 { MAC_TX_MODE, 0x0000,
8309                         0x00000000, 0x00000070 },
8310                 { MAC_TX_LENGTHS, 0x0000,
8311                         0x00000000, 0x00003fff },
8312                 { MAC_RX_MODE, TG3_FL_NOT_5705,
8313                         0x00000000, 0x000007fc },
8314                 { MAC_RX_MODE, TG3_FL_5705,
8315                         0x00000000, 0x000007dc },
8316                 { MAC_HASH_REG_0, 0x0000,
8317                         0x00000000, 0xffffffff },
8318                 { MAC_HASH_REG_1, 0x0000,
8319                         0x00000000, 0xffffffff },
8320                 { MAC_HASH_REG_2, 0x0000,
8321                         0x00000000, 0xffffffff },
8322                 { MAC_HASH_REG_3, 0x0000,
8323                         0x00000000, 0xffffffff },
8324
8325                 /* Receive Data and Receive BD Initiator Control Registers. */
8326                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
8327                         0x00000000, 0xffffffff },
8328                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
8329                         0x00000000, 0xffffffff },
8330                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
8331                         0x00000000, 0x00000003 },
8332                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
8333                         0x00000000, 0xffffffff },
8334                 { RCVDBDI_STD_BD+0, 0x0000,
8335                         0x00000000, 0xffffffff },
8336                 { RCVDBDI_STD_BD+4, 0x0000,
8337                         0x00000000, 0xffffffff },
8338                 { RCVDBDI_STD_BD+8, 0x0000,
8339                         0x00000000, 0xffff0002 },
8340                 { RCVDBDI_STD_BD+0xc, 0x0000,
8341                         0x00000000, 0xffffffff },
8342
8343                 /* Receive BD Initiator Control Registers. */
8344                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
8345                         0x00000000, 0xffffffff },
8346                 { RCVBDI_STD_THRESH, TG3_FL_5705,
8347                         0x00000000, 0x000003ff },
8348                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
8349                         0x00000000, 0xffffffff },
8350
8351                 /* Host Coalescing Control Registers. */
8352                 { HOSTCC_MODE, TG3_FL_NOT_5705,
8353                         0x00000000, 0x00000004 },
8354                 { HOSTCC_MODE, TG3_FL_5705,
8355                         0x00000000, 0x000000f6 },
8356                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
8357                         0x00000000, 0xffffffff },
8358                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
8359                         0x00000000, 0x000003ff },
8360                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
8361                         0x00000000, 0xffffffff },
8362                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
8363                         0x00000000, 0x000003ff },
8364                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
8365                         0x00000000, 0xffffffff },
8366                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8367                         0x00000000, 0x000000ff },
8368                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
8369                         0x00000000, 0xffffffff },
8370                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8371                         0x00000000, 0x000000ff },
8372                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
8373                         0x00000000, 0xffffffff },
8374                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
8375                         0x00000000, 0xffffffff },
8376                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8377                         0x00000000, 0xffffffff },
8378                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8379                         0x00000000, 0x000000ff },
8380                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8381                         0x00000000, 0xffffffff },
8382                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8383                         0x00000000, 0x000000ff },
8384                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
8385                         0x00000000, 0xffffffff },
8386                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
8387                         0x00000000, 0xffffffff },
8388                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
8389                         0x00000000, 0xffffffff },
8390                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
8391                         0x00000000, 0xffffffff },
8392                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
8393                         0x00000000, 0xffffffff },
8394                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
8395                         0xffffffff, 0x00000000 },
8396                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
8397                         0xffffffff, 0x00000000 },
8398
8399                 /* Buffer Manager Control Registers. */
8400                 { BUFMGR_MB_POOL_ADDR, 0x0000,
8401                         0x00000000, 0x007fff80 },
8402                 { BUFMGR_MB_POOL_SIZE, 0x0000,
8403                         0x00000000, 0x007fffff },
8404                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
8405                         0x00000000, 0x0000003f },
8406                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
8407                         0x00000000, 0x000001ff },
8408                 { BUFMGR_MB_HIGH_WATER, 0x0000,
8409                         0x00000000, 0x000001ff },
8410                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
8411                         0xffffffff, 0x00000000 },
8412                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
8413                         0xffffffff, 0x00000000 },
8414
8415                 /* Mailbox Registers */
8416                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
8417                         0x00000000, 0x000001ff },
8418                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
8419                         0x00000000, 0x000001ff },
8420                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
8421                         0x00000000, 0x000007ff },
8422                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
8423                         0x00000000, 0x000001ff },
8424
8425                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
8426         };
8427
8428         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8429                 is_5705 = 1;
8430         else
8431                 is_5705 = 0;
8432
8433         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
8434                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
8435                         continue;
8436
8437                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
8438                         continue;
8439
8440                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8441                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
8442                         continue;
8443
8444                 offset = (u32) reg_tbl[i].offset;
8445                 read_mask = reg_tbl[i].read_mask;
8446                 write_mask = reg_tbl[i].write_mask;
8447
8448                 /* Save the original register content */
8449                 save_val = tr32(offset);
8450
8451                 /* Determine the read-only value. */
8452                 read_val = save_val & read_mask;
8453
8454                 /* Write zero to the register, then make sure the read-only bits
8455                  * are not changed and the read/write bits are all zeros.
8456                  */
8457                 tw32(offset, 0);
8458
8459                 val = tr32(offset);
8460
8461                 /* Test the read-only and read/write bits. */
8462                 if (((val & read_mask) != read_val) || (val & write_mask))
8463                         goto out;
8464
8465                 /* Write ones to all the bits defined by RdMask and WrMask, then
8466                  * make sure the read-only bits are not changed and the
8467                  * read/write bits are all ones.
8468                  */
8469                 tw32(offset, read_mask | write_mask);
8470
8471                 val = tr32(offset);
8472
8473                 /* Test the read-only bits. */
8474                 if ((val & read_mask) != read_val)
8475                         goto out;
8476
8477                 /* Test the read/write bits. */
8478                 if ((val & write_mask) != write_mask)
8479                         goto out;
8480
8481                 tw32(offset, save_val);
8482         }
8483
8484         return 0;
8485
8486 out:
8487         printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
8488         tw32(offset, save_val);
8489         return -EIO;
8490 }
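
/* Illustrative sketch only: each entry in reg_tbl above is exercised
 * with two writes, and the pass/fail rule boils down to the predicate
 * below.  After writing 0, the read-only bits must still equal the
 * saved value and every writable bit must read back as 0; after
 * writing (read_mask | write_mask), the read-only bits must again be
 * unchanged and every writable bit must read back as 1.  The helper is
 * hypothetical and not called by the test itself.
 */
static inline int tg3_example_reg_masks_ok(u32 read_val, u32 read_mask,
                                           u32 write_mask,
                                           u32 val_after_zero,
                                           u32 val_after_ones)
{
        if ((val_after_zero & read_mask) != read_val ||
            (val_after_zero & write_mask) != 0)
                return 0;
        if ((val_after_ones & read_mask) != read_val ||
            (val_after_ones & write_mask) != write_mask)
                return 0;
        return 1;
}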
8491
8492 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8493 {
8494         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8495         int i;
8496         u32 j;
8497
8498         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
8499                 for (j = 0; j < len; j += 4) {
8500                         u32 val;
8501
8502                         tg3_write_mem(tp, offset + j, test_pattern[i]);
8503                         tg3_read_mem(tp, offset + j, &val);
8504                         if (val != test_pattern[i])
8505                                 return -EIO;
8506                 }
8507         }
8508         return 0;
8509 }
8510
8511 static int tg3_test_memory(struct tg3 *tp)
8512 {
8513         static struct mem_entry {
8514                 u32 offset;
8515                 u32 len;
8516         } mem_tbl_570x[] = {
8517                 { 0x00000000, 0x00b50},
8518                 { 0x00002000, 0x1c000},
8519                 { 0xffffffff, 0x00000}
8520         }, mem_tbl_5705[] = {
8521                 { 0x00000100, 0x0000c},
8522                 { 0x00000200, 0x00008},
8523                 { 0x00004000, 0x00800},
8524                 { 0x00006000, 0x01000},
8525                 { 0x00008000, 0x02000},
8526                 { 0x00010000, 0x0e000},
8527                 { 0xffffffff, 0x00000}
8528         }, mem_tbl_5755[] = {
8529                 { 0x00000200, 0x00008},
8530                 { 0x00004000, 0x00800},
8531                 { 0x00006000, 0x00800},
8532                 { 0x00008000, 0x02000},
8533                 { 0x00010000, 0x0c000},
8534                 { 0xffffffff, 0x00000}
8535         };
8536         struct mem_entry *mem_tbl;
8537         int err = 0;
8538         int i;
8539
8540         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8541                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8542                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8543                         mem_tbl = mem_tbl_5755;
8544                 else
8545                         mem_tbl = mem_tbl_5705;
8546         } else
8547                 mem_tbl = mem_tbl_570x;
8548
8549         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8550                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8551                     mem_tbl[i].len)) != 0)
8552                         break;
8553         }
8554
8555         return err;
8556 }
8557
8558 #define TG3_MAC_LOOPBACK        0
8559 #define TG3_PHY_LOOPBACK        1
8560
8561 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
8562 {
8563         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
8564         u32 desc_idx;
8565         struct sk_buff *skb, *rx_skb;
8566         u8 *tx_data;
8567         dma_addr_t map;
8568         int num_pkts, tx_len, rx_len, i, err;
8569         struct tg3_rx_buffer_desc *desc;
8570
8571         if (loopback_mode == TG3_MAC_LOOPBACK) {
8572                 /* HW erratum: MAC loopback fails in some cases on the
8573                  * 5780.  Normal traffic and PHY loopback are not
8574                  * affected by this erratum.
8575                  */
8576                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8577                         return 0;
8578
8579                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8580                            MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY;
8581                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
8582                         mac_mode |= MAC_MODE_PORT_MODE_MII;
8583                 else
8584                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
8585                 tw32(MAC_MODE, mac_mode);
8586         } else if (loopback_mode == TG3_PHY_LOOPBACK) {
8587                 u32 val;
8588
8589                 val = BMCR_LOOPBACK | BMCR_FULLDPLX;
8590                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
8591                         val |= BMCR_SPEED100;
8592                 else
8593                         val |= BMCR_SPEED1000;
8594
8595                 tg3_writephy(tp, MII_BMCR, val);
8596                 udelay(40);
8597                 /* reset to prevent losing 1st rx packet intermittently */
8598                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
8599                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8600                         udelay(10);
8601                         tw32_f(MAC_RX_MODE, tp->rx_mode);
8602                 }
8603                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8604                            MAC_MODE_LINK_POLARITY;
8605                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
8606                         mac_mode |= MAC_MODE_PORT_MODE_MII;
8607                 else
8608                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
8609                 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
8610                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8611                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
8612                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8613                 }
8614                 tw32(MAC_MODE, mac_mode);
8615         }
8616         else
8617                 return -EINVAL;
8618
8619         err = -EIO;
8620
8621         tx_len = 1514;
8622         skb = netdev_alloc_skb(tp->dev, tx_len);
8623         if (!skb)
8624                 return -ENOMEM;
8625
8626         tx_data = skb_put(skb, tx_len);
8627         memcpy(tx_data, tp->dev->dev_addr, 6);
8628         memset(tx_data + 6, 0x0, 8);
8629
8630         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
8631
8632         for (i = 14; i < tx_len; i++)
8633                 tx_data[i] = (u8) (i & 0xff);
8634
8635         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
8636
8637         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8638              HOSTCC_MODE_NOW);
8639
8640         udelay(10);
8641
8642         rx_start_idx = tp->hw_status->idx[0].rx_producer;
8643
8644         num_pkts = 0;
8645
8646         tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
8647
8648         tp->tx_prod++;
8649         num_pkts++;
8650
8651         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8652                      tp->tx_prod);
8653         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
8654
8655         udelay(10);
8656
8657         /* 250 usec to allow enough time on some 10/100 Mbps devices.  */
8658         for (i = 0; i < 25; i++) {
8659                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8660                        HOSTCC_MODE_NOW);
8661
8662                 udelay(10);
8663
8664                 tx_idx = tp->hw_status->idx[0].tx_consumer;
8665                 rx_idx = tp->hw_status->idx[0].rx_producer;
8666                 if ((tx_idx == tp->tx_prod) &&
8667                     (rx_idx == (rx_start_idx + num_pkts)))
8668                         break;
8669         }
8670
8671         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8672         dev_kfree_skb(skb);
8673
8674         if (tx_idx != tp->tx_prod)
8675                 goto out;
8676
8677         if (rx_idx != rx_start_idx + num_pkts)
8678                 goto out;
8679
8680         desc = &tp->rx_rcb[rx_start_idx];
8681         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8682         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8683         if (opaque_key != RXD_OPAQUE_RING_STD)
8684                 goto out;
8685
8686         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8687             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8688                 goto out;
8689
8690         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8691         if (rx_len != tx_len)
8692                 goto out;
8693
8694         rx_skb = tp->rx_std_buffers[desc_idx].skb;
8695
8696         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8697         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
8698
8699         for (i = 14; i < tx_len; i++) {
8700                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
8701                         goto out;
8702         }
8703         err = 0;
8704
8705         /* tg3_free_rings will unmap and free the rx_skb */
8706 out:
8707         return err;
8708 }
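
/* Illustrative sketch, not driver code: the loopback frame built in
 * tg3_run_loopback() is the device's own MAC address as destination,
 * eight zero bytes covering the source address and the length/type
 * field, and then a counting pattern, so receive verification only has
 * to re-derive the pattern.  The helper name is hypothetical.
 */
static inline void tg3_example_build_lb_frame(u8 *buf, const u8 *mac, int len)
{
        int i;

        memcpy(buf, mac, 6);            /* destination = our own address */
        memset(buf + 6, 0, 8);          /* source + length/type left zero */
        for (i = 14; i < len; i++)
                buf[i] = (u8) (i & 0xff);       /* payload: low byte of offset */
}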
8709
8710 #define TG3_MAC_LOOPBACK_FAILED         1
8711 #define TG3_PHY_LOOPBACK_FAILED         2
8712 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
8713                                          TG3_PHY_LOOPBACK_FAILED)
8714
8715 static int tg3_test_loopback(struct tg3 *tp)
8716 {
8717         int err = 0;
8718
8719         if (!netif_running(tp->dev))
8720                 return TG3_LOOPBACK_FAILED;
8721
8722         err = tg3_reset_hw(tp, 1);
8723         if (err)
8724                 return TG3_LOOPBACK_FAILED;
8725
8726         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8727                 err |= TG3_MAC_LOOPBACK_FAILED;
8728         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8729                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8730                         err |= TG3_PHY_LOOPBACK_FAILED;
8731         }
8732
8733         return err;
8734 }
8735
8736 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
8737                           u64 *data)
8738 {
8739         struct tg3 *tp = netdev_priv(dev);
8740
8741         if (tp->link_config.phy_is_low_power)
8742                 tg3_set_power_state(tp, PCI_D0);
8743
8744         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
8745
8746         if (tg3_test_nvram(tp) != 0) {
8747                 etest->flags |= ETH_TEST_FL_FAILED;
8748                 data[0] = 1;
8749         }
8750         if (tg3_test_link(tp) != 0) {
8751                 etest->flags |= ETH_TEST_FL_FAILED;
8752                 data[1] = 1;
8753         }
8754         if (etest->flags & ETH_TEST_FL_OFFLINE) {
8755                 int err, irq_sync = 0;
8756
8757                 if (netif_running(dev)) {
8758                         tg3_netif_stop(tp);
8759                         irq_sync = 1;
8760                 }
8761
8762                 tg3_full_lock(tp, irq_sync);
8763
8764                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
8765                 err = tg3_nvram_lock(tp);
8766                 tg3_halt_cpu(tp, RX_CPU_BASE);
8767                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8768                         tg3_halt_cpu(tp, TX_CPU_BASE);
8769                 if (!err)
8770                         tg3_nvram_unlock(tp);
8771
8772                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
8773                         tg3_phy_reset(tp);
8774
8775                 if (tg3_test_registers(tp) != 0) {
8776                         etest->flags |= ETH_TEST_FL_FAILED;
8777                         data[2] = 1;
8778                 }
8779                 if (tg3_test_memory(tp) != 0) {
8780                         etest->flags |= ETH_TEST_FL_FAILED;
8781                         data[3] = 1;
8782                 }
8783                 if ((data[4] = tg3_test_loopback(tp)) != 0)
8784                         etest->flags |= ETH_TEST_FL_FAILED;
8785
8786                 tg3_full_unlock(tp);
8787
8788                 if (tg3_test_interrupt(tp) != 0) {
8789                         etest->flags |= ETH_TEST_FL_FAILED;
8790                         data[5] = 1;
8791                 }
8792
8793                 tg3_full_lock(tp, 0);
8794
8795                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8796                 if (netif_running(dev)) {
8797                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8798                         if (!tg3_restart_hw(tp, 1))
8799                                 tg3_netif_start(tp);
8800                 }
8801
8802                 tg3_full_unlock(tp);
8803         }
8804         if (tp->link_config.phy_is_low_power)
8805                 tg3_set_power_state(tp, PCI_D3hot);
8806
8807 }
8808
8809 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8810 {
8811         struct mii_ioctl_data *data = if_mii(ifr);
8812         struct tg3 *tp = netdev_priv(dev);
8813         int err;
8814
8815         switch(cmd) {
8816         case SIOCGMIIPHY:
8817                 data->phy_id = PHY_ADDR;
8818
8819                 /* fallthru */
8820         case SIOCGMIIREG: {
8821                 u32 mii_regval;
8822
8823                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8824                         break;                  /* We have no PHY */
8825
8826                 if (tp->link_config.phy_is_low_power)
8827                         return -EAGAIN;
8828
8829                 spin_lock_bh(&tp->lock);
8830                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
8831                 spin_unlock_bh(&tp->lock);
8832
8833                 data->val_out = mii_regval;
8834
8835                 return err;
8836         }
8837
8838         case SIOCSMIIREG:
8839                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8840                         break;                  /* We have no PHY */
8841
8842                 if (!capable(CAP_NET_ADMIN))
8843                         return -EPERM;
8844
8845                 if (tp->link_config.phy_is_low_power)
8846                         return -EAGAIN;
8847
8848                 spin_lock_bh(&tp->lock);
8849                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
8850                 spin_unlock_bh(&tp->lock);
8851
8852                 return err;
8853
8854         default:
8855                 /* do nothing */
8856                 break;
8857         }
8858         return -EOPNOTSUPP;
8859 }
8860
8861 #if TG3_VLAN_TAG_USED
8862 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
8863 {
8864         struct tg3 *tp = netdev_priv(dev);
8865
8866         if (netif_running(dev))
8867                 tg3_netif_stop(tp);
8868
8869         tg3_full_lock(tp, 0);
8870
8871         tp->vlgrp = grp;
8872
8873         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8874         __tg3_set_rx_mode(dev);
8875
8876         tg3_full_unlock(tp);
8877
8878         if (netif_running(dev))
8879                 tg3_netif_start(tp);
8880 }
8881
8882 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
8883 {
8884         struct tg3 *tp = netdev_priv(dev);
8885
8886         if (netif_running(dev))
8887                 tg3_netif_stop(tp);
8888
8889         tg3_full_lock(tp, 0);
8890         if (tp->vlgrp)
8891                 tp->vlgrp->vlan_devices[vid] = NULL;
8892         tg3_full_unlock(tp);
8893
8894         if (netif_running(dev))
8895                 tg3_netif_start(tp);
8896 }
8897 #endif
8898
8899 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8900 {
8901         struct tg3 *tp = netdev_priv(dev);
8902
8903         memcpy(ec, &tp->coal, sizeof(*ec));
8904         return 0;
8905 }
8906
8907 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8908 {
8909         struct tg3 *tp = netdev_priv(dev);
8910         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8911         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8912
8913         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8914                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8915                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8916                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8917                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8918         }
8919
8920         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8921             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8922             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8923             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8924             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8925             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8926             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8927             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8928             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8929             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8930                 return -EINVAL;
8931
8932         /* No rx interrupts will be generated if both are zero */
8933         if ((ec->rx_coalesce_usecs == 0) &&
8934             (ec->rx_max_coalesced_frames == 0))
8935                 return -EINVAL;
8936
8937         /* No tx interrupts will be generated if both are zero */
8938         if ((ec->tx_coalesce_usecs == 0) &&
8939             (ec->tx_max_coalesced_frames == 0))
8940                 return -EINVAL;
8941
8942         /* Only copy relevant parameters, ignore all others. */
8943         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8944         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8945         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8946         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8947         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8948         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8949         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8950         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8951         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8952
8953         if (netif_running(dev)) {
8954                 tg3_full_lock(tp, 0);
8955                 __tg3_set_coalesce(tp, &tp->coal);
8956                 tg3_full_unlock(tp);
8957         }
8958         return 0;
8959 }
8960
8961 static const struct ethtool_ops tg3_ethtool_ops = {
8962         .get_settings           = tg3_get_settings,
8963         .set_settings           = tg3_set_settings,
8964         .get_drvinfo            = tg3_get_drvinfo,
8965         .get_regs_len           = tg3_get_regs_len,
8966         .get_regs               = tg3_get_regs,
8967         .get_wol                = tg3_get_wol,
8968         .set_wol                = tg3_set_wol,
8969         .get_msglevel           = tg3_get_msglevel,
8970         .set_msglevel           = tg3_set_msglevel,
8971         .nway_reset             = tg3_nway_reset,
8972         .get_link               = ethtool_op_get_link,
8973         .get_eeprom_len         = tg3_get_eeprom_len,
8974         .get_eeprom             = tg3_get_eeprom,
8975         .set_eeprom             = tg3_set_eeprom,
8976         .get_ringparam          = tg3_get_ringparam,
8977         .set_ringparam          = tg3_set_ringparam,
8978         .get_pauseparam         = tg3_get_pauseparam,
8979         .set_pauseparam         = tg3_set_pauseparam,
8980         .get_rx_csum            = tg3_get_rx_csum,
8981         .set_rx_csum            = tg3_set_rx_csum,
8982         .get_tx_csum            = ethtool_op_get_tx_csum,
8983         .set_tx_csum            = tg3_set_tx_csum,
8984         .get_sg                 = ethtool_op_get_sg,
8985         .set_sg                 = ethtool_op_set_sg,
8986 #if TG3_TSO_SUPPORT != 0
8987         .get_tso                = ethtool_op_get_tso,
8988         .set_tso                = tg3_set_tso,
8989 #endif
8990         .self_test_count        = tg3_get_test_count,
8991         .self_test              = tg3_self_test,
8992         .get_strings            = tg3_get_strings,
8993         .phys_id                = tg3_phys_id,
8994         .get_stats_count        = tg3_get_stats_count,
8995         .get_ethtool_stats      = tg3_get_ethtool_stats,
8996         .get_coalesce           = tg3_get_coalesce,
8997         .set_coalesce           = tg3_set_coalesce,
8998         .get_perm_addr          = ethtool_op_get_perm_addr,
8999 };
9000
9001 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9002 {
9003         u32 cursize, val, magic;
9004
9005         tp->nvram_size = EEPROM_CHIP_SIZE;
9006
9007         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9008                 return;
9009
9010         if ((magic != TG3_EEPROM_MAGIC) && ((magic & 0xff000000) != 0xa5000000))
9011                 return;
9012
9013         /*
9014          * Size the chip by reading offsets at increasing powers of two.
9015          * When we encounter our validation signature, we know the addressing
9016          * has wrapped around, and thus have our chip size.
9017          */
9018         cursize = 0x10;
9019
9020         while (cursize < tp->nvram_size) {
9021                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
9022                         return;
9023
9024                 if (val == magic)
9025                         break;
9026
9027                 cursize <<= 1;
9028         }
9029
9030         tp->nvram_size = cursize;
9031 }
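
/* Illustrative model, not called anywhere: the sizing loop above
 * relies on small EEPROMs aliasing their address space -- once the
 * probe offset reaches the real part size, the read wraps back to
 * offset 0 and returns the magic word again.  The helper below is a
 * hypothetical simulation of that search, assuming the part size is a
 * non-zero power of two.
 */
static inline u32 tg3_example_probe_eeprom_size(u32 real_size, u32 max_size)
{
        u32 cursize = 0x10;

        while (cursize < max_size) {
                /* A probe at 'cursize' aliases back to offset 0 once
                 * 'cursize' reaches a multiple of the real part size,
                 * which is where the magic word shows up again.
                 */
                if ((cursize % real_size) == 0)
                        break;
                cursize <<= 1;
        }
        return cursize;         /* e.g. real_size = 0x80 returns 0x80 */
}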
9032
9033 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9034 {
9035         u32 val;
9036
9037         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
9038                 return;
9039
9040         /* Selfboot format */
9041         if (val != TG3_EEPROM_MAGIC) {
9042                 tg3_get_eeprom_size(tp);
9043                 return;
9044         }
9045
9046         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9047                 if (val != 0) {
9048                         tp->nvram_size = (val >> 16) * 1024;
9049                         return;
9050                 }
9051         }
9052         tp->nvram_size = 0x20000;
9053 }
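
/* Illustrative arithmetic, hypothetical helper, not used by the
 * driver: for a standard image the word read from NVRAM offset 0xf0
 * above carries the part size in KB in its upper 16 bits, so
 * val = 0x00800000 decodes to 0x0080 * 1024 = 0x20000 bytes -- the
 * same 128 KB used as the fallback default.
 */
static inline u32 tg3_example_nvram_size_from_f0(u32 val)
{
        return (val >> 16) * 1024;      /* upper half-word = size in KB */
}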
9054
9055 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
9056 {
9057         u32 nvcfg1;
9058
9059         nvcfg1 = tr32(NVRAM_CFG1);
9060         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
9061                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9062         }
9063         else {
9064                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9065                 tw32(NVRAM_CFG1, nvcfg1);
9066         }
9067
9068         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
9069             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
9070                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
9071                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
9072                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9073                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9074                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9075                                 break;
9076                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
9077                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9078                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
9079                                 break;
9080                         case FLASH_VENDOR_ATMEL_EEPROM:
9081                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9082                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9083                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9084                                 break;
9085                         case FLASH_VENDOR_ST:
9086                                 tp->nvram_jedecnum = JEDEC_ST;
9087                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
9088                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9089                                 break;
9090                         case FLASH_VENDOR_SAIFUN:
9091                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
9092                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
9093                                 break;
9094                         case FLASH_VENDOR_SST_SMALL:
9095                         case FLASH_VENDOR_SST_LARGE:
9096                                 tp->nvram_jedecnum = JEDEC_SST;
9097                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
9098                                 break;
9099                 }
9100         }
9101         else {
9102                 tp->nvram_jedecnum = JEDEC_ATMEL;
9103                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9104                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9105         }
9106 }
9107
9108 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
9109 {
9110         u32 nvcfg1;
9111
9112         nvcfg1 = tr32(NVRAM_CFG1);
9113
9114         /* NVRAM protection for TPM */
9115         if (nvcfg1 & (1 << 27))
9116                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9117
9118         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9119                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
9120                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
9121                         tp->nvram_jedecnum = JEDEC_ATMEL;
9122                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9123                         break;
9124                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9125                         tp->nvram_jedecnum = JEDEC_ATMEL;
9126                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9127                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9128                         break;
9129                 case FLASH_5752VENDOR_ST_M45PE10:
9130                 case FLASH_5752VENDOR_ST_M45PE20:
9131                 case FLASH_5752VENDOR_ST_M45PE40:
9132                         tp->nvram_jedecnum = JEDEC_ST;
9133                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9134                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9135                         break;
9136         }
9137
9138         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
9139                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
9140                         case FLASH_5752PAGE_SIZE_256:
9141                                 tp->nvram_pagesize = 256;
9142                                 break;
9143                         case FLASH_5752PAGE_SIZE_512:
9144                                 tp->nvram_pagesize = 512;
9145                                 break;
9146                         case FLASH_5752PAGE_SIZE_1K:
9147                                 tp->nvram_pagesize = 1024;
9148                                 break;
9149                         case FLASH_5752PAGE_SIZE_2K:
9150                                 tp->nvram_pagesize = 2048;
9151                                 break;
9152                         case FLASH_5752PAGE_SIZE_4K:
9153                                 tp->nvram_pagesize = 4096;
9154                                 break;
9155                         case FLASH_5752PAGE_SIZE_264:
9156                                 tp->nvram_pagesize = 264;
9157                                 break;
9158                 }
9159         }
9160         else {
9161                 /* For eeprom, set pagesize to maximum eeprom size */
9162                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9163
9164                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9165                 tw32(NVRAM_CFG1, nvcfg1);
9166         }
9167 }
9168
9169 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
9170 {
9171         u32 nvcfg1;
9172
9173         nvcfg1 = tr32(NVRAM_CFG1);
9174
9175         /* NVRAM protection for TPM */
9176         if (nvcfg1 & (1 << 27))
9177                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9178
9179         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9180                 case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ:
9181                 case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ:
9182                         tp->nvram_jedecnum = JEDEC_ATMEL;
9183                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9184                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9185
9186                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9187                         tw32(NVRAM_CFG1, nvcfg1);
9188                         break;
9189                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9190                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9191                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9192                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9193                 case FLASH_5755VENDOR_ATMEL_FLASH_4:
9194                         tp->nvram_jedecnum = JEDEC_ATMEL;
9195                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9196                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9197                         tp->nvram_pagesize = 264;
9198                         break;
9199                 case FLASH_5752VENDOR_ST_M45PE10:
9200                 case FLASH_5752VENDOR_ST_M45PE20:
9201                 case FLASH_5752VENDOR_ST_M45PE40:
9202                         tp->nvram_jedecnum = JEDEC_ST;
9203                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9204                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9205                         tp->nvram_pagesize = 256;
9206                         break;
9207         }
9208 }
9209
9210 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
9211 {
9212         u32 nvcfg1;
9213
9214         nvcfg1 = tr32(NVRAM_CFG1);
9215
9216         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9217                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
9218                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
9219                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
9220                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
9221                         tp->nvram_jedecnum = JEDEC_ATMEL;
9222                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9223                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9224
9225                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9226                         tw32(NVRAM_CFG1, nvcfg1);
9227                         break;
9228                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9229                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9230                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9231                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9232                         tp->nvram_jedecnum = JEDEC_ATMEL;
9233                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9234                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9235                         tp->nvram_pagesize = 264;
9236                         break;
9237                 case FLASH_5752VENDOR_ST_M45PE10:
9238                 case FLASH_5752VENDOR_ST_M45PE20:
9239                 case FLASH_5752VENDOR_ST_M45PE40:
9240                         tp->nvram_jedecnum = JEDEC_ST;
9241                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9242                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
9243                         tp->nvram_pagesize = 256;
9244                         break;
9245         }
9246 }
9247
9248 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9249 static void __devinit tg3_nvram_init(struct tg3 *tp)
9250 {
9251         int j;
9252
9253         tw32_f(GRC_EEPROM_ADDR,
9254              (EEPROM_ADDR_FSM_RESET |
9255               (EEPROM_DEFAULT_CLOCK_PERIOD <<
9256                EEPROM_ADDR_CLKPERD_SHIFT)));
9257
9258         /* XXX schedule_timeout() ... */
9259         for (j = 0; j < 100; j++)
9260                 udelay(10);
9261
9262         /* Enable seeprom accesses. */
9263         tw32_f(GRC_LOCAL_CTRL,
9264              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9265         udelay(100);
9266
9267         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9268             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9269                 tp->tg3_flags |= TG3_FLAG_NVRAM;
9270
9271                 if (tg3_nvram_lock(tp)) {
9272                 printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
9273                                "tg3_nvram_init failed.\n", tp->dev->name);
9274                         return;
9275                 }
9276                 tg3_enable_nvram_access(tp);
9277
9278                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9279                         tg3_get_5752_nvram_info(tp);
9280                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9281                         tg3_get_5755_nvram_info(tp);
9282                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9283                         tg3_get_5787_nvram_info(tp);
9284                 else
9285                         tg3_get_nvram_info(tp);
9286
9287                 tg3_get_nvram_size(tp);
9288
9289                 tg3_disable_nvram_access(tp);
9290                 tg3_nvram_unlock(tp);
9291
9292         } else {
9293                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9294
9295                 tg3_get_eeprom_size(tp);
9296         }
9297 }
9298
9299 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
9300                                         u32 offset, u32 *val)
9301 {
9302         u32 tmp;
9303         int i;
9304
9305         if (offset > EEPROM_ADDR_ADDR_MASK ||
9306             (offset % 4) != 0)
9307                 return -EINVAL;
9308
9309         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
9310                                         EEPROM_ADDR_DEVID_MASK |
9311                                         EEPROM_ADDR_READ);
9312         tw32(GRC_EEPROM_ADDR,
9313              tmp |
9314              (0 << EEPROM_ADDR_DEVID_SHIFT) |
9315              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
9316               EEPROM_ADDR_ADDR_MASK) |
9317              EEPROM_ADDR_READ | EEPROM_ADDR_START);
9318
9319         for (i = 0; i < 10000; i++) {
9320                 tmp = tr32(GRC_EEPROM_ADDR);
9321
9322                 if (tmp & EEPROM_ADDR_COMPLETE)
9323                         break;
9324                 udelay(100);
9325         }
9326         if (!(tmp & EEPROM_ADDR_COMPLETE))
9327                 return -EBUSY;
9328
9329         *val = tr32(GRC_EEPROM_DATA);
9330         return 0;
9331 }
9332
9333 #define NVRAM_CMD_TIMEOUT 10000
9334
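/* Issue an NVRAM command and poll for NVRAM_CMD_DONE, waiting 10us per
 * iteration (roughly 100ms worst case with NVRAM_CMD_TIMEOUT == 10000).
 */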
9335 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9336 {
9337         int i;
9338
9339         tw32(NVRAM_CMD, nvram_cmd);
9340         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9341                 udelay(10);
9342                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9343                         udelay(10);
9344                         break;
9345                 }
9346         }
9347         if (i == NVRAM_CMD_TIMEOUT) {
9348                 return -EBUSY;
9349         }
9350         return 0;
9351 }
9352
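/* Buffered Atmel AT45DB parts use a page size (e.g. 264 bytes) that is not
 * a power of two, so a linear byte offset must be converted into a
 * (page index << ATMEL_AT45DB0X1B_PAGE_POS) + byte-within-page address
 * before being written to NVRAM_ADDR.  tg3_nvram_logical_addr() below is
 * the inverse mapping.
 */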
9353 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9354 {
9355         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9356             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9357             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9358             (tp->nvram_jedecnum == JEDEC_ATMEL))
9359
9360                 addr = ((addr / tp->nvram_pagesize) <<
9361                         ATMEL_AT45DB0X1B_PAGE_POS) +
9362                        (addr % tp->nvram_pagesize);
9363
9364         return addr;
9365 }
9366
9367 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9368 {
9369         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9370             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9371             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9372             (tp->nvram_jedecnum == JEDEC_ATMEL))
9373
9374                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9375                         tp->nvram_pagesize) +
9376                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
9377
9378         return addr;
9379 }
9380
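/* Read one 32-bit word from NVRAM: translate the offset for buffered Atmel
 * flash, take the NVRAM lock, enable access, issue a single FIRST|LAST read
 * command, and byte-swap the word returned in NVRAM_RDDATA.
 */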
9381 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
9382 {
9383         int ret;
9384
9385         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
9386                 return tg3_nvram_read_using_eeprom(tp, offset, val);
9387
9388         offset = tg3_nvram_phys_addr(tp, offset);
9389
9390         if (offset > NVRAM_ADDR_MSK)
9391                 return -EINVAL;
9392
9393         ret = tg3_nvram_lock(tp);
9394         if (ret)
9395                 return ret;
9396
9397         tg3_enable_nvram_access(tp);
9398
9399         tw32(NVRAM_ADDR, offset);
9400         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
9401                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
9402
9403         if (ret == 0)
9404                 *val = swab32(tr32(NVRAM_RDDATA));
9405
9406         tg3_disable_nvram_access(tp);
9407
9408         tg3_nvram_unlock(tp);
9409
9410         return ret;
9411 }
9412
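/* Like tg3_nvram_read(), but returns the word byte-swapped; callers use this
 * when checking magic values such as TG3_EEPROM_MAGIC.
 */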
9413 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9414 {
9415         int err;
9416         u32 tmp;
9417
9418         err = tg3_nvram_read(tp, offset, &tmp);
9419         *val = swab32(tmp);
9420         return err;
9421 }
9422
9423 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
9424                                     u32 offset, u32 len, u8 *buf)
9425 {
9426         int i, j, rc = 0;
9427         u32 val;
9428
9429         for (i = 0; i < len; i += 4) {
9430                 u32 addr, data;
9431
9432                 addr = offset + i;
9433
9434                 memcpy(&data, buf + i, 4);
9435
9436                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
9437
9438                 val = tr32(GRC_EEPROM_ADDR);
9439                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
9440
9441                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
9442                         EEPROM_ADDR_READ);
9443                 tw32(GRC_EEPROM_ADDR, val |
9444                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
9445                         (addr & EEPROM_ADDR_ADDR_MASK) |
9446                         EEPROM_ADDR_START |
9447                         EEPROM_ADDR_WRITE);
9448
9449                 for (j = 0; j < 10000; j++) {
9450                         val = tr32(GRC_EEPROM_ADDR);
9451
9452                         if (val & EEPROM_ADDR_COMPLETE)
9453                                 break;
9454                         udelay(100);
9455                 }
9456                 if (!(val & EEPROM_ADDR_COMPLETE)) {
9457                         rc = -EBUSY;
9458                         break;
9459                 }
9460         }
9461
9462         return rc;
9463 }
9464
9465 /* offset and length are dword aligned */
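/* Unbuffered flash must be programmed a full page at a time: read the
 * current page into a bounce buffer, merge in the new data, erase the page,
 * then rewrite it word by word with FIRST/LAST framing.
 */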
9466 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
9467                 u8 *buf)
9468 {
9469         int ret = 0;
9470         u32 pagesize = tp->nvram_pagesize;
9471         u32 pagemask = pagesize - 1;
9472         u32 nvram_cmd;
9473         u8 *tmp;
9474
9475         tmp = kmalloc(pagesize, GFP_KERNEL);
9476         if (tmp == NULL)
9477                 return -ENOMEM;
9478
9479         while (len) {
9480                 int j;
9481                 u32 phy_addr, page_off, size;
9482
9483                 phy_addr = offset & ~pagemask;
9484
9485                 for (j = 0; j < pagesize; j += 4) {
9486                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
9487                                                 (u32 *) (tmp + j))))
9488                                 break;
9489                 }
9490                 if (ret)
9491                         break;
9492
9493                 page_off = offset & pagemask;
9494                 size = pagesize;
9495                 if (len < size)
9496                         size = len;
9497
9498                 len -= size;
9499
9500                 memcpy(tmp + page_off, buf, size);
9501
9502                 offset = offset + (pagesize - page_off);
9503
9504                 tg3_enable_nvram_access(tp);
9505
9506                 /*
9507                  * Before we can erase the flash page, we need
9508                  * to issue a special "write enable" command.
9509                  */
9510                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9511
9512                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9513                         break;
9514
9515                 /* Erase the target page */
9516                 tw32(NVRAM_ADDR, phy_addr);
9517
9518                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
9519                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
9520
9521                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9522                         break;
9523
9524                 /* Issue another write enable to start the write. */
9525                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9526
9527                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9528                         break;
9529
9530                 for (j = 0; j < pagesize; j += 4) {
9531                         u32 data;
9532
9533                         data = *((u32 *) (tmp + j));
9534                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
9535
9536                         tw32(NVRAM_ADDR, phy_addr + j);
9537
9538                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
9539                                 NVRAM_CMD_WR;
9540
9541                         if (j == 0)
9542                                 nvram_cmd |= NVRAM_CMD_FIRST;
9543                         else if (j == (pagesize - 4))
9544                                 nvram_cmd |= NVRAM_CMD_LAST;
9545
9546                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9547                                 break;
9548                 }
9549                 if (ret)
9550                         break;
9551         }
9552
9553         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9554         tg3_nvram_exec_cmd(tp, nvram_cmd);
9555
9556         kfree(tmp);
9557
9558         return ret;
9559 }
9560
9561 /* offset and length are dword aligned */
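/* Buffered flash and eeprom parts are written one word at a time;
 * NVRAM_CMD_FIRST and NVRAM_CMD_LAST mark page boundaries, while eeprom
 * gets both bits on every word since it is always written as complete words.
 */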
9562 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
9563                 u8 *buf)
9564 {
9565         int i, ret = 0;
9566
9567         for (i = 0; i < len; i += 4, offset += 4) {
9568                 u32 data, page_off, phy_addr, nvram_cmd;
9569
9570                 memcpy(&data, buf + i, 4);
9571                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
9572
9573                 page_off = offset % tp->nvram_pagesize;
9574
9575                 phy_addr = tg3_nvram_phys_addr(tp, offset);
9576
9577                 tw32(NVRAM_ADDR, phy_addr);
9578
9579                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
9580
9581                 if ((page_off == 0) || (i == 0))
9582                         nvram_cmd |= NVRAM_CMD_FIRST;
9583                 if (page_off == (tp->nvram_pagesize - 4))
9584                         nvram_cmd |= NVRAM_CMD_LAST;
9585
9586                 if (i == (len - 4))
9587                         nvram_cmd |= NVRAM_CMD_LAST;
9588
9589                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
9590                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
9591                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
9592                     (tp->nvram_jedecnum == JEDEC_ST) &&
9593                     (nvram_cmd & NVRAM_CMD_FIRST)) {
9594
9595                         if ((ret = tg3_nvram_exec_cmd(tp,
9596                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
9597                                 NVRAM_CMD_DONE)))
9598
9599                                 break;
9600                 }
9601                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9602                         /* We always do complete word writes to eeprom. */
9603                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
9604                 }
9605
9606                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9607                         break;
9608         }
9609         return ret;
9610 }
9611
9612 /* offset and length are dword aligned */
9613 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
9614 {
9615         int ret;
9616
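        /* When TG3_FLAG_EEPROM_WRITE_PROT is set, GPIO1 is wired as the
         * eeprom write-protect line (see the comment in tg3_get_invariants()),
         * so clear GRC_LCLCTRL_GPIO_OUTPUT1 while writing and restore
         * grc_local_ctrl when done.
         */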
9617         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9618                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9619                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
9620                 udelay(40);
9621         }
9622
9623         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
9624                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
9625         }
9626         else {
9627                 u32 grc_mode;
9628
9629                 ret = tg3_nvram_lock(tp);
9630                 if (ret)
9631                         return ret;
9632
9633                 tg3_enable_nvram_access(tp);
9634                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
9635                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
9636                         tw32(NVRAM_WRITE1, 0x406);
9637
9638                 grc_mode = tr32(GRC_MODE);
9639                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
9640
9641                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
9642                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9643
9644                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
9645                                 buf);
9646                 }
9647                 else {
9648                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
9649                                 buf);
9650                 }
9651
9652                 grc_mode = tr32(GRC_MODE);
9653                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
9654
9655                 tg3_disable_nvram_access(tp);
9656                 tg3_nvram_unlock(tp);
9657         }
9658
9659         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9660                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9661                 udelay(40);
9662         }
9663
9664         return ret;
9665 }
9666
9667 struct subsys_tbl_ent {
9668         u16 subsys_vendor, subsys_devid;
9669         u32 phy_id;
9670 };
9671
9672 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
9673         /* Broadcom boards. */
9674         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
9675         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
9676         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
9677         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
9678         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
9679         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
9680         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
9681         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
9682         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
9683         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
9684         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
9685
9686         /* 3com boards. */
9687         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
9688         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
9689         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
9690         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
9691         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
9692
9693         /* DELL boards. */
9694         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
9695         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
9696         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
9697         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
9698
9699         /* Compaq boards. */
9700         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
9701         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
9702         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
9703         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
9704         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
9705
9706         /* IBM boards. */
9707         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
9708 };
9709
9710 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9711 {
9712         int i;
9713
9714         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9715                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9716                      tp->pdev->subsystem_vendor) &&
9717                     (subsys_id_to_phy_id[i].subsys_devid ==
9718                      tp->pdev->subsystem_device))
9719                         return &subsys_id_to_phy_id[i];
9720         }
9721         return NULL;
9722 }
9723
9724 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
9725 {
9726         u32 val;
9727         u16 pmcsr;
9728
9729         /* On some early chips the SRAM cannot be accessed in D3hot state,
9730          * so we need to make sure we're in D0.
9731          */
9732         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
9733         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9734         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
9735         msleep(1);
9736
9737         /* Make sure register accesses (indirect or otherwise)
9738          * will function correctly.
9739          */
9740         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9741                                tp->misc_host_ctrl);
9742
9743         /* The memory arbiter has to be enabled in order for SRAM accesses
9744          * to succeed.  Normally on powerup the tg3 chip firmware will make
9745          * sure it is enabled, but other entities such as system netboot
9746          * code might disable it.
9747          */
9748         val = tr32(MEMARB_MODE);
9749         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9750
9751         tp->phy_id = PHY_ID_INVALID;
9752         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9753
9754         /* Assume an onboard device by default.  */
9755         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9756
9757         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9758         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9759                 u32 nic_cfg, led_cfg;
9760                 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
9761                 int eeprom_phy_serdes = 0;
9762
9763                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9764                 tp->nic_sram_data_cfg = nic_cfg;
9765
9766                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
9767                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
9768                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
9769                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
9770                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
9771                     (ver > 0) && (ver < 0x100))
9772                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
9773
9774                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
9775                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
9776                         eeprom_phy_serdes = 1;
9777
9778                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
9779                 if (nic_phy_id != 0) {
9780                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
9781                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
9782
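                        /* Repack the phy id stored in NIC SRAM into the same
                         * bit layout that tg3_phy_probe() builds from
                         * MII_PHYSID1/MII_PHYSID2.
                         */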
9783                         eeprom_phy_id  = (id1 >> 16) << 10;
9784                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
9785                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
9786                 } else
9787                         eeprom_phy_id = 0;
9788
9789                 tp->phy_id = eeprom_phy_id;
9790                 if (eeprom_phy_serdes) {
9791                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
9792                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
9793                         else
9794                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9795                 }
9796
9797                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9798                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
9799                                     SHASTA_EXT_LED_MODE_MASK);
9800                 else
9801                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
9802
9803                 switch (led_cfg) {
9804                 default:
9805                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
9806                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9807                         break;
9808
9809                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
9810                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9811                         break;
9812
9813                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
9814                         tp->led_ctrl = LED_CTRL_MODE_MAC;
9815
9816                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9817                          * read on some older 5700/5701 bootcode.
9818                          */
9819                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
9820                             ASIC_REV_5700 ||
9821                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
9822                             ASIC_REV_5701)
9823                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
9824
9825                         break;
9826
9827                 case SHASTA_EXT_LED_SHARED:
9828                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
9829                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
9830                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
9831                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9832                                                  LED_CTRL_MODE_PHY_2);
9833                         break;
9834
9835                 case SHASTA_EXT_LED_MAC:
9836                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
9837                         break;
9838
9839                 case SHASTA_EXT_LED_COMBO:
9840                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
9841                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
9842                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
9843                                                  LED_CTRL_MODE_PHY_2);
9844                         break;
9845
9846                 }
9847
9848                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9849                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
9850                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
9851                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
9852
9853                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP)
9854                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
9855                 else
9856                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
9857
9858                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9859                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
9860                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9861                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
9862                 }
9863                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
9864                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
9865
9866                 if (cfg2 & (1 << 17))
9867                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
9868
9869                 /* SerDes signal pre-emphasis in register 0x590 is set by
9870                  * the bootcode if bit 18 is set. */
9871                 if (cfg2 & (1 << 18))
9872                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
9873         }
9874 }
9875
9876 static int __devinit tg3_phy_probe(struct tg3 *tp)
9877 {
9878         u32 hw_phy_id_1, hw_phy_id_2;
9879         u32 hw_phy_id, hw_phy_id_masked;
9880         int err;
9881
9882         /* Reading the PHY ID register can conflict with ASF
9883          * firmware access to the PHY hardware.
9884          */
9885         err = 0;
9886         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
9887                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
9888         } else {
9889                 /* Now read the physical PHY_ID from the chip and verify
9890                  * that it is sane.  If it doesn't look good, we fall back
9891                  * to the value found in the eeprom area or, failing that,
9892                  * the hard-coded subsystem-ID table.
9893                  */
9894                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
9895                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
9896
9897                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
9898                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
9899                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
9900
9901                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
9902         }
9903
9904         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
9905                 tp->phy_id = hw_phy_id;
9906                 if (hw_phy_id_masked == PHY_ID_BCM8002)
9907                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9908                 else
9909                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
9910         } else {
9911                 if (tp->phy_id != PHY_ID_INVALID) {
9912                         /* Do nothing, phy ID already set up in
9913                          * tg3_get_eeprom_hw_cfg().
9914                          */
9915                 } else {
9916                         struct subsys_tbl_ent *p;
9917
9918                         /* No eeprom signature?  Try the hardcoded
9919                          * subsys device table.
9920                          */
9921                         p = lookup_by_subsys(tp);
9922                         if (!p)
9923                                 return -ENODEV;
9924
9925                         tp->phy_id = p->phy_id;
9926                         if (!tp->phy_id ||
9927                             tp->phy_id == PHY_ID_BCM8002)
9928                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9929                 }
9930         }
9931
9932         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
9933             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
9934                 u32 bmsr, adv_reg, tg3_ctrl;
9935
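                /* BMSR latches link-down events, so read it twice; the
                 * second read reflects the current link state.
                 */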
9936                 tg3_readphy(tp, MII_BMSR, &bmsr);
9937                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9938                     (bmsr & BMSR_LSTATUS))
9939                         goto skip_phy_reset;
9940
9941                 err = tg3_phy_reset(tp);
9942                 if (err)
9943                         return err;
9944
9945                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9946                            ADVERTISE_100HALF | ADVERTISE_100FULL |
9947                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9948                 tg3_ctrl = 0;
9949                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9950                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9951                                     MII_TG3_CTRL_ADV_1000_FULL);
9952                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9953                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9954                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9955                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
9956                 }
9957
9958                 if (!tg3_copper_is_advertising_all(tp)) {
9959                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9960
9961                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9962                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9963
9964                         tg3_writephy(tp, MII_BMCR,
9965                                      BMCR_ANENABLE | BMCR_ANRESTART);
9966                 }
9967                 tg3_phy_set_wirespeed(tp);
9968
9969                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9970                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9971                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9972         }
9973
9974 skip_phy_reset:
9975         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9976                 err = tg3_init_5401phy_dsp(tp);
9977                 if (err)
9978                         return err;
9979         }
9980
9981         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9982                 err = tg3_init_5401phy_dsp(tp);
9983         }
9984
9985         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9986                 tp->link_config.advertising =
9987                         (ADVERTISED_1000baseT_Half |
9988                          ADVERTISED_1000baseT_Full |
9989                          ADVERTISED_Autoneg |
9990                          ADVERTISED_FIBRE);
9991         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9992                 tp->link_config.advertising &=
9993                         ~(ADVERTISED_1000baseT_Half |
9994                           ADVERTISED_1000baseT_Full);
9995
9996         return err;
9997 }
9998
9999 static void __devinit tg3_read_partno(struct tg3 *tp)
10000 {
10001         unsigned char vpd_data[256];
10002         int i;
10003         u32 magic;
10004
10005         if (tg3_nvram_read_swab(tp, 0x0, &magic))
10006                 goto out_not_found;
10007
10008         if (magic == TG3_EEPROM_MAGIC) {
10009                 for (i = 0; i < 256; i += 4) {
10010                         u32 tmp;
10011
10012                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
10013                                 goto out_not_found;
10014
10015                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
10016                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
10017                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
10018                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
10019                 }
10020         } else {
10021                 int vpd_cap;
10022
10023                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
10024                 for (i = 0; i < 256; i += 4) {
10025                         u32 tmp, j = 0;
10026                         u16 tmp16;
10027
10028                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
10029                                               i);
10030                         while (j++ < 100) {
10031                                 pci_read_config_word(tp->pdev, vpd_cap +
10032                                                      PCI_VPD_ADDR, &tmp16);
10033                                 if (tmp16 & 0x8000)
10034                                         break;
10035                                 msleep(1);
10036                         }
10037                         if (!(tmp16 & 0x8000))
10038                                 goto out_not_found;
10039
10040                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
10041                                               &tmp);
10042                         tmp = cpu_to_le32(tmp);
10043                         memcpy(&vpd_data[i], &tmp, 4);
10044                 }
10045         }
10046
10047         /* Now parse and find the part number. */
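        /* VPD is a list of resource tags: 0x82 (identifier string) and 0x91
         * (read/write section) are skipped; 0x90 is the read-only section
         * that holds the "PN" (part number) keyword we are looking for.
         */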
10048         for (i = 0; i < 256; ) {
10049                 unsigned char val = vpd_data[i];
10050                 int block_end;
10051
10052                 if (val == 0x82 || val == 0x91) {
10053                         i = (i + 3 +
10054                              (vpd_data[i + 1] +
10055                               (vpd_data[i + 2] << 8)));
10056                         continue;
10057                 }
10058
10059                 if (val != 0x90)
10060                         goto out_not_found;
10061
10062                 block_end = (i + 3 +
10063                              (vpd_data[i + 1] +
10064                               (vpd_data[i + 2] << 8)));
10065                 i += 3;
10066                 while (i < block_end) {
10067                         if (vpd_data[i + 0] == 'P' &&
10068                             vpd_data[i + 1] == 'N') {
10069                                 int partno_len = vpd_data[i + 2];
10070
10071                                 if (partno_len > 24)
10072                                         goto out_not_found;
10073
10074                                 memcpy(tp->board_part_number,
10075                                        &vpd_data[i + 3],
10076                                        partno_len);
10077
10078                                 /* Success. */
10079                                 return;
10080                         }
                              /* Advance to the next VPD keyword: a 2-byte
                               * name, a length byte, then the data; without
                               * this the loop would spin forever when "PN"
                               * is not the first keyword.
                               */
                              i += 3 + vpd_data[i + 2];
10081                 }
10082
10083                 /* Part number not found. */
10084                 goto out_not_found;
10085         }
10086
10087 out_not_found:
10088         strcpy(tp->board_part_number, "none");
10089 }
10090
10091 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
10092 {
10093         u32 val, offset, start;
10094
10095         if (tg3_nvram_read_swab(tp, 0, &val))
10096                 return;
10097
10098         if (val != TG3_EEPROM_MAGIC)
10099                 return;
10100
10101         if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
10102             tg3_nvram_read_swab(tp, 0x4, &start))
10103                 return;
10104
10105         offset = tg3_nvram_logical_addr(tp, offset);
10106         if (tg3_nvram_read_swab(tp, offset, &val))
10107                 return;
10108
10109         if ((val & 0xfc000000) == 0x0c000000) {
10110                 u32 ver_offset, addr;
10111                 int i;
10112
10113                 if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
10114                     tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
10115                         return;
10116
10117                 if (val != 0)
10118                         return;
10119
10120                 addr = offset + ver_offset - start;
10121                 for (i = 0; i < 16; i += 4) {
10122                         if (tg3_nvram_read(tp, addr + i, &val))
10123                                 return;
10124
10125                         val = cpu_to_le32(val);
10126                         memcpy(tp->fw_ver + i, &val, 4);
10127                 }
10128         }
10129 }
10130
10131 static int __devinit tg3_get_invariants(struct tg3 *tp)
10132 {
10133         static struct pci_device_id write_reorder_chipsets[] = {
10134                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10135                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
10136                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10137                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
10138                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10139                              PCI_DEVICE_ID_VIA_8385_0) },
10140                 { },
10141         };
10142         u32 misc_ctrl_reg;
10143         u32 cacheline_sz_reg;
10144         u32 pci_state_reg, grc_misc_cfg;
10145         u32 val;
10146         u16 pci_cmd;
10147         int err;
10148
10149         /* Force memory write invalidate off.  If we leave it on,
10150          * then on 5700_BX chips we have to enable a workaround.
10151          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10152          * to match the cacheline size.  The Broadcom driver has this
10153          * workaround but turns MWI off all the time, so it never uses
10154          * it.  This seems to suggest that the workaround is insufficient.
10155          */
10156         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10157         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10158         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10159
10160         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10161          * has the register indirect write enable bit set before
10162          * we try to access any of the MMIO registers.  It is also
10163          * critical that the PCI-X hw workaround situation is decided
10164          * before that as well.
10165          */
10166         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10167                               &misc_ctrl_reg);
10168
10169         tp->pci_chip_rev_id = (misc_ctrl_reg >>
10170                                MISC_HOST_CTRL_CHIPREV_SHIFT);
10171
10172         /* Wrong chip ID in 5752 A0. This code can be removed later
10173          * as A0 is not in production.
10174          */
10175         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10176                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10177
10178         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10179          * we need to disable memory and use config. cycles
10180          * only to access all registers. The 5702/03 chips
10181          * can mistakenly decode the special cycles from the
10182          * ICH chipsets as memory write cycles, causing corruption
10183          * of register and memory space. Only certain ICH bridges
10184          * will drive special cycles with non-zero data during the
10185          * address phase which can fall within the 5703's address
10186          * range. This is not an ICH bug as the PCI spec allows
10187          * non-zero address during special cycles. However, only
10188          * these ICH bridges are known to drive non-zero addresses
10189          * during special cycles.
10190          *
10191          * Since special cycles do not cross PCI bridges, we only
10192          * enable this workaround if the 5703 is on the secondary
10193          * bus of these ICH bridges.
10194          */
10195         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10196             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10197                 static struct tg3_dev_id {
10198                         u32     vendor;
10199                         u32     device;
10200                         u32     rev;
10201                 } ich_chipsets[] = {
10202                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10203                           PCI_ANY_ID },
10204                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10205                           PCI_ANY_ID },
10206                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10207                           0xa },
10208                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10209                           PCI_ANY_ID },
10210                         { },
10211                 };
10212                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10213                 struct pci_dev *bridge = NULL;
10214
10215                 while (pci_id->vendor != 0) {
10216                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
10217                                                 bridge);
10218                         if (!bridge) {
10219                                 pci_id++;
10220                                 continue;
10221                         }
10222                         if (pci_id->rev != PCI_ANY_ID) {
10223                                 u8 rev;
10224
10225                                 pci_read_config_byte(bridge, PCI_REVISION_ID,
10226                                                      &rev);
10227                                 if (rev > pci_id->rev)
10228                                         continue;
10229                         }
10230                         if (bridge->subordinate &&
10231                             (bridge->subordinate->number ==
10232                              tp->pdev->bus->number)) {
10233
10234                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10235                                 pci_dev_put(bridge);
10236                                 break;
10237                         }
10238                 }
10239         }
10240
10241         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10242          * DMA addresses > 40-bit. This bridge may have other additional
10243          * 57xx devices behind it in some 4-port NIC designs for example.
10244          * Any tg3 device found behind the bridge will also need the 40-bit
10245          * DMA workaround.
10246          */
10247         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10248             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10249                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
10250                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10251                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
10252         }
10253         else {
10254                 struct pci_dev *bridge = NULL;
10255
10256                 do {
10257                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10258                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
10259                                                 bridge);
10260                         if (bridge && bridge->subordinate &&
10261                             (bridge->subordinate->number <=
10262                              tp->pdev->bus->number) &&
10263                             (bridge->subordinate->subordinate >=
10264                              tp->pdev->bus->number)) {
10265                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10266                                 pci_dev_put(bridge);
10267                                 break;
10268                         }
10269                 } while (bridge);
10270         }
10271
10272         /* Initialize misc host control in PCI block. */
10273         tp->misc_host_ctrl |= (misc_ctrl_reg &
10274                                MISC_HOST_CTRL_CHIPREV);
10275         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10276                                tp->misc_host_ctrl);
10277
10278         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10279                               &cacheline_sz_reg);
10280
10281         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
10282         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
10283         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
10284         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
10285
10286         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10287             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10288             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10289             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10290             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10291                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10292
10293         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10294             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10295                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
10296
10297         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
10298                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10299                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
10300                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
10301                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
10302                 } else {
10303                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 |
10304                                           TG3_FLG2_HW_TSO_1_BUG;
10305                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10306                                 ASIC_REV_5750 &&
10307                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
10308                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_1_BUG;
10309                 }
10310         }
10311
10312         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10313             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
10314             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
10315             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
10316             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)
10317                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10318
10319         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
10320                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
10321
10322         /* If we have an AMD 762 or VIA K8T800 chipset, write
10323          * reordering to the mailbox registers done by the host
10324          * controller can cause major troubles.  We read back from
10325          * every mailbox register write to force the writes to be
10326          * posted to the chip in order.
10327          */
10328         if (pci_dev_present(write_reorder_chipsets) &&
10329             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10330                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
10331
10332         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10333             tp->pci_lat_timer < 64) {
10334                 tp->pci_lat_timer = 64;
10335
10336                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
10337                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
10338                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
10339                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
10340
10341                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10342                                        cacheline_sz_reg);
10343         }
10344
10345         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10346                               &pci_state_reg);
10347
10348         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
10349                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
10350
10351                 /* If this is a 5700 BX chipset, and we are in PCI-X
10352                  * mode, enable register write workaround.
10353                  *
10354                  * The workaround is to use indirect register accesses
10355                  * for all chip writes not to mailbox registers.
10356                  */
10357                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
10358                         u32 pm_reg;
10359                         u16 pci_cmd;
10360
10361                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10362
10363                         /* The chip can have its power management PCI config
10364                          * space registers clobbered due to this bug.
10365                          * So explicitly force the chip into D0 here.
10366                          */
10367                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10368                                               &pm_reg);
10369                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10370                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10371                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10372                                                pm_reg);
10373
10374                         /* Also, force SERR#/PERR# in PCI command. */
10375                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10376                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10377                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10378                 }
10379         }
10380
10381         /* 5700 BX chips need to have their TX producer index mailboxes
10382          * written twice to workaround a bug.
10383          */
10384         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10385                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10386
10387         /* Back to back register writes can cause problems on this chip,
10388          * the workaround is to read back all reg writes except those to
10389          * mailbox regs.  See tg3_write_indirect_reg32().
10390          *
10391          * PCI Express 5750_A0 rev chips need this workaround too.
10392          */
10393         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10394             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10395              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
10396                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
10397
10398         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10399                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10400         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10401                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10402
10403         /* Chip-specific fixup from Broadcom driver */
10404         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10405             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10406                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10407                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10408         }
10409
10410         /* Default fast path register access methods */
10411         tp->read32 = tg3_read32;
10412         tp->write32 = tg3_write32;
10413         tp->read32_mbox = tg3_read32;
10414         tp->write32_mbox = tg3_write32;
10415         tp->write32_tx_mbox = tg3_write32;
10416         tp->write32_rx_mbox = tg3_write32;
10417
10418         /* Various workaround register access methods */
10419         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10420                 tp->write32 = tg3_write_indirect_reg32;
10421         else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
10422                 tp->write32 = tg3_write_flush_reg32;
10423
10424         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10425             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10426                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10427                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10428                         tp->write32_rx_mbox = tg3_write_flush_reg32;
10429         }
10430
10431         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10432                 tp->read32 = tg3_read_indirect_reg32;
10433                 tp->write32 = tg3_write_indirect_reg32;
10434                 tp->read32_mbox = tg3_read_indirect_mbox;
10435                 tp->write32_mbox = tg3_write_indirect_mbox;
10436                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
10437                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
10438
10439                 iounmap(tp->regs);
10440                 tp->regs = NULL;
10441
10442                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10443                 pci_cmd &= ~PCI_COMMAND_MEMORY;
10444                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10445         }
10446
10447         if (tp->write32 == tg3_write_indirect_reg32 ||
10448             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10449              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10450               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
10451                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
10452
10453         /* Get eeprom hw config before calling tg3_set_power_state().
10454          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
10455          * determined before calling tg3_set_power_state() so that
10456          * we know whether or not to switch out of Vaux power.
10457          * When the flag is set, it means that GPIO1 is used for eeprom
10458          * write protect and also implies that it is a LOM where GPIOs
10459          * are not used to switch power.
10460          */
10461         tg3_get_eeprom_hw_cfg(tp);
10462
10463         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10464          * GPIO1 driven high will bring 5700's external PHY out of reset.
10465          * It is also used as eeprom write protect on LOMs.
10466          */
10467         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10468         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10469             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10470                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10471                                        GRC_LCLCTRL_GPIO_OUTPUT1);
10472         /* Unused GPIO3 must be driven as output on 5752 because there
10473          * are no pull-up resistors on unused GPIO pins.
10474          */
10475         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10476                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
10477
10478         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10479                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
10480
10481         /* Force the chip into D0. */
10482         err = tg3_set_power_state(tp, PCI_D0);
10483         if (err) {
10484                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10485                        pci_name(tp->pdev));
10486                 return err;
10487         }
10488
10489         /* 5700 B0 chips do not support checksumming correctly due
10490          * to hardware bugs.
10491          */
10492         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10493                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10494
10495         /* Derive initial jumbo mode from MTU assigned in
10496          * ether_setup() via the alloc_etherdev() call
10497          */
10498         if (tp->dev->mtu > ETH_DATA_LEN &&
10499             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10500                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
10501
10502         /* Determine WakeOnLan speed to use. */
10503         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10504             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10505             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10506             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10507                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10508         } else {
10509                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10510         }
10511
10512         /* A few boards don't want Ethernet@WireSpeed phy feature */
10513         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10514             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10515              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
10516              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10517             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
10518                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10519
10520         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10521             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10522                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10523         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10524                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10525
10526         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10527                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10528                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10529                         tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
10530                 else
10531                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10532         }
10533
10534         tp->coalesce_mode = 0;
10535         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10536             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10537                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10538
10539         /* Initialize MAC MI mode, polling disabled. */
10540         tw32_f(MAC_MI_MODE, tp->mi_mode);
10541         udelay(80);
10542
10543         /* Initialize data/descriptor byte/word swapping. */
10544         val = tr32(GRC_MODE);
10545         val &= GRC_MODE_HOST_STACKUP;
10546         tw32(GRC_MODE, val | tp->grc_mode);
10547
10548         tg3_switch_clocks(tp);
10549
10550         /* Clear this out for sanity. */
10551         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10552
10553         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10554                               &pci_state_reg);
10555         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10556             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10557                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10558
10559                 if (chiprevid == CHIPREV_ID_5701_A0 ||
10560                     chiprevid == CHIPREV_ID_5701_B0 ||
10561                     chiprevid == CHIPREV_ID_5701_B2 ||
10562                     chiprevid == CHIPREV_ID_5701_B5) {
10563                         void __iomem *sram_base;
10564
10565                         /* Write some dummy words into the SRAM status block
10566                          * area, see if it reads back correctly.  If the return
10567                          * value is bad, force enable the PCIX workaround.
10568                          */
10569                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10570
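                        /* If the 0xffffffff write to the second word leaks
                         * back into the first one, register writes are being
                         * corrupted and the PCI-X target workaround is needed.
                         */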
10571                         writel(0x00000000, sram_base);
10572                         writel(0x00000000, sram_base + 4);
10573                         writel(0xffffffff, sram_base + 4);
10574                         if (readl(sram_base) != 0x00000000)
10575                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10576                 }
10577         }
10578
10579         udelay(50);
10580         tg3_nvram_init(tp);
10581
10582         grc_misc_cfg = tr32(GRC_MISC_CFG);
10583         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10584
10585         /* Broadcom's driver says that CIOBE multisplit has a bug */
10586 #if 0
10587         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10588             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
10589                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
10590                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
10591         }
10592 #endif
10593         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10594             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
10595              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
10596                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
10597
10598         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10599             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
10600                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
10601         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
10602                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
10603                                       HOSTCC_MODE_CLRTICK_TXBD);
10604
10605                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
10606                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10607                                        tp->misc_host_ctrl);
10608         }
10609
10610         /* these are limited to 10/100 only */
10611         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10612              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
10613             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10614              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10615              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
10616               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
10617               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
10618             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10619              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
10620               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
10621                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
10622
10623         err = tg3_phy_probe(tp);
10624         if (err) {
10625                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10626                        pci_name(tp->pdev), err);
10627                 /* ... but do not return immediately ... */
10628         }
10629
10630         tg3_read_partno(tp);
10631         tg3_read_fw_ver(tp);
10632
10633         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10634                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10635         } else {
10636                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10637                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10638                 else
10639                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10640         }
10641
10642         /* 5700 {AX,BX} chips have a broken status block link
10643          * change bit implementation, so we must use the
10644          * status register in those cases.
10645          */
10646         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10647                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10648         else
10649                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10650
10651         /* The led_ctrl is set during tg3_phy_probe; here we might
10652          * have to force the link status polling mechanism based
10653          * upon subsystem IDs.
10654          */
10655         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10656             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10657                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10658                                   TG3_FLAG_USE_LINKCHG_REG);
10659         }
10660
10661         /* For all SERDES we poll the MAC status register. */
10662         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10663                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10664         else
10665                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10666
10667         /* All chips before 5787 can get confused if TX buffers
10668          * straddle the 4GB address boundary in some cases.
10669          */
10670         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10671             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10672                 tp->dev->hard_start_xmit = tg3_start_xmit;
10673         else
10674                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
10675
10676         tp->rx_offset = 2;
10677         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10678             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10679                 tp->rx_offset = 0;
10680
10681         tp->rx_std_max_post = TG3_RX_RING_SIZE;
10682
10683         /* Increment the rx prod index on the rx std ring by at most
10684          * 8 for these chips to work around hw errata.
10685          */
10686         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10687             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10688             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10689                 tp->rx_std_max_post = 8;
10690
10691         /* By default, disable wake-on-lan.  User can change this
10692          * using ETHTOOL_SWOL.
10693          */
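        /* (From userspace this is typically "ethtool -s <dev> wol g",
         *  which reaches the driver via the ETHTOOL_SWOL ioctl.)
         */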
10694         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10695
10696         return err;
10697 }
10698
10699 #ifdef CONFIG_SPARC64
10700 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
10701 {
10702         struct net_device *dev = tp->dev;
10703         struct pci_dev *pdev = tp->pdev;
10704         struct pcidev_cookie *pcp = pdev->sysdata;
10705
10706         if (pcp != NULL) {
10707                 unsigned char *addr;
10708                 int len;
10709
10710                 addr = of_get_property(pcp->prom_node, "local-mac-address",
10711                                         &len);
10712                 if (addr && len == 6) {
10713                         memcpy(dev->dev_addr, addr, 6);
10714                         memcpy(dev->perm_addr, dev->dev_addr, 6);
10715                         return 0;
10716                 }
10717         }
10718         return -ENODEV;
10719 }
10720
10721 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
10722 {
10723         struct net_device *dev = tp->dev;
10724
10725         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
10726         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
10727         return 0;
10728 }
10729 #endif
10730
10731 static int __devinit tg3_get_device_address(struct tg3 *tp)
10732 {
10733         struct net_device *dev = tp->dev;
10734         u32 hi, lo, mac_offset;
10735         int addr_ok = 0;
10736
10737 #ifdef CONFIG_SPARC64
10738         if (!tg3_get_macaddr_sparc(tp))
10739                 return 0;
10740 #endif
10741
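        /* The MAC address normally sits at NVRAM offset 0x7c; on dual-MAC
         * parts (5704/5780-class) the second port reads its address from
         * offset 0xcc instead, selected via TG3PCI_DUAL_MAC_CTRL below.
         */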
10742         mac_offset = 0x7c;
10743         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
10744             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10745                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
10746                         mac_offset = 0xcc;
10747                 if (tg3_nvram_lock(tp))
10748                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
10749                 else
10750                         tg3_nvram_unlock(tp);
10751         }
10752
10753         /* First try to get it from MAC address mailbox. */
10754         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
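        /* The upper 16 bits must read 0x484b (ASCII "HK"), which the
         * bootcode apparently uses as a "this address is valid" tag.
         */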
10755         if ((hi >> 16) == 0x484b) {
10756                 dev->dev_addr[0] = (hi >>  8) & 0xff;
10757                 dev->dev_addr[1] = (hi >>  0) & 0xff;
10758
10759                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
10760                 dev->dev_addr[2] = (lo >> 24) & 0xff;
10761                 dev->dev_addr[3] = (lo >> 16) & 0xff;
10762                 dev->dev_addr[4] = (lo >>  8) & 0xff;
10763                 dev->dev_addr[5] = (lo >>  0) & 0xff;
10764
10765                 /* Some old bootcode may report a 0 MAC address in SRAM */
10766                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
10767         }
10768         if (!addr_ok) {
10769                 /* Next, try NVRAM. */
10770                 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
10771                     !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
10772                         dev->dev_addr[0] = ((hi >> 16) & 0xff);
10773                         dev->dev_addr[1] = ((hi >> 24) & 0xff);
10774                         dev->dev_addr[2] = ((lo >>  0) & 0xff);
10775                         dev->dev_addr[3] = ((lo >>  8) & 0xff);
10776                         dev->dev_addr[4] = ((lo >> 16) & 0xff);
10777                         dev->dev_addr[5] = ((lo >> 24) & 0xff);
10778                 }
10779                 /* Finally just fetch it out of the MAC control regs. */
10780                 else {
10781                         hi = tr32(MAC_ADDR_0_HIGH);
10782                         lo = tr32(MAC_ADDR_0_LOW);
10783
10784                         dev->dev_addr[5] = lo & 0xff;
10785                         dev->dev_addr[4] = (lo >> 8) & 0xff;
10786                         dev->dev_addr[3] = (lo >> 16) & 0xff;
10787                         dev->dev_addr[2] = (lo >> 24) & 0xff;
10788                         dev->dev_addr[1] = hi & 0xff;
10789                         dev->dev_addr[0] = (hi >> 8) & 0xff;
10790                 }
10791         }
10792
10793         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
10794 #ifdef CONFIG_SPARC64
10795                 if (!tg3_get_default_macaddr_sparc(tp))
10796                         return 0;
10797 #endif
10798                 return -EINVAL;
10799         }
10800         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10801         return 0;
10802 }
10803
10804 #define BOUNDARY_SINGLE_CACHELINE       1
10805 #define BOUNDARY_MULTI_CACHELINE        2
10806
10807 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
10808 {
10809         int cacheline_size;
10810         u8 byte;
10811         int goal;
10812
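        /* PCI_CACHE_LINE_SIZE is in units of 32-bit dwords, hence the *4;
         * a value of 0 presumably means it was never programmed, so fall
         * back to the worst-case 1024-byte figure.
         */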
10813         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
10814         if (byte == 0)
10815                 cacheline_size = 1024;
10816         else
10817                 cacheline_size = (int) byte * 4;
10818
10819         /* On 5703 and later chips, the boundary bits have no
10820          * effect.
10821          */
10822         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10823             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
10824             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10825                 goto out;
10826
10827 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
10828         goal = BOUNDARY_MULTI_CACHELINE;
10829 #else
10830 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
10831         goal = BOUNDARY_SINGLE_CACHELINE;
10832 #else
10833         goal = 0;
10834 #endif
10835 #endif
10836
10837         if (!goal)
10838                 goto out;
10839
10840         /* PCI controllers on most RISC systems tend to disconnect
10841          * when a device tries to burst across a cache-line boundary.
10842          * Therefore, letting tg3 do so just wastes PCI bandwidth.
10843          *
10844          * Unfortunately, for PCI-E there are only limited
10845          * write-side controls for this, and thus for reads
10846          * we will still get the disconnects.  We'll also waste
10847          * these PCI cycles for both read and write for chips
10848          * other than 5700 and 5701, which do not implement the
10849          * boundary bits.
10850          */
10851         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10852             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
10853                 switch (cacheline_size) {
10854                 case 16:
10855                 case 32:
10856                 case 64:
10857                 case 128:
10858                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10859                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
10860                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
10861                         } else {
10862                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10863                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10864                         }
10865                         break;
10866
10867                 case 256:
10868                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
10869                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
10870                         break;
10871
10872                 default:
10873                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10874                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10875                         break;
10876                 }
10877         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10878                 switch (cacheline_size) {
10879                 case 16:
10880                 case 32:
10881                 case 64:
10882                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10883                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10884                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
10885                                 break;
10886                         }
10887                         /* fallthrough */
10888                 case 128:
10889                 default:
10890                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10891                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
10892                         break;
10893                 }
10894         } else {
10895                 switch (cacheline_size) {
10896                 case 16:
10897                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10898                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
10899                                         DMA_RWCTRL_WRITE_BNDRY_16);
10900                                 break;
10901                         }
10902                         /* fallthrough */
10903                 case 32:
10904                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10905                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
10906                                         DMA_RWCTRL_WRITE_BNDRY_32);
10907                                 break;
10908                         }
10909                         /* fallthrough */
10910                 case 64:
10911                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10912                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
10913                                         DMA_RWCTRL_WRITE_BNDRY_64);
10914                                 break;
10915                         }
10916                         /* fallthrough */
10917                 case 128:
10918                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10919                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10920                                         DMA_RWCTRL_WRITE_BNDRY_128);
10921                                 break;
10922                         }
10923                         /* fallthrough */
10924                 case 256:
10925                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
10926                                 DMA_RWCTRL_WRITE_BNDRY_256);
10927                         break;
10928                 case 512:
10929                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
10930                                 DMA_RWCTRL_WRITE_BNDRY_512);
10931                         break;
10932                 case 1024:
10933                 default:
10934                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10935                                 DMA_RWCTRL_WRITE_BNDRY_1024);
10936                         break;
10937                 }
10938         }
10939
10940 out:
10941         return val;
10942 }
10943
10944 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
10945 {
10946         struct tg3_internal_buffer_desc test_desc;
10947         u32 sram_dma_descs;
10948         int i, ret;
10949
10950         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
10951
10952         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
10953         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
10954         tw32(RDMAC_STATUS, 0);
10955         tw32(WDMAC_STATUS, 0);
10956
10957         tw32(BUFMGR_MODE, 0);
10958         tw32(FTQ_RESET, 0);
10959
10960         test_desc.addr_hi = ((u64) buf_dma) >> 32;
10961         test_desc.addr_lo = buf_dma & 0xffffffff;
10962         test_desc.nic_mbuf = 0x00002100;
10963         test_desc.len = size;
10964
10965         /*
10966          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
10967          * the *second* time the tg3 driver was getting loaded after an
10968          * initial scan.
10969          *
10970          * Broadcom tells me:
10971          *   ...the DMA engine is connected to the GRC block and a DMA
10972          *   reset may affect the GRC block in some unpredictable way...
10973          *   The behavior of resets to individual blocks has not been tested.
10974          *
10975          * Broadcom noted the GRC reset will also reset all sub-components.
10976          */
10977         if (to_device) {
10978                 test_desc.cqid_sqid = (13 << 8) | 2;
10979
10980                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
10981                 udelay(40);
10982         } else {
10983                 test_desc.cqid_sqid = (16 << 8) | 7;
10984
10985                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
10986                 udelay(40);
10987         }
10988         test_desc.flags = 0x00000005;
10989
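        /* Copy the test descriptor into NIC SRAM one 32-bit word at a
         * time through the PCI config-space memory window registers.
         */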
10990         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
10991                 u32 val;
10992
10993                 val = *(((u32 *)&test_desc) + i);
10994                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
10995                                        sram_dma_descs + (i * sizeof(u32)));
10996                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
10997         }
10998         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
10999
11000         if (to_device) {
11001                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
11002         } else {
11003                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
11004         }
11005
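        /* Poll the completion FIFO for up to ~4ms (40 x 100us), waiting
         * for the descriptor queued above to be reported back.
         */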
11006         ret = -ENODEV;
11007         for (i = 0; i < 40; i++) {
11008                 u32 val;
11009
11010                 if (to_device)
11011                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
11012                 else
11013                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
11014                 if ((val & 0xffff) == sram_dma_descs) {
11015                         ret = 0;
11016                         break;
11017                 }
11018
11019                 udelay(100);
11020         }
11021
11022         return ret;
11023 }
11024
11025 #define TEST_BUFFER_SIZE        0x2000
11026
11027 static int __devinit tg3_test_dma(struct tg3 *tp)
11028 {
11029         dma_addr_t buf_dma;
11030         u32 *buf, saved_dma_rwctrl;
11031         int ret;
11032
11033         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
11034         if (!buf) {
11035                 ret = -ENOMEM;
11036                 goto out_nofree;
11037         }
11038
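        /* The 0x7/0x6 values below fill the DMA_RWCTRL_PCI_{WRITE,READ}_CMD
         * fields; they appear to be the PCI bus command codes the DMA engine
         * uses (0111b Memory Write, 0110b Memory Read).
         */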
11039         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
11040                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
11041
11042         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
11043
11044         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11045                 /* DMA read watermark not used on PCIE */
11046                 tp->dma_rwctrl |= 0x00180000;
11047         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
11048                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
11049                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
11050                         tp->dma_rwctrl |= 0x003f0000;
11051                 else
11052                         tp->dma_rwctrl |= 0x003f000f;
11053         } else {
11054                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
11055                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
11056                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
11057
11058                         /* If the 5704 is behind the EPB bridge, we can
11059                          * do the less restrictive ONE_DMA workaround for
11060                          * better performance.
11061                          */
11062                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
11063                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
11064                                 tp->dma_rwctrl |= 0x8000;
11065                         else if (ccval == 0x6 || ccval == 0x7)
11066                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
11067
11068                         /* Set bit 23 to enable PCIX hw bug fix */
11069                         tp->dma_rwctrl |= 0x009f0000;
11070                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
11071                         /* 5780 always in PCIX mode */
11072                         tp->dma_rwctrl |= 0x00144000;
11073                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11074                         /* 5714 always in PCIX mode */
11075                         tp->dma_rwctrl |= 0x00148000;
11076                 } else {
11077                         tp->dma_rwctrl |= 0x001b000f;
11078                 }
11079         }
11080
11081         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
11082             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
11083                 tp->dma_rwctrl &= 0xfffffff0;
11084
11085         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11086             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
11087                 /* Remove this if it causes problems for some boards. */
11088                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
11089
11090                 /* On 5700/5701 chips, we need to set this bit.
11091                  * Otherwise the chip will issue cacheline transactions
11092                  * to streamable DMA memory with not all the byte
11093                  * enables turned on.  This is an error on several
11094                  * RISC PCI controllers, in particular sparc64.
11095                  *
11096                  * On 5703/5704 chips, this bit has been reassigned
11097                  * a different meaning.  In particular, it is used
11098                  * on those chips to enable a PCI-X workaround.
11099                  */
11100                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
11101         }
11102
11103         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11104
11105 #if 0
11106         /* Unneeded, already done by tg3_get_invariants.  */
11107         tg3_switch_clocks(tp);
11108 #endif
11109
11110         ret = 0;
11111         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11112             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
11113                 goto out;
11114
11115         /* It is best to perform the DMA test with maximum write burst size
11116          * to expose the 5700/5701 write DMA bug.
11117          */
11118         saved_dma_rwctrl = tp->dma_rwctrl;
11119         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11120         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11121
11122         while (1) {
11123                 u32 *p = buf, i;
11124
11125                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
11126                         p[i] = i;
11127
11128                 /* Send the buffer to the chip. */
11129                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
11130                 if (ret) {
11131                         printk(KERN_ERR "tg3_test_dma() write of buffer failed, err %d\n", ret);
11132                         break;
11133                 }
11134
11135 #if 0
11136                 /* validate data reached card RAM correctly. */
11137                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
11138                         u32 val;
11139                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
11140                         if (le32_to_cpu(val) != p[i]) {
11141                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
11142                                 /* ret = -ENODEV here? */
11143                         }
11144                         p[i] = 0;
11145                 }
11146 #endif
11147                 /* Now read it back. */
11148                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
11149                 if (ret) {
11150                         printk(KERN_ERR "tg3_test_dma() read of buffer failed, err %d\n", ret);
11151
11152                         break;
11153                 }
11154
11155                 /* Verify it. */
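                /* On a mismatch, first retry the whole test with the most
                 * conservative 16-byte write boundary; only if corruption
                 * persists at that setting do we report -ENODEV.
                 */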
11156                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
11157                         if (p[i] == i)
11158                                 continue;
11159
11160                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11161                             DMA_RWCTRL_WRITE_BNDRY_16) {
11162                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11163                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11164                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11165                                 break;
11166                         } else {
11167                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
11168                                 ret = -ENODEV;
11169                                 goto out;
11170                         }
11171                 }
11172
11173                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
11174                         /* Success. */
11175                         ret = 0;
11176                         break;
11177                 }
11178         }
11179         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
11180             DMA_RWCTRL_WRITE_BNDRY_16) {
11181                 static struct pci_device_id dma_wait_state_chipsets[] = {
11182                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
11183                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
11184                         { },
11185                 };
11186
11187                 /* DMA test passed without adjusting DMA boundary;
11188                  * now look for chipsets that are known to expose the
11189                  * DMA bug without failing the test.
11190                  */
11191                 if (pci_dev_present(dma_wait_state_chipsets)) {
11192                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
11193                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
11194                 }
11195                 else
11196                         /* Safe to use the calculated DMA boundary. */
11197                         tp->dma_rwctrl = saved_dma_rwctrl;
11198
11199                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
11200         }
11201
11202 out:
11203         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
11204 out_nofree:
11205         return ret;
11206 }
11207
11208 static void __devinit tg3_init_link_config(struct tg3 *tp)
11209 {
11210         tp->link_config.advertising =
11211                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11212                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11213                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11214                  ADVERTISED_Autoneg | ADVERTISED_MII);
11215         tp->link_config.speed = SPEED_INVALID;
11216         tp->link_config.duplex = DUPLEX_INVALID;
11217         tp->link_config.autoneg = AUTONEG_ENABLE;
11218         tp->link_config.active_speed = SPEED_INVALID;
11219         tp->link_config.active_duplex = DUPLEX_INVALID;
11220         tp->link_config.phy_is_low_power = 0;
11221         tp->link_config.orig_speed = SPEED_INVALID;
11222         tp->link_config.orig_duplex = DUPLEX_INVALID;
11223         tp->link_config.orig_autoneg = AUTONEG_INVALID;
11224 }
11225
11226 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
11227 {
11228         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11229                 tp->bufmgr_config.mbuf_read_dma_low_water =
11230                         DEFAULT_MB_RDMA_LOW_WATER_5705;
11231                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11232                         DEFAULT_MB_MACRX_LOW_WATER_5705;
11233                 tp->bufmgr_config.mbuf_high_water =
11234                         DEFAULT_MB_HIGH_WATER_5705;
11235
11236                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11237                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
11238                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11239                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
11240                 tp->bufmgr_config.mbuf_high_water_jumbo =
11241                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
11242         } else {
11243                 tp->bufmgr_config.mbuf_read_dma_low_water =
11244                         DEFAULT_MB_RDMA_LOW_WATER;
11245                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11246                         DEFAULT_MB_MACRX_LOW_WATER;
11247                 tp->bufmgr_config.mbuf_high_water =
11248                         DEFAULT_MB_HIGH_WATER;
11249
11250                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11251                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
11252                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11253                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
11254                 tp->bufmgr_config.mbuf_high_water_jumbo =
11255                         DEFAULT_MB_HIGH_WATER_JUMBO;
11256         }
11257
11258         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
11259         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
11260 }
11261
11262 static char * __devinit tg3_phy_string(struct tg3 *tp)
11263 {
11264         switch (tp->phy_id & PHY_ID_MASK) {
11265         case PHY_ID_BCM5400:    return "5400";
11266         case PHY_ID_BCM5401:    return "5401";
11267         case PHY_ID_BCM5411:    return "5411";
11268         case PHY_ID_BCM5701:    return "5701";
11269         case PHY_ID_BCM5703:    return "5703";
11270         case PHY_ID_BCM5704:    return "5704";
11271         case PHY_ID_BCM5705:    return "5705";
11272         case PHY_ID_BCM5750:    return "5750";
11273         case PHY_ID_BCM5752:    return "5752";
11274         case PHY_ID_BCM5714:    return "5714";
11275         case PHY_ID_BCM5780:    return "5780";
11276         case PHY_ID_BCM5755:    return "5755";
11277         case PHY_ID_BCM5787:    return "5787";
11278         case PHY_ID_BCM5756:    return "5722/5756";
11279         case PHY_ID_BCM8002:    return "8002/serdes";
11280         case 0:                 return "serdes";
11281         default:                return "unknown";
11282         }
11283 }
11284
11285 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
11286 {
11287         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11288                 strcpy(str, "PCI Express");
11289                 return str;
11290         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11291                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
11292
11293                 strcpy(str, "PCIX:");
11294
11295                 if ((clock_ctrl == 7) ||
11296                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
11297                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
11298                         strcat(str, "133MHz");
11299                 else if (clock_ctrl == 0)
11300                         strcat(str, "33MHz");
11301                 else if (clock_ctrl == 2)
11302                         strcat(str, "50MHz");
11303                 else if (clock_ctrl == 4)
11304                         strcat(str, "66MHz");
11305                 else if (clock_ctrl == 6)
11306                         strcat(str, "100MHz");
11307         } else {
11308                 strcpy(str, "PCI:");
11309                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
11310                         strcat(str, "66MHz");
11311                 else
11312                         strcat(str, "33MHz");
11313         }
11314         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
11315                 strcat(str, ":32-bit");
11316         else
11317                 strcat(str, ":64-bit");
11318         return str;
11319 }
11320
11321 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
11322 {
11323         struct pci_dev *peer;
11324         unsigned int func, devnr = tp->pdev->devfn & ~7;
11325
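        /* devfn encodes slot << 3 | function, so masking off the low three
         * bits and scanning functions 0-7 finds the other port (if any)
         * that shares this slot.
         */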
11326         for (func = 0; func < 8; func++) {
11327                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
11328                 if (peer && peer != tp->pdev)
11329                         break;
11330                 pci_dev_put(peer);
11331         }
11332         /* 5704 can be configured in single-port mode, set peer to
11333          * tp->pdev in that case.
11334          */
11335         if (!peer) {
11336                 peer = tp->pdev;
11337                 return peer;
11338         }
11339
11340         /*
11341          * We don't need to keep the refcount elevated; there's no way
11342          * to remove one half of this device without removing the other.
11343          */
11344         pci_dev_put(peer);
11345
11346         return peer;
11347 }
11348
11349 static void __devinit tg3_init_coal(struct tg3 *tp)
11350 {
11351         struct ethtool_coalesce *ec = &tp->coal;
11352
11353         memset(ec, 0, sizeof(*ec));
11354         ec->cmd = ETHTOOL_GCOALESCE;
11355         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
11356         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
11357         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
11358         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
11359         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
11360         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
11361         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
11362         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
11363         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
11364
11365         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
11366                                  HOSTCC_MODE_CLRTICK_TXBD)) {
11367                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
11368                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
11369                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
11370                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
11371         }
11372
11373         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11374                 ec->rx_coalesce_usecs_irq = 0;
11375                 ec->tx_coalesce_usecs_irq = 0;
11376                 ec->stats_block_coalesce_usecs = 0;
11377         }
11378 }
11379
11380 static int __devinit tg3_init_one(struct pci_dev *pdev,
11381                                   const struct pci_device_id *ent)
11382 {
11383         static int tg3_version_printed = 0;
11384         unsigned long tg3reg_base, tg3reg_len;
11385         struct net_device *dev;
11386         struct tg3 *tp;
11387         int i, err, pm_cap;
11388         char str[40];
11389         u64 dma_mask, persist_dma_mask;
11390
11391         if (tg3_version_printed++ == 0)
11392                 printk(KERN_INFO "%s", version);
11393
11394         err = pci_enable_device(pdev);
11395         if (err) {
11396                 printk(KERN_ERR PFX "Cannot enable PCI device, "
11397                        "aborting.\n");
11398                 return err;
11399         }
11400
11401         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11402                 printk(KERN_ERR PFX "Cannot find proper PCI device "
11403                        "base address, aborting.\n");
11404                 err = -ENODEV;
11405                 goto err_out_disable_pdev;
11406         }
11407
11408         err = pci_request_regions(pdev, DRV_MODULE_NAME);
11409         if (err) {
11410                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
11411                        "aborting.\n");
11412                 goto err_out_disable_pdev;
11413         }
11414
11415         pci_set_master(pdev);
11416
11417         /* Find power-management capability. */
11418         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11419         if (pm_cap == 0) {
11420                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
11421                        "aborting.\n");
11422                 err = -EIO;
11423                 goto err_out_free_res;
11424         }
11425
11426         tg3reg_base = pci_resource_start(pdev, 0);
11427         tg3reg_len = pci_resource_len(pdev, 0);
11428
11429         dev = alloc_etherdev(sizeof(*tp));
11430         if (!dev) {
11431                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
11432                 err = -ENOMEM;
11433                 goto err_out_free_res;
11434         }
11435
11436         SET_MODULE_OWNER(dev);
11437         SET_NETDEV_DEV(dev, &pdev->dev);
11438
11439 #if TG3_VLAN_TAG_USED
11440         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
11441         dev->vlan_rx_register = tg3_vlan_rx_register;
11442         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
11443 #endif
11444
11445         tp = netdev_priv(dev);
11446         tp->pdev = pdev;
11447         tp->dev = dev;
11448         tp->pm_cap = pm_cap;
11449         tp->mac_mode = TG3_DEF_MAC_MODE;
11450         tp->rx_mode = TG3_DEF_RX_MODE;
11451         tp->tx_mode = TG3_DEF_TX_MODE;
11452         tp->mi_mode = MAC_MI_MODE_BASE;
11453         if (tg3_debug > 0)
11454                 tp->msg_enable = tg3_debug;
11455         else
11456                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
11457
11458         /* The word/byte swap controls here control register access byte
11459          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
11460          * setting below.
11461          */
11462         tp->misc_host_ctrl =
11463                 MISC_HOST_CTRL_MASK_PCI_INT |
11464                 MISC_HOST_CTRL_WORD_SWAP |
11465                 MISC_HOST_CTRL_INDIR_ACCESS |
11466                 MISC_HOST_CTRL_PCISTATE_RW;
11467
11468         /* The NONFRM (non-frame) byte/word swap controls take effect
11469          * on descriptor entries, anything which isn't packet data.
11470          *
11471          * The StrongARM chips on the board (one for tx, one for rx)
11472          * are running in big-endian mode.
11473          */
11474         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
11475                         GRC_MODE_WSWAP_NONFRM_DATA);
11476 #ifdef __BIG_ENDIAN
11477         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
11478 #endif
11479         spin_lock_init(&tp->lock);
11480         spin_lock_init(&tp->indirect_lock);
11481         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
11482
11483         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
11484         if (!tp->regs) {
11485                 printk(KERN_ERR PFX "Cannot map device registers, "
11486                        "aborting.\n");
11487                 err = -ENOMEM;
11488                 goto err_out_free_dev;
11489         }
11490
11491         tg3_init_link_config(tp);
11492
11493         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
11494         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
11495         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
11496
11497         dev->open = tg3_open;
11498         dev->stop = tg3_close;
11499         dev->get_stats = tg3_get_stats;
11500         dev->set_multicast_list = tg3_set_rx_mode;
11501         dev->set_mac_address = tg3_set_mac_addr;
11502         dev->do_ioctl = tg3_ioctl;
11503         dev->tx_timeout = tg3_tx_timeout;
11504         dev->poll = tg3_poll;
11505         dev->ethtool_ops = &tg3_ethtool_ops;
11506         dev->weight = 64;
11507         dev->watchdog_timeo = TG3_TX_TIMEOUT;
11508         dev->change_mtu = tg3_change_mtu;
11509         dev->irq = pdev->irq;
11510 #ifdef CONFIG_NET_POLL_CONTROLLER
11511         dev->poll_controller = tg3_poll_controller;
11512 #endif
11513
11514         err = tg3_get_invariants(tp);
11515         if (err) {
11516                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
11517                        "aborting.\n");
11518                 goto err_out_iounmap;
11519         }
11520
11521         /* The EPB bridge inside 5714, 5715, and 5780 and any
11522          * device behind the EPB cannot support DMA addresses > 40-bit.
11523          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
11524          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
11525          * do DMA address check in tg3_start_xmit().
11526          */
11527         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
11528                 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
11529         else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
11530                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
11531 #ifdef CONFIG_HIGHMEM
11532                 dma_mask = DMA_64BIT_MASK;
11533 #endif
11534         } else
11535                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
11536
11537         /* Configure DMA attributes. */
11538         if (dma_mask > DMA_32BIT_MASK) {
11539                 err = pci_set_dma_mask(pdev, dma_mask);
11540                 if (!err) {
11541                         dev->features |= NETIF_F_HIGHDMA;
11542                         err = pci_set_consistent_dma_mask(pdev,
11543                                                           persist_dma_mask);
11544                         if (err < 0) {
11545                                 printk(KERN_ERR PFX "Unable to obtain 64 bit "
11546                                        "DMA for consistent allocations\n");
11547                                 goto err_out_iounmap;
11548                         }
11549                 }
11550         }
11551         if (err || dma_mask == DMA_32BIT_MASK) {
11552                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
11553                 if (err) {
11554                         printk(KERN_ERR PFX "No usable DMA configuration, "
11555                                "aborting.\n");
11556                         goto err_out_iounmap;
11557                 }
11558         }
11559
11560         tg3_init_bufmgr_config(tp);
11561
11562 #if TG3_TSO_SUPPORT != 0
11563         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11564                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11565         }
11566         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11567             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11568             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
11569             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
11570                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
11571         } else {
11572                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11573         }
11574
11575         /* TSO is on by default on chips that support hardware TSO.
11576          * Firmware TSO on older chips gives lower performance, so it
11577          * is off by default, but can be enabled using ethtool.
11578          */
11579         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11580                 dev->features |= NETIF_F_TSO;
11581                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
11582                         dev->features |= NETIF_F_TSO6;
11583         }
11584
11585 #endif
11586
11587         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
11588             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
11589             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
11590                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
11591                 tp->rx_pending = 63;
11592         }
11593
11594         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11595             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11596                 tp->pdev_peer = tg3_find_peer(tp);
11597
11598         err = tg3_get_device_address(tp);
11599         if (err) {
11600                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
11601                        "aborting.\n");
11602                 goto err_out_iounmap;
11603         }
11604
11605         /*
11606          * Reset chip in case UNDI or EFI driver did not shut it down.
11607          * The DMA self test will enable WDMAC and we'll see (spurious)
11608          * pending DMA on the PCI bus at that point.
11609          */
11610         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
11611             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11612                 pci_save_state(tp->pdev);
11613                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
11614                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11615         }
11616
11617         err = tg3_test_dma(tp);
11618         if (err) {
11619                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
11620                 goto err_out_iounmap;
11621         }
11622
11623         /* Tigon3 can do ipv4 only... and some chips have buggy
11624          * checksumming.
11625          */
11626         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
11627                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11628                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
11629                         dev->features |= NETIF_F_HW_CSUM;
11630                 else
11631                         dev->features |= NETIF_F_IP_CSUM;
11632                 dev->features |= NETIF_F_SG;
11633                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
11634         } else
11635                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
11636
11637         /* flow control autonegotiation is default behavior */
11638         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
11639
11640         tg3_init_coal(tp);
11641
11642         /* Now that we have fully setup the chip, save away a snapshot
11643          * of the PCI config space.  We need to restore this after
11644          * GRC_MISC_CFG core clock resets and some resume events.
11645          */
11646         pci_save_state(tp->pdev);
11647
11648         err = register_netdev(dev);
11649         if (err) {
11650                 printk(KERN_ERR PFX "Cannot register net device, "
11651                        "aborting.\n");
11652                 goto err_out_iounmap;
11653         }
11654
11655         pci_set_drvdata(pdev, dev);
11656
11657         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
11658                dev->name,
11659                tp->board_part_number,
11660                tp->pci_chip_rev_id,
11661                tg3_phy_string(tp),
11662                tg3_bus_string(tp, str),
11663                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
11664
11665         for (i = 0; i < 6; i++)
11666                 printk("%2.2x%c", dev->dev_addr[i],
11667                        i == 5 ? '\n' : ':');
11668
11669         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
11670                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
11671                "TSOcap[%d]\n",
11672                dev->name,
11673                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
11674                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
11675                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
11676                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
11677                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
11678                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
11679                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
11680         printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
11681                dev->name, tp->dma_rwctrl,
11682                (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
11683                 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
11684
11685         netif_carrier_off(tp->dev);
11686
11687         return 0;
11688
11689 err_out_iounmap:
11690         if (tp->regs) {
11691                 iounmap(tp->regs);
11692                 tp->regs = NULL;
11693         }
11694
11695 err_out_free_dev:
11696         free_netdev(dev);
11697
11698 err_out_free_res:
11699         pci_release_regions(pdev);
11700
11701 err_out_disable_pdev:
11702         pci_disable_device(pdev);
11703         pci_set_drvdata(pdev, NULL);
11704         return err;
11705 }
11706
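/* Undo tg3_init_one: unregister the netdev, unmap the registers and hand
 * the PCI resources back.
 */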
11707 static void __devexit tg3_remove_one(struct pci_dev *pdev)
11708 {
11709         struct net_device *dev = pci_get_drvdata(pdev);
11710
11711         if (dev) {
11712                 struct tg3 *tp = netdev_priv(dev);
11713
11714                 flush_scheduled_work();
11715                 unregister_netdev(dev);
11716                 if (tp->regs) {
11717                         iounmap(tp->regs);
11718                         tp->regs = NULL;
11719                 }
11720                 free_netdev(dev);
11721                 pci_release_regions(pdev);
11722                 pci_disable_device(pdev);
11723                 pci_set_drvdata(pdev, NULL);
11724         }
11725 }
11726
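/* Suspend: quiesce the netif layer and timer, halt the chip and drop it
 * into the requested low-power state.  If that fails, bring the hardware
 * back up so the device keeps working.
 */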
11727 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
11728 {
11729         struct net_device *dev = pci_get_drvdata(pdev);
11730         struct tg3 *tp = netdev_priv(dev);
11731         int err;
11732
11733         if (!netif_running(dev))
11734                 return 0;
11735
11736         flush_scheduled_work();
11737         tg3_netif_stop(tp);
11738
11739         del_timer_sync(&tp->timer);
11740
11741         tg3_full_lock(tp, 1);
11742         tg3_disable_ints(tp);
11743         tg3_full_unlock(tp);
11744
11745         netif_device_detach(dev);
11746
11747         tg3_full_lock(tp, 0);
11748         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11749         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
11750         tg3_full_unlock(tp);
11751
11752         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
11753         if (err) {
11754                 tg3_full_lock(tp, 0);
11755
11756                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11757                 if (tg3_restart_hw(tp, 1))
11758                         goto out;
11759
11760                 tp->timer.expires = jiffies + tp->timer_offset;
11761                 add_timer(&tp->timer);
11762
11763                 netif_device_attach(dev);
11764                 tg3_netif_start(tp);
11765
11766 out:
11767                 tg3_full_unlock(tp);
11768         }
11769
11770         return err;
11771 }
11772
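/* Resume: restore PCI config space, bring the chip back to D0 and
 * reinitialize the hardware and timer.
 */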
11773 static int tg3_resume(struct pci_dev *pdev)
11774 {
11775         struct net_device *dev = pci_get_drvdata(pdev);
11776         struct tg3 *tp = netdev_priv(dev);
11777         int err;
11778
11779         if (!netif_running(dev))
11780                 return 0;
11781
11782         pci_restore_state(tp->pdev);
11783
11784         err = tg3_set_power_state(tp, PCI_D0);
11785         if (err)
11786                 return err;
11787
11788         netif_device_attach(dev);
11789
11790         tg3_full_lock(tp, 0);
11791
11792         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11793         err = tg3_restart_hw(tp, 1);
11794         if (err)
11795                 goto out;
11796
11797         tp->timer.expires = jiffies + tp->timer_offset;
11798         add_timer(&tp->timer);
11799
11800         tg3_netif_start(tp);
11801
11802 out:
11803         tg3_full_unlock(tp);
11804
11805         return err;
11806 }
11807
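/* Hook the probe/remove and suspend/resume entry points into the PCI core. */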
11808 static struct pci_driver tg3_driver = {
11809         .name           = DRV_MODULE_NAME,
11810         .id_table       = tg3_pci_tbl,
11811         .probe          = tg3_init_one,
11812         .remove         = __devexit_p(tg3_remove_one),
11813         .suspend        = tg3_suspend,
11814         .resume         = tg3_resume
11815 };
11816
11817 static int __init tg3_init(void)
11818 {
11819         return pci_register_driver(&tg3_driver);
11820 }
11821
11822 static void __exit tg3_cleanup(void)
11823 {
11824         pci_unregister_driver(&tg3_driver);
11825 }
11826
11827 module_init(tg3_init);
11828 module_exit(tg3_cleanup);