[TG3]: Improve ASF heartbeat.
[firefly-linux-kernel-4.4.55.git] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
36 #include <linux/ip.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
41
42 #include <net/checksum.h>
43
44 #include <asm/system.h>
45 #include <asm/io.h>
46 #include <asm/byteorder.h>
47 #include <asm/uaccess.h>
48
49 #ifdef CONFIG_SPARC64
50 #include <asm/idprom.h>
51 #include <asm/oplib.h>
52 #include <asm/pbm.h>
53 #endif
54
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
57 #else
58 #define TG3_VLAN_TAG_USED 0
59 #endif
60
61 #ifdef NETIF_F_TSO
62 #define TG3_TSO_SUPPORT 1
63 #else
64 #define TG3_TSO_SUPPORT 0
65 #endif
66
67 #include "tg3.h"
68
/* Driver identity strings used in probe messages and ethtool. */
#define DRV_MODULE_NAME		"tg3"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"3.65"
#define DRV_MODULE_RELDATE	"August 07, 2006"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
/* Default netif_msg bitmap when the tg3_debug module parameter is -1:
 * everything except the per-packet RX/TX status categories.
 */
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

/* Byte sizes of the descriptor rings, for DMA allocation. */
#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
/* Ring sizes are powers of two, so wrap with a mask instead of '%'. */
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

/* RX buffer sizes: max frame + rx_offset alignment slack + 64 spare. */
#define RX_PKT_BUF_SZ		(1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ	(9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH		(TG3_TX_RING_SIZE / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST		6

/* One-line banner printed once at module load. */
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
141
MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Debug message bitmap; -1 selects TG3_DEF_MSG_ENABLE at probe time. */
static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
150
/* PCI IDs this driver binds to: Broadcom Tigon3 variants plus
 * SysKonnect, Altima, and Apple OEM parts.  Terminated by the
 * empty sentinel entry.
 */
static struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
211
/* ETHTOOL_GSTRINGS labels for the hardware statistics.  The entry
 * order must match the u64 layout of struct tg3_ethtool_stats --
 * do not reorder or insert entries independently.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};
292
/* ETHTOOL_GSTRINGS labels for the TG3_NUM_TEST self-test results;
 * order matches the result slots filled in by the self-test code.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};
303
/* Post a 32-bit register write through the memory-mapped BAR. */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}
308
309 static u32 tg3_read32(struct tg3 *tp, u32 off)
310 {
311         return (readl(tp->regs + off));
312 }
313
/* Write a register indirectly via PCI config space (REG_BASE_ADDR /
 * REG_DATA window).  indirect_lock serializes use of the shared window.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
323
/* MMIO register write followed by a read-back to flush the posted write. */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
329
/* Read a register indirectly via the PCI config-space window;
 * counterpart of tg3_write_indirect_reg32().
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
341
/* Write a mailbox register indirectly through PCI config space.
 * Two mailboxes have dedicated config-space aliases and are written
 * directly; everything else goes through the REG_BASE_ADDR/REG_DATA
 * window at the mailbox alias offset (+0x5600).
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
371
/* Read a mailbox register through the indirect config-space window
 * (mailbox alias offset +0x5600, as in tg3_write_indirect_mbox).
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
383
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method: write, optionally delay, then read back
		 * to force the write out of the posting buffers.
		 */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
408
409 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
410 {
411         tp->write32_mbox(tp, off, val);
412         if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
413             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
414                 tp->read32_mbox(tp, off);
415 }
416
/* Write a TX mailbox.  The value is written twice on chips with the
 * TXD mailbox hardware bug, and read back when mailbox writes can be
 * reordered, so the NIC sees the doorbell in order.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}
426
/* Register access shorthands.  All dispatch through the per-chip
 * function pointers selected at probe time; the _f variants flush,
 * and tw32_wait_f additionally delays (see _tw32_flush).
 */
#define tw32_mailbox(reg, val)	tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)	tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)	tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)	tp->read32_mbox(tp, reg)

#define tw32(reg,val)		tp->write32(tp, reg, val)
#define tw32_f(reg,val)		_tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us)	_tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)		tp->read32(tp, reg)
437
/* Write a word of NIC on-board SRAM through the memory window,
 * using config space or MMIO depending on TG3_FLAG_SRAM_USE_CONFIG.
 * indirect_lock serializes use of the shared window registers.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
458
/* Read a word of NIC on-board SRAM; counterpart of tg3_write_mem(). */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
479
/* Mask the PCI interrupt at the host-control level and write 1 to the
 * interrupt mailbox to disable further status-block interrupts.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
486
487 static inline void tg3_cond_int(struct tg3 *tp)
488 {
489         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
490             (tp->hw_status->status & SD_STATUS_UPDATED))
491                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
492 }
493
/* Re-enable interrupts: unmask at the host-control level, then ack the
 * last processed tag in the interrupt mailbox.  The 1-shot MSI chips
 * need the mailbox written twice.  Finally force an interrupt if a
 * status update is already pending (tg3_cond_int).
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();		/* irq_sync must be visible before ints are enabled */

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}
508
509 static inline unsigned int tg3_has_work(struct tg3 *tp)
510 {
511         struct tg3_hw_status *sblk = tp->hw_status;
512         unsigned int work_exists = 0;
513
514         /* check for phy events */
515         if (!(tp->tg3_flags &
516               (TG3_FLAG_USE_LINKCHG_REG |
517                TG3_FLAG_POLL_SERDES))) {
518                 if (sblk->status & SD_STATUS_LINK_CHG)
519                         work_exists = 1;
520         }
521         /* check for RX/TX work to do */
522         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
523             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
524                 work_exists = 1;
525
526         return work_exists;
527 }
528
/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	mmiowb();	/* order the mailbox write before the check below */

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
549
550 static inline void tg3_netif_stop(struct tg3 *tp)
551 {
552         tp->dev->trans_start = jiffies; /* prevent tx timeout */
553         netif_poll_disable(tp->dev);
554         netif_tx_disable(tp->dev);
555 }
556
557 static inline void tg3_netif_start(struct tg3 *tp)
558 {
559         netif_wake_queue(tp->dev);
560         /* NOTE: unconditional netif_wake_queue is only appropriate
561          * so long as all callers are assured to have free tx slots
562          * (such as after tg3_init_hw)
563          */
564         netif_poll_enable(tp->dev);
565         tp->hw_status->status |= SD_STATUS_UPDATED;
566         tg3_enable_ints(tp);
567 }
568
/* Reprogram TG3PCI_CLOCK_CTRL, preserving only the CLKRUN bits and the
 * low clock divider field, stepping through intermediate ALTCLK states
 * where the hardware requires it.  No-op on 5780-class chips.
 * Each step uses tw32_wait_f with a 40us settle time (see _tw32_flush).
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
		return;

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Drop the 44MHz core clock in two steps via ALTCLK. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
599
#define PHY_BUSY_LOOPS	5000

/* Read MII register 'reg' of the PHY via the MAC's MI (MDIO) interface.
 * Returns 0 with the value in *val, or -EBUSY if the interface stays
 * busy for PHY_BUSY_LOOPS polls.  MI auto-polling is suspended around
 * the transaction (it would race with a manual MI command) and
 * restored afterwards.
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	/* Build the MI command frame: PHY address, register, read opcode. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll until the MI interface clears its busy bit. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
650
/* Write 'val' to MII register 'reg' of the PHY via the MAC's MI (MDIO)
 * interface.  Returns 0 on success, -EBUSY on timeout.  Auto-polling is
 * suspended and restored exactly as in tg3_readphy().
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Build the MI command frame: address, register, data, write opcode. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll until the MI interface clears its busy bit. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
695
/* Enable the PHY's "ethernet@wirespeed" feature via the auxiliary
 * control shadow register (read-modify-write of MII_TG3_AUX_CTRL).
 * Skipped on chips flagged TG3_FLG2_NO_ETH_WIRE_SPEED.
 * NOTE(review): 0x7007 selects the aux-ctrl shadow page and bits
 * 15|4 presumably enable wirespeed -- Broadcom-documented magic,
 * not derivable from this file.
 */
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}
708
709 static int tg3_bmcr_reset(struct tg3 *tp)
710 {
711         u32 phy_control;
712         int limit, err;
713
714         /* OK, reset it, and poll the BMCR_RESET bit until it
715          * clears or we time out.
716          */
717         phy_control = BMCR_RESET;
718         err = tg3_writephy(tp, MII_BMCR, phy_control);
719         if (err != 0)
720                 return -EBUSY;
721
722         limit = 5000;
723         while (limit--) {
724                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
725                 if (err != 0)
726                         return -EBUSY;
727
728                 if ((phy_control & BMCR_RESET) == 0) {
729                         udelay(40);
730                         break;
731                 }
732                 udelay(10);
733         }
734         if (limit <= 0)
735                 return -EBUSY;
736
737         return 0;
738 }
739
740 static int tg3_wait_macro_done(struct tg3 *tp)
741 {
742         int limit = 100;
743
744         while (limit--) {
745                 u32 tmp32;
746
747                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
748                         if ((tmp32 & 0x1000) == 0)
749                                 break;
750                 }
751         }
752         if (limit <= 0)
753                 return -EBUSY;
754
755         return 0;
756 }
757
/* Write a fixed test pattern into each of the PHY's four DSP channels,
 * read it back, and verify it.  On a readback mismatch the TAP1/TAP2
 * values are rewritten and -EBUSY is returned without requesting a
 * reset; on macro-done timeouts *resetp is set so the caller retries
 * with a fresh PHY reset.  Returns 0 if all channels verify.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	/* Per-channel (low, high) word pairs; readback is masked to
	 * 15 and 4 significant bits respectively before comparison.
	 */
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select this channel's DSP block and enter write mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Commit the writes and wait for the macro to finish. */
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the channel and switch to readback mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read back and verify each (low, high) pair. */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: restore TAP settings before
				 * failing (no PHY reset requested).
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
823
824 static int tg3_phy_reset_chanpat(struct tg3 *tp)
825 {
826         int chan;
827
828         for (chan = 0; chan < 4; chan++) {
829                 int i;
830
831                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
832                              (chan * 0x2000) | 0x0200);
833                 tg3_writephy(tp, 0x16, 0x0002);
834                 for (i = 0; i < 6; i++)
835                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
836                 tg3_writephy(tp, 0x16, 0x0202);
837                 if (tg3_wait_macro_done(tp))
838                         return -EBUSY;
839         }
840
841         return 0;
842 }
843
844 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
845 {
846         u32 reg32, phy9_orig;
847         int retries, do_phy_reset, err;
848
849         retries = 10;
850         do_phy_reset = 1;
851         do {
852                 if (do_phy_reset) {
853                         err = tg3_bmcr_reset(tp);
854                         if (err)
855                                 return err;
856                         do_phy_reset = 0;
857                 }
858
859                 /* Disable transmitter and interrupt.  */
860                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
861                         continue;
862
863                 reg32 |= 0x3000;
864                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
865
866                 /* Set full-duplex, 1000 mbps.  */
867                 tg3_writephy(tp, MII_BMCR,
868                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
869
870                 /* Set to master mode.  */
871                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
872                         continue;
873
874                 tg3_writephy(tp, MII_TG3_CTRL,
875                              (MII_TG3_CTRL_AS_MASTER |
876                               MII_TG3_CTRL_ENABLE_AS_MASTER));
877
878                 /* Enable SM_DSP_CLOCK and 6dB.  */
879                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
880
881                 /* Block the PHY control access.  */
882                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
883                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
884
885                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
886                 if (!err)
887                         break;
888         } while (--retries);
889
890         err = tg3_phy_reset_chanpat(tp);
891         if (err)
892                 return err;
893
894         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
895         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
896
897         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
898         tg3_writephy(tp, 0x16, 0x0000);
899
900         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
901             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
902                 /* Set Extended packet length bit for jumbo frames */
903                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
904         }
905         else {
906                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
907         }
908
909         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
910
911         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
912                 reg32 &= ~0x3000;
913                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
914         } else if (!err)
915                 err = -EBUSY;
916
917         return err;
918 }
919
920 static void tg3_link_report(struct tg3 *);
921
/* This will reset the tigon3 PHY, applying any chip-specific DSP
 * workarounds and re-enabling wirespeed on success.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 phy_status;
	int err;

	/* BMSR latches link-down events; read it twice so the second
	 * read reflects the current state, and bail out if the PHY is
	 * not responding at all.
	 */
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	/* The reset will drop the link; report it down first. */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	/* 5703/5704/5705 require the DSP test-pattern reset sequence
	 * instead of a plain BMCR reset.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

out:
	/* Chip-specific DSP workarounds.  The register values below are
	 * vendor-supplied magic; the write order matters and must not
	 * be changed.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		/* NOTE(review): the duplicated write appears intentional
		 * for the 5704 A0 erratum; confirm before "fixing".
		 */
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
		    tg3_writephy(tp, MII_TG3_EXT_CTRL,
				 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	tg3_phy_set_wirespeed(tp);
	return 0;
}
1010
/* Configure the GPIO-controlled auxiliary (Vaux) power for this NIC
 * and, on dual-port 5704/5714 boards, coordinate with the peer port.
 * Aux power is kept on if either port needs it for WOL or ASF;
 * otherwise it is switched off via the GRC local-control GPIOs.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	/* LOM designs (flagged via EEPROM write-protect) must not touch
	 * the aux power GPIOs; see the matching "not a LOM" check in
	 * tg3_set_power_state().
	 */
	if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
		return;

	/* On dual-port chips, look up the other port's private state so
	 * its WOL/ASF needs can be taken into account.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	/* Keep aux power up if either port needs it for WOL or ASF. */
	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* If the peer port has already completed init,
			 * leave the GPIO handling to it.
			 */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				    NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			/* Staged GPIO writes follow; the ordering is part
			 * of the power sequencing and must not change.
			 */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			/* If the peer port has already completed init,
			 * leave the GPIO handling to it.
			 */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Pulse GPIO1 to switch auxiliary power off. */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
1106
1107 static int tg3_setup_phy(struct tg3 *, int);
1108
1109 #define RESET_KIND_SHUTDOWN     0
1110 #define RESET_KIND_INIT         1
1111 #define RESET_KIND_SUSPEND      2
1112
1113 static void tg3_write_sig_post_reset(struct tg3 *, int);
1114 static int tg3_halt_cpu(struct tg3 *, u32);
1115 static int tg3_nvram_lock(struct tg3 *);
1116 static void tg3_nvram_unlock(struct tg3 *);
1117
1118 static void tg3_power_down_phy(struct tg3 *tp)
1119 {
1120         /* The PHY should not be powered down on some chips because
1121          * of bugs.
1122          */
1123         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1124             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1125             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
1126              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1127                 return;
1128         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
1129 }
1130
1131 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
1132 {
1133         u32 misc_host_ctrl;
1134         u16 power_control, power_caps;
1135         int pm = tp->pm_cap;
1136
1137         /* Make sure register accesses (indirect or otherwise)
1138          * will function correctly.
1139          */
1140         pci_write_config_dword(tp->pdev,
1141                                TG3PCI_MISC_HOST_CTRL,
1142                                tp->misc_host_ctrl);
1143
1144         pci_read_config_word(tp->pdev,
1145                              pm + PCI_PM_CTRL,
1146                              &power_control);
1147         power_control |= PCI_PM_CTRL_PME_STATUS;
1148         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1149         switch (state) {
1150         case PCI_D0:
1151                 power_control |= 0;
1152                 pci_write_config_word(tp->pdev,
1153                                       pm + PCI_PM_CTRL,
1154                                       power_control);
1155                 udelay(100);    /* Delay after power state change */
1156
1157                 /* Switch out of Vaux if it is not a LOM */
1158                 if (!(tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
1159                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
1160
1161                 return 0;
1162
1163         case PCI_D1:
1164                 power_control |= 1;
1165                 break;
1166
1167         case PCI_D2:
1168                 power_control |= 2;
1169                 break;
1170
1171         case PCI_D3hot:
1172                 power_control |= 3;
1173                 break;
1174
1175         default:
1176                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1177                        "requested.\n",
1178                        tp->dev->name, state);
1179                 return -EINVAL;
1180         };
1181
1182         power_control |= PCI_PM_CTRL_PME_ENABLE;
1183
1184         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1185         tw32(TG3PCI_MISC_HOST_CTRL,
1186              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1187
1188         if (tp->link_config.phy_is_low_power == 0) {
1189                 tp->link_config.phy_is_low_power = 1;
1190                 tp->link_config.orig_speed = tp->link_config.speed;
1191                 tp->link_config.orig_duplex = tp->link_config.duplex;
1192                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1193         }
1194
1195         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
1196                 tp->link_config.speed = SPEED_10;
1197                 tp->link_config.duplex = DUPLEX_HALF;
1198                 tp->link_config.autoneg = AUTONEG_ENABLE;
1199                 tg3_setup_phy(tp, 0);
1200         }
1201
1202         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1203                 int i;
1204                 u32 val;
1205
1206                 for (i = 0; i < 200; i++) {
1207                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
1208                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1209                                 break;
1210                         msleep(1);
1211                 }
1212         }
1213         tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
1214                                              WOL_DRV_STATE_SHUTDOWN |
1215                                              WOL_DRV_WOL | WOL_SET_MAGIC_PKT);
1216
1217         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1218
1219         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1220                 u32 mac_mode;
1221
1222                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1223                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1224                         udelay(40);
1225
1226                         mac_mode = MAC_MODE_PORT_MODE_MII;
1227
1228                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1229                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1230                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1231                 } else {
1232                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1233                 }
1234
1235                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
1236                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1237
1238                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1239                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1240                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1241
1242                 tw32_f(MAC_MODE, mac_mode);
1243                 udelay(100);
1244
1245                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1246                 udelay(10);
1247         }
1248
1249         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1250             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1251              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1252                 u32 base_val;
1253
1254                 base_val = tp->pci_clock_ctrl;
1255                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1256                              CLOCK_CTRL_TXCLK_DISABLE);
1257
1258                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
1259                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
1260         } else if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
1261                 /* do nothing */
1262         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
1263                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1264                 u32 newbits1, newbits2;
1265
1266                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1267                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1268                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1269                                     CLOCK_CTRL_TXCLK_DISABLE |
1270                                     CLOCK_CTRL_ALTCLK);
1271                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1272                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1273                         newbits1 = CLOCK_CTRL_625_CORE;
1274                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1275                 } else {
1276                         newbits1 = CLOCK_CTRL_ALTCLK;
1277                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1278                 }
1279
1280                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
1281                             40);
1282
1283                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
1284                             40);
1285
1286                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1287                         u32 newbits3;
1288
1289                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1290                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1291                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1292                                             CLOCK_CTRL_TXCLK_DISABLE |
1293                                             CLOCK_CTRL_44MHZ_CORE);
1294                         } else {
1295                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1296                         }
1297
1298                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
1299                                     tp->pci_clock_ctrl | newbits3, 40);
1300                 }
1301         }
1302
1303         if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
1304             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1305                 /* Turn off the PHY */
1306                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1307                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1308                                      MII_TG3_EXT_CTRL_FORCE_LED_OFF);
1309                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
1310                         tg3_power_down_phy(tp);
1311                 }
1312         }
1313
1314         tg3_frob_aux_power(tp);
1315
1316         /* Workaround for unstable PLL clock */
1317         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1318             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1319                 u32 val = tr32(0x7d00);
1320
1321                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1322                 tw32(0x7d00, val);
1323                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
1324                         int err;
1325
1326                         err = tg3_nvram_lock(tp);
1327                         tg3_halt_cpu(tp, RX_CPU_BASE);
1328                         if (!err)
1329                                 tg3_nvram_unlock(tp);
1330                 }
1331         }
1332
1333         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1334
1335         /* Finally, set the new power state. */
1336         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1337         udelay(100);    /* Delay after power state change */
1338
1339         return 0;
1340 }
1341
1342 static void tg3_link_report(struct tg3 *tp)
1343 {
1344         if (!netif_carrier_ok(tp->dev)) {
1345                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1346         } else {
1347                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1348                        tp->dev->name,
1349                        (tp->link_config.active_speed == SPEED_1000 ?
1350                         1000 :
1351                         (tp->link_config.active_speed == SPEED_100 ?
1352                          100 : 10)),
1353                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1354                         "full" : "half"));
1355
1356                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1357                        "%s for RX.\n",
1358                        tp->dev->name,
1359                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1360                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1361         }
1362 }
1363
1364 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1365 {
1366         u32 new_tg3_flags = 0;
1367         u32 old_rx_mode = tp->rx_mode;
1368         u32 old_tx_mode = tp->tx_mode;
1369
1370         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1371
1372                 /* Convert 1000BaseX flow control bits to 1000BaseT
1373                  * bits before resolving flow control.
1374                  */
1375                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
1376                         local_adv &= ~(ADVERTISE_PAUSE_CAP |
1377                                        ADVERTISE_PAUSE_ASYM);
1378                         remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1379
1380                         if (local_adv & ADVERTISE_1000XPAUSE)
1381                                 local_adv |= ADVERTISE_PAUSE_CAP;
1382                         if (local_adv & ADVERTISE_1000XPSE_ASYM)
1383                                 local_adv |= ADVERTISE_PAUSE_ASYM;
1384                         if (remote_adv & LPA_1000XPAUSE)
1385                                 remote_adv |= LPA_PAUSE_CAP;
1386                         if (remote_adv & LPA_1000XPAUSE_ASYM)
1387                                 remote_adv |= LPA_PAUSE_ASYM;
1388                 }
1389
1390                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1391                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1392                                 if (remote_adv & LPA_PAUSE_CAP)
1393                                         new_tg3_flags |=
1394                                                 (TG3_FLAG_RX_PAUSE |
1395                                                 TG3_FLAG_TX_PAUSE);
1396                                 else if (remote_adv & LPA_PAUSE_ASYM)
1397                                         new_tg3_flags |=
1398                                                 (TG3_FLAG_RX_PAUSE);
1399                         } else {
1400                                 if (remote_adv & LPA_PAUSE_CAP)
1401                                         new_tg3_flags |=
1402                                                 (TG3_FLAG_RX_PAUSE |
1403                                                 TG3_FLAG_TX_PAUSE);
1404                         }
1405                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1406                         if ((remote_adv & LPA_PAUSE_CAP) &&
1407                         (remote_adv & LPA_PAUSE_ASYM))
1408                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1409                 }
1410
1411                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1412                 tp->tg3_flags |= new_tg3_flags;
1413         } else {
1414                 new_tg3_flags = tp->tg3_flags;
1415         }
1416
1417         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1418                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1419         else
1420                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1421
1422         if (old_rx_mode != tp->rx_mode) {
1423                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1424         }
1425
1426         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1427                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1428         else
1429                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1430
1431         if (old_tx_mode != tp->tx_mode) {
1432                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1433         }
1434 }
1435
1436 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1437 {
1438         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1439         case MII_TG3_AUX_STAT_10HALF:
1440                 *speed = SPEED_10;
1441                 *duplex = DUPLEX_HALF;
1442                 break;
1443
1444         case MII_TG3_AUX_STAT_10FULL:
1445                 *speed = SPEED_10;
1446                 *duplex = DUPLEX_FULL;
1447                 break;
1448
1449         case MII_TG3_AUX_STAT_100HALF:
1450                 *speed = SPEED_100;
1451                 *duplex = DUPLEX_HALF;
1452                 break;
1453
1454         case MII_TG3_AUX_STAT_100FULL:
1455                 *speed = SPEED_100;
1456                 *duplex = DUPLEX_FULL;
1457                 break;
1458
1459         case MII_TG3_AUX_STAT_1000HALF:
1460                 *speed = SPEED_1000;
1461                 *duplex = DUPLEX_HALF;
1462                 break;
1463
1464         case MII_TG3_AUX_STAT_1000FULL:
1465                 *speed = SPEED_1000;
1466                 *duplex = DUPLEX_FULL;
1467                 break;
1468
1469         default:
1470                 *speed = SPEED_INVALID;
1471                 *duplex = DUPLEX_INVALID;
1472                 break;
1473         };
1474 }
1475
1476 static void tg3_phy_copper_begin(struct tg3 *tp)
1477 {
1478         u32 new_adv;
1479         int i;
1480
1481         if (tp->link_config.phy_is_low_power) {
1482                 /* Entering low power mode.  Disable gigabit and
1483                  * 100baseT advertisements.
1484                  */
1485                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1486
1487                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1488                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1489                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1490                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1491
1492                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1493         } else if (tp->link_config.speed == SPEED_INVALID) {
1494                 tp->link_config.advertising =
1495                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1496                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1497                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1498                          ADVERTISED_Autoneg | ADVERTISED_MII);
1499
1500                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1501                         tp->link_config.advertising &=
1502                                 ~(ADVERTISED_1000baseT_Half |
1503                                   ADVERTISED_1000baseT_Full);
1504
1505                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1506                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1507                         new_adv |= ADVERTISE_10HALF;
1508                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1509                         new_adv |= ADVERTISE_10FULL;
1510                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1511                         new_adv |= ADVERTISE_100HALF;
1512                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1513                         new_adv |= ADVERTISE_100FULL;
1514                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1515
1516                 if (tp->link_config.advertising &
1517                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1518                         new_adv = 0;
1519                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1520                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1521                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1522                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1523                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1524                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1525                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1526                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1527                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1528                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1529                 } else {
1530                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1531                 }
1532         } else {
1533                 /* Asking for a specific link mode. */
1534                 if (tp->link_config.speed == SPEED_1000) {
1535                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1536                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1537
1538                         if (tp->link_config.duplex == DUPLEX_FULL)
1539                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1540                         else
1541                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1542                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1543                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1544                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1545                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1546                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1547                 } else {
1548                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1549
1550                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1551                         if (tp->link_config.speed == SPEED_100) {
1552                                 if (tp->link_config.duplex == DUPLEX_FULL)
1553                                         new_adv |= ADVERTISE_100FULL;
1554                                 else
1555                                         new_adv |= ADVERTISE_100HALF;
1556                         } else {
1557                                 if (tp->link_config.duplex == DUPLEX_FULL)
1558                                         new_adv |= ADVERTISE_10FULL;
1559                                 else
1560                                         new_adv |= ADVERTISE_10HALF;
1561                         }
1562                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1563                 }
1564         }
1565
1566         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1567             tp->link_config.speed != SPEED_INVALID) {
1568                 u32 bmcr, orig_bmcr;
1569
1570                 tp->link_config.active_speed = tp->link_config.speed;
1571                 tp->link_config.active_duplex = tp->link_config.duplex;
1572
1573                 bmcr = 0;
1574                 switch (tp->link_config.speed) {
1575                 default:
1576                 case SPEED_10:
1577                         break;
1578
1579                 case SPEED_100:
1580                         bmcr |= BMCR_SPEED100;
1581                         break;
1582
1583                 case SPEED_1000:
1584                         bmcr |= TG3_BMCR_SPEED1000;
1585                         break;
1586                 };
1587
1588                 if (tp->link_config.duplex == DUPLEX_FULL)
1589                         bmcr |= BMCR_FULLDPLX;
1590
1591                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1592                     (bmcr != orig_bmcr)) {
1593                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1594                         for (i = 0; i < 1500; i++) {
1595                                 u32 tmp;
1596
1597                                 udelay(10);
1598                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1599                                     tg3_readphy(tp, MII_BMSR, &tmp))
1600                                         continue;
1601                                 if (!(tmp & BMSR_LSTATUS)) {
1602                                         udelay(40);
1603                                         break;
1604                                 }
1605                         }
1606                         tg3_writephy(tp, MII_BMCR, bmcr);
1607                         udelay(40);
1608                 }
1609         } else {
1610                 tg3_writephy(tp, MII_BMCR,
1611                              BMCR_ANENABLE | BMCR_ANRESTART);
1612         }
1613 }
1614
1615 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1616 {
1617         int err;
1618
1619         /* Turn off tap power management. */
1620         /* Set Extended packet length bit */
1621         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1622
1623         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1624         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1625
1626         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1627         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1628
1629         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1630         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1631
1632         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1633         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1634
1635         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1636         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1637
1638         udelay(40);
1639
1640         return err;
1641 }
1642
1643 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1644 {
1645         u32 adv_reg, all_mask;
1646
1647         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1648                 return 0;
1649
1650         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1651                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1652         if ((adv_reg & all_mask) != all_mask)
1653                 return 0;
1654         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1655                 u32 tg3_ctrl;
1656
1657                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1658                         return 0;
1659
1660                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1661                             MII_TG3_CTRL_ADV_1000_FULL);
1662                 if ((tg3_ctrl & all_mask) != all_mask)
1663                         return 0;
1664         }
1665         return 1;
1666 }
1667
/* Bring up (or re-verify) the link on a copper PHY and program the MAC
 * to match the negotiated speed/duplex.
 *
 * @tp: device private state
 * @force_reset: non-zero forces an unconditional PHY reset first
 *
 * Side effects: updates tp->link_config.active_speed/active_duplex,
 * tp->mac_mode, tp->mi_mode, the netif carrier state, and reports
 * link changes via tg3_link_report().  Returns 0, or a tg3_writephy()
 * error from the 5401 DSP reload path.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	/* Mask all MAC events while the link is reconfigured. */
	tw32(MAC_EVENT, 0);

	/* Ack any latched link-state change bits in MAC_STATUS. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	tp->mi_mode = MAC_MI_MODE_BASE;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* BMSR link status is a latched bit; read twice so the
		 * second read reflects the current state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* Link is down: reload the 5401 DSP patch and
			 * poll up to ~10ms for the link to come back.
			 */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 B0 at gigabit may need a full reset plus a
			 * second DSP reload if the link still isn't up.
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		/* Ensure bit 10 of the AUX control shadow register is
		 * set; if it wasn't, set it and go renegotiate.
		 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Poll (up to 100 double-reads) for link up. */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait up to ~20ms for AUX_STAT to become non-zero,
		 * then decode the negotiated speed/duplex from it.
		 */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Retry BMCR reads until a sane (non-zero, not all-ones)
		 * value is returned.
		 */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if (bmcr & BMCR_ANENABLE) {
				current_link_up = 1;

				/* Force autoneg restart if we are exiting
				 * low power mode.
				 */
				if (!tg3_copper_is_advertising_all(tp))
					current_link_up = 0;
			} else {
				current_link_up = 0;
			}
		} else {
			/* Forced mode: link is only "up" if the PHY is in
			 * the exact speed/duplex that was requested.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = 1;
			} else {
				current_link_up = 0;
			}
		}

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;
	}

	/* For an autonegotiated full-duplex link, resolve flow control
	 * from the local and link-partner pause advertisements.
	 */
	if (current_link_up == 1 &&
	    (tp->link_config.active_duplex == DUPLEX_FULL) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 local_adv, remote_adv;

		if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
			local_adv = 0;
		local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

		if (tg3_readphy(tp, MII_LPA, &remote_adv))
			remote_adv = 0;

		remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

		/* If we are not advertising full pause capability,
		 * something is wrong.  Bring the link down and reconfigure.
		 */
		if (local_adv != ADVERTISE_PAUSE_CAP) {
			current_link_up = 0;
		} else {
			tg3_setup_flow_control(tp, local_adv, remote_adv);
		}
	}
relink:
	/* Link down (or low-power exit): restart negotiation and take
	 * one more look at BMSR in case the link came straight back.
	 */
	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
		u32 tmp;

		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Program the MAC port mode (MII for 10/100, GMII otherwise). */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
		    (current_link_up == 1 &&
		     tp->link_config.active_speed == SPEED_10))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	} else {
		if (current_link_up == 1)
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* 5700 at gigabit on PCI-X or fast PCI: notify firmware via the
	 * mailbox after re-acking status changes.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Propagate any carrier transition to the stack and log it. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
1946
/* Software state for the fiber (1000BASE-X) autonegotiation state
 * machine implemented in tg3_fiber_aneg_smachine().
 */
struct tg3_fiber_aneginfo {
	int state;	/* current ANEG_STATE_* value */
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;	/* MR_* control and result flags below */
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
/* MR_LP_ADV_*: abilities decoded from the link partner's config word. */
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	/* Tick counters, incremented once per state-machine step. */
	unsigned long link_time, cur_time;

	/* Last received config word and how many consecutive times it
	 * has matched (used to debounce ability detection).
	 */
	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	/* Raw transmitted/received autoneg config words (ANEG_CFG_*). */
	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06

};
/* Return codes from tg3_fiber_aneg_smachine(). */
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

/* Ticks a state must remain stable before advancing. */
#define ANEG_STATE_SETTLE_TIME	10000
2010
/* Execute one step of the software 1000BASE-X autonegotiation state
 * machine; called repeatedly (about once per microsecond) from
 * fiber_autoneg().
 *
 * @tp: device private state
 * @ap: state-machine context, persisted between calls
 *
 * Returns ANEG_OK to keep stepping, ANEG_TIMER_ENAB while a settle
 * interval is pending, ANEG_DONE on completion, or ANEG_FAILED.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	/* First entry: clear all tracking state. */
	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	/* Sample the received config word and update the ability /
	 * ack / idle detectors.
	 */
	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			/* Config word changed; restart the debounce. */
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			/* Same word seen more than once: ability matched. */
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config word received: idle. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch(ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Start sending an all-zero config word. */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		/* Hold in restart until the settle time elapses. */
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		} else {
			ret = ANEG_TIMER_ENAB;
		}
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Advertise full duplex with symmetric pause. */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		/* Wait until the partner's non-zero config word has been
		 * seen consistently.
		 */
		if (ap->ability_match != 0 && ap->rxconfig != 0) {
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		}
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Echo the partner's word back with ACK set. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			/* ACK received; the word must still match what we
			 * detected, otherwise restart negotiation.
			 */
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			/* Partner dropped back to idle: restart. */
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		/* Reject words with reserved bits set. */
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Decode the link partner's advertised abilities into
		 * the MR_LP_ADV_* flags.
		 */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				/* Next-page exchange is not implemented;
				 * only succeed if neither side needs it.
				 */
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending configs and wait for idle. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	};

	return ret;
}
2258
2259 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2260 {
2261         int res = 0;
2262         struct tg3_fiber_aneginfo aninfo;
2263         int status = ANEG_FAILED;
2264         unsigned int tick;
2265         u32 tmp;
2266
2267         tw32_f(MAC_TX_AUTO_NEG, 0);
2268
2269         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2270         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2271         udelay(40);
2272
2273         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2274         udelay(40);
2275
2276         memset(&aninfo, 0, sizeof(aninfo));
2277         aninfo.flags |= MR_AN_ENABLE;
2278         aninfo.state = ANEG_STATE_UNKNOWN;
2279         aninfo.cur_time = 0;
2280         tick = 0;
2281         while (++tick < 195000) {
2282                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2283                 if (status == ANEG_DONE || status == ANEG_FAILED)
2284                         break;
2285
2286                 udelay(1);
2287         }
2288
2289         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2290         tw32_f(MAC_MODE, tp->mac_mode);
2291         udelay(40);
2292
2293         *flags = aninfo.flags;
2294
2295         if (status == ANEG_DONE &&
2296             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2297                              MR_LP_ADV_FULL_DUPLEX)))
2298                 res = 1;
2299
2300         return res;
2301 }
2302
/* Initialization sequence for the BCM8002 SerDes PHY.
 *
 * Runs on first-time initialization, or later only while the PCS
 * reports sync (i.e. we have a link); otherwise it returns without
 * touching the PHY.  The register values are opaque vendor settings
 * written in a fixed order with fixed delays.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
2352
2353 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2354 {
2355         u32 sg_dig_ctrl, sg_dig_status;
2356         u32 serdes_cfg, expected_sg_dig_ctrl;
2357         int workaround, port_a;
2358         int current_link_up;
2359
2360         serdes_cfg = 0;
2361         expected_sg_dig_ctrl = 0;
2362         workaround = 0;
2363         port_a = 1;
2364         current_link_up = 0;
2365
2366         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2367             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2368                 workaround = 1;
2369                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2370                         port_a = 0;
2371
2372                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2373                 /* preserve bits 20-23 for voltage regulator */
2374                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2375         }
2376
2377         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2378
2379         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2380                 if (sg_dig_ctrl & (1 << 31)) {
2381                         if (workaround) {
2382                                 u32 val = serdes_cfg;
2383
2384                                 if (port_a)
2385                                         val |= 0xc010000;
2386                                 else
2387                                         val |= 0x4010000;
2388                                 tw32_f(MAC_SERDES_CFG, val);
2389                         }
2390                         tw32_f(SG_DIG_CTRL, 0x01388400);
2391                 }
2392                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2393                         tg3_setup_flow_control(tp, 0, 0);
2394                         current_link_up = 1;
2395                 }
2396                 goto out;
2397         }
2398
2399         /* Want auto-negotiation.  */
2400         expected_sg_dig_ctrl = 0x81388400;
2401
2402         /* Pause capability */
2403         expected_sg_dig_ctrl |= (1 << 11);
2404
2405         /* Asymettric pause */
2406         expected_sg_dig_ctrl |= (1 << 12);
2407
2408         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2409                 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
2410                     tp->serdes_counter &&
2411                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
2412                                     MAC_STATUS_RCVD_CFG)) ==
2413                      MAC_STATUS_PCS_SYNCED)) {
2414                         tp->serdes_counter--;
2415                         current_link_up = 1;
2416                         goto out;
2417                 }
2418 restart_autoneg:
2419                 if (workaround)
2420                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2421                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2422                 udelay(5);
2423                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2424
2425                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2426                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2427         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2428                                  MAC_STATUS_SIGNAL_DET)) {
2429                 sg_dig_status = tr32(SG_DIG_STATUS);
2430                 mac_status = tr32(MAC_STATUS);
2431
2432                 if ((sg_dig_status & (1 << 1)) &&
2433                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2434                         u32 local_adv, remote_adv;
2435
2436                         local_adv = ADVERTISE_PAUSE_CAP;
2437                         remote_adv = 0;
2438                         if (sg_dig_status & (1 << 19))
2439                                 remote_adv |= LPA_PAUSE_CAP;
2440                         if (sg_dig_status & (1 << 20))
2441                                 remote_adv |= LPA_PAUSE_ASYM;
2442
2443                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2444                         current_link_up = 1;
2445                         tp->serdes_counter = 0;
2446                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2447                 } else if (!(sg_dig_status & (1 << 1))) {
2448                         if (tp->serdes_counter)
2449                                 tp->serdes_counter--;
2450                         else {
2451                                 if (workaround) {
2452                                         u32 val = serdes_cfg;
2453
2454                                         if (port_a)
2455                                                 val |= 0xc010000;
2456                                         else
2457                                                 val |= 0x4010000;
2458
2459                                         tw32_f(MAC_SERDES_CFG, val);
2460                                 }
2461
2462                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2463                                 udelay(40);
2464
2465                                 /* Link parallel detection - link is up */
2466                                 /* only if we have PCS_SYNC and not */
2467                                 /* receiving config code words */
2468                                 mac_status = tr32(MAC_STATUS);
2469                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2470                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2471                                         tg3_setup_flow_control(tp, 0, 0);
2472                                         current_link_up = 1;
2473                                         tp->tg3_flags2 |=
2474                                                 TG3_FLG2_PARALLEL_DETECT;
2475                                         tp->serdes_counter =
2476                                                 SERDES_PARALLEL_DET_TIMEOUT;
2477                                 } else
2478                                         goto restart_autoneg;
2479                         }
2480                 }
2481         } else {
2482                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2483                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2484         }
2485
2486 out:
2487         return current_link_up;
2488 }
2489
/* Link setup for fiber parts without the SG_DIG block: either run
 * the software autoneg state machine (fiber_autoneg) or force a
 * 1000FD link.  Returns 1 if link is up, else 0.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* No PCS sync means no signal at all - link is down. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
		tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
		goto out;
	}

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 flags;
		int i;

		if (fiber_autoneg(tp, &flags)) {
			u32 local_adv, remote_adv;

			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (flags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (flags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
			current_link_up = 1;
		}
		/* Ack SYNC/CFG change events until they stop arriving
		 * (at most 30 iterations).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Autoneg failed, but we are synced and not receiving
		 * config words: treat the link as up anyway.
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		/* Forcing 1000FD link up. */
		current_link_up = 1;
		tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);
	}

out:
	return current_link_up;
}
2547
/* Main link setup entry point for TBI (fiber) parts; dispatches to
 * the HW-autoneg or by-hand helper and updates carrier/LED state.
 * Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Snapshot current settings so we only report real changes. */
	orig_pause_cfg =
		(tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
				  TG3_FLAG_TX_PAUSE));
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software autoneg, carrier already up and the link
	 * stable (synced, signal detect, no pending change bits) -
	 * just ack the change bits and leave everything alone.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Put the MAC into TBI port mode. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Clear the stale link-change bit in the status block. */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack outstanding sync/config change events until quiet. */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		/* Sync lost and autoneg timed out: pulse SEND_CONFIGS
		 * to try to provoke the link partner.
		 */
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Fiber links are always 1000FD when up; drive the LEDs. */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Report if the carrier flipped, or if any link parameter
	 * changed while the carrier state stayed the same.
	 */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg =
			tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
					 TG3_FLAG_TX_PAUSE);
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
2663
/* Link setup for serdes parts driven through a standard MII register
 * interface (5714S path below special-cases ASIC_REV_5714).
 * Returns the OR of all phy read/write error codes (0 on success).
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack any pending MAC link/config events. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* Read BMSR twice; the link status bit is latched. */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	/* On 5714 take link state from the MAC's TX status instead. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Build the 1000BASE-X advertisement from link_config. */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		/* Always advertise symmetric PAUSE just like copper */
		new_adv |= ADVERTISE_1000XPAUSE;

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		/* (Re)start autoneg if the advertisement changed or the
		 * phy does not have autoneg enabled yet.
		 */
		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode. */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Re-read the latched link status after the
			 * BMCR rewrite (twice, as above).
			 */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		/* With autoneg on, derive duplex and flow control from
		 * the intersection of local and partner abilities.
		 */
		if (bmcr & BMCR_ANENABLE) {
			u32 local_adv, remote_adv, common;

			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tg3_setup_flow_control(tp, local_adv,
						       remote_adv);
			}
			else
				current_link_up = 0;
		}
	}

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
2830
/* Periodic helper for MII serdes parts: once the autoneg timeout
 * (tp->serdes_counter) expires with no link, fall back to forcing
 * 1000FD via parallel detection; later, if config code words start
 * arriving on a parallel-detected link, re-enable autoneg.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			/* Read twice - presumably latched; verify against
			 * the phy databook.
			 */
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
2888
2889 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2890 {
2891         int err;
2892
2893         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2894                 err = tg3_setup_fiber_phy(tp, force_reset);
2895         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2896                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
2897         } else {
2898                 err = tg3_setup_copper_phy(tp, force_reset);
2899         }
2900
2901         if (tp->link_config.active_speed == SPEED_1000 &&
2902             tp->link_config.active_duplex == DUPLEX_HALF)
2903                 tw32(MAC_TX_LENGTHS,
2904                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2905                       (6 << TX_LENGTHS_IPG_SHIFT) |
2906                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2907         else
2908                 tw32(MAC_TX_LENGTHS,
2909                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2910                       (6 << TX_LENGTHS_IPG_SHIFT) |
2911                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2912
2913         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2914                 if (netif_carrier_ok(tp->dev)) {
2915                         tw32(HOSTCC_STAT_COAL_TICKS,
2916                              tp->coal.stats_block_coalesce_usecs);
2917                 } else {
2918                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2919                 }
2920         }
2921
2922         return err;
2923 }
2924
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* If the reorder workaround is already in effect we should
	 * never land here again - that would be a driver bug.
	 */
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	/* Only flag the recovery; the actual chip reset happens later
	 * in the workqueue.
	 */
	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
2945
2946 static inline u32 tg3_tx_avail(struct tg3 *tp)
2947 {
2948         smp_mb();
2949         return (tp->tx_pending -
2950                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
2951 }
2952
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3 *tp)
{
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	/* Reclaim every descriptor the hardware has consumed. */
	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* NULL skb at a completed slot means a bogus completion
		 * (likely MMIO reordering) - trigger recovery.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* Unmap the linear part of the skb. */
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Unmap each fragment descriptor that follows the head.
		 * An occupied slot or running past hw_idx here is the
		 * same inconsistency as above; note it and recover after
		 * finishing the unmaps.
		 */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under the tx lock to close the race with a
	 * concurrent tg3_start_xmit() stopping the queue.
	 */
	if (unlikely(netif_queue_stopped(tp->dev) &&
		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))) {
		netif_tx_lock(tp->dev);
		if (netif_queue_stopped(tp->dev) &&
		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH))
			netif_wake_queue(tp->dev);
		netif_tx_unlock(tp->dev);
	}
}
3020
3021 /* Returns size of skb allocated or < 0 on error.
3022  *
3023  * We only need to fill in the address because the other members
3024  * of the RX descriptor are invariant, see tg3_init_rings.
3025  *
3026  * Note the purposeful assymetry of cpu vs. chip accesses.  For
3027  * posting buffers we only dirty the first cache line of the RX
3028  * descriptor (containing the address).  Whereas for the RX status
3029  * buffers the cpu only reads the last cacheline of the RX descriptor
3030  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3031  */
3032 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3033                             int src_idx, u32 dest_idx_unmasked)
3034 {
3035         struct tg3_rx_buffer_desc *desc;
3036         struct ring_info *map, *src_map;
3037         struct sk_buff *skb;
3038         dma_addr_t mapping;
3039         int skb_size, dest_idx;
3040
3041         src_map = NULL;
3042         switch (opaque_key) {
3043         case RXD_OPAQUE_RING_STD:
3044                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3045                 desc = &tp->rx_std[dest_idx];
3046                 map = &tp->rx_std_buffers[dest_idx];
3047                 if (src_idx >= 0)
3048                         src_map = &tp->rx_std_buffers[src_idx];
3049                 skb_size = tp->rx_pkt_buf_sz;
3050                 break;
3051
3052         case RXD_OPAQUE_RING_JUMBO:
3053                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3054                 desc = &tp->rx_jumbo[dest_idx];
3055                 map = &tp->rx_jumbo_buffers[dest_idx];
3056                 if (src_idx >= 0)
3057                         src_map = &tp->rx_jumbo_buffers[src_idx];
3058                 skb_size = RX_JUMBO_PKT_BUF_SZ;
3059                 break;
3060
3061         default:
3062                 return -EINVAL;
3063         };
3064
3065         /* Do not overwrite any of the map or rp information
3066          * until we are sure we can commit to a new buffer.
3067          *
3068          * Callers depend upon this behavior and assume that
3069          * we leave everything unchanged if we fail.
3070          */
3071         skb = netdev_alloc_skb(tp->dev, skb_size);
3072         if (skb == NULL)
3073                 return -ENOMEM;
3074
3075         skb_reserve(skb, tp->rx_offset);
3076
3077         mapping = pci_map_single(tp->pdev, skb->data,
3078                                  skb_size - tp->rx_offset,
3079                                  PCI_DMA_FROMDEVICE);
3080
3081         map->skb = skb;
3082         pci_unmap_addr_set(map, mapping, mapping);
3083
3084         if (src_map != NULL)
3085                 src_map->skb = NULL;
3086
3087         desc->addr_hi = ((u64)mapping >> 32);
3088         desc->addr_lo = ((u64)mapping & 0xffffffff);
3089
3090         return skb_size;
3091 }
3092
3093 /* We only need to move over in the address because the other
3094  * members of the RX descriptor are invariant.  See notes above
3095  * tg3_alloc_rx_skb for full details.
3096  */
3097 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3098                            int src_idx, u32 dest_idx_unmasked)
3099 {
3100         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3101         struct ring_info *src_map, *dest_map;
3102         int dest_idx;
3103
3104         switch (opaque_key) {
3105         case RXD_OPAQUE_RING_STD:
3106                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3107                 dest_desc = &tp->rx_std[dest_idx];
3108                 dest_map = &tp->rx_std_buffers[dest_idx];
3109                 src_desc = &tp->rx_std[src_idx];
3110                 src_map = &tp->rx_std_buffers[src_idx];
3111                 break;
3112
3113         case RXD_OPAQUE_RING_JUMBO:
3114                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3115                 dest_desc = &tp->rx_jumbo[dest_idx];
3116                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3117                 src_desc = &tp->rx_jumbo[src_idx];
3118                 src_map = &tp->rx_jumbo_buffers[src_idx];
3119                 break;
3120
3121         default:
3122                 return;
3123         };
3124
3125         dest_map->skb = src_map->skb;
3126         pci_unmap_addr_set(dest_map, mapping,
3127                            pci_unmap_addr(src_map, mapping));
3128         dest_desc->addr_hi = src_desc->addr_hi;
3129         dest_desc->addr_lo = src_desc->addr_lo;
3130
3131         src_map->skb = NULL;
3132 }
3133
#if TG3_VLAN_TAG_USED
/* Deliver a VLAN-tagged receive skb to the stack via the
 * hardware-accelerated VLAN path (tp->vlgrp).
 */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3140
3141 /* The RX ring scheme is composed of multiple rings which post fresh
3142  * buffers to the chip, and one special ring the chip uses to report
3143  * status back to the host.
3144  *
3145  * The special ring reports the status of received packets to the
3146  * host.  The chip does not write into the original descriptor the
3147  * RX buffer was obtained from.  The chip simply takes the original
3148  * descriptor as provided by the host, updates the status and length
3149  * field, then writes this into the next status ring entry.
3150  *
3151  * Each ring the host uses to post buffers to the chip is described
3152  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
3153  * it is first placed into the on-chip ram.  When the packet's length
3154  * is known, it walks down the TG3_BDINFO entries to select the ring.
3155  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3156  * which is within the range of the new packet's length is chosen.
3157  *
3158  * The "separate ring for rx status" scheme may sound queer, but it makes
3159  * sense from a cache coherency perspective.  If only the host writes
3160  * to the buffer post rings, and only the chip writes to the rx status
3161  * rings, then cache lines never move beyond shared-modified state.
3162  * If both the host and chip were to write into the same ring, cache line
3163  * eviction could occur since both entities want it in an exclusive state.
3164  */
/* Service the RX return ring: process up to @budget completed packets,
 * hand them to the stack, and refill/repost buffers.  Called from
 * tg3_poll() in NAPI context.  Returns the number of packets received.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie names the posting ring (std/jumbo)
		 * and the slot this buffer was posted from.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			/* Large packet: pass the mapped buffer up as-is
			 * and post a freshly allocated replacement.  On
			 * allocation failure, drop and recycle instead.
			 */
			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			/* Small packet: copy into a fresh skb and put the
			 * original DMA buffer straight back on the ring.
			 */
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(copy_skb->data, skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* During a long burst, periodically tell the chip about
		 * newly posted std-ring buffers so it doesn't run dry.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx %= TG3_RX_RCB_RING_SIZE(tp);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	mmiowb();

	return received;
}
3320
/* NAPI poll callback: handle PHY/link events, reap TX completions,
 * then receive packets within the NAPI budget.  Returns 0 (poll done,
 * interrupts re-enabled) or 1 (more work, poll again).
 */
static int tg3_poll(struct net_device *netdev, int *budget)
{
	struct tg3 *tp = netdev_priv(netdev);
	struct tg3_hw_status *sblk = tp->hw_status;
	int done;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear the link-change bit before servicing it. */
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
			/* TX path flagged a hardware problem: leave NAPI
			 * and let the reset task recover the device.
			 */
			netif_rx_complete(netdev);
			schedule_work(&tp->reset_task);
			return 0;
		}
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with dev->poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > netdev->quota)
			orig_budget = netdev->quota;

		work_done = tg3_rx(tp, orig_budget);

		*budget -= work_done;
		netdev->quota -= work_done;
	}

	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
		/* Remember the tag of the status block just processed;
		 * tg3_restart_ints() reports it back to the chip.
		 */
		tp->last_tag = sblk->status_tag;
		rmb();
	} else
		sblk->status &= ~SD_STATUS_UPDATED;

	/* if no more work, tell net stack and NIC we're done */
	done = !tg3_has_work(tp);
	if (done) {
		netif_rx_complete(netdev);
		tg3_restart_ints(tp);
	}

	return (done ? 0 : 1);
}
3382
/* Shut the IRQ handlers out: set irq_sync (which the handlers test via
 * tg3_irq_sync()) and wait for any handler already running on another
 * CPU to finish.  Paired with clearing tp->irq_sync elsewhere.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	/* Make the irq_sync store visible before waiting on handlers. */
	smp_mb();

	synchronize_irq(tp->pdev->irq);
}
3392
3393 static inline int tg3_irq_sync(struct tg3 *tp)
3394 {
3395         return tp->irq_sync;
3396 }
3397
3398 /* Fully shutdown all tg3 driver activity elsewhere in the system.
3399  * If irq_sync is non-zero, then the IRQ handler must be synchronized
3400  * with as well.  Most of the time, this is not necessary except when
3401  * shutting down the device.
3402  */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	/* Quiesce the IRQ handler first when the caller asks for full
	 * exclusion, then take the main lock with BHs disabled.
	 */
	if (irq_sync)
		tg3_irq_quiesce(tp);
	spin_lock_bh(&tp->lock);
}
3409
/* Release the main device lock taken by tg3_full_lock(). */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
3414
3415 /* One-shot MSI handler - Chip automatically disables interrupt
3416  * after sending MSI so driver doesn't have to do it.
3417  */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the cache lines the poll routine touches first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);

	/* No mailbox write needed: the chip masked itself (1-shot MSI).
	 * Skip scheduling if the driver is quiescing the IRQ path.
	 */
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev);		/* schedule NAPI poll */

	return IRQ_HANDLED;
}
3431
3432 /* MSI ISR - No need to check for interrupt sharing and no need to
3433  * flush status block and interrupt mailbox. PCI ordering rules
3434  * guarantee that MSI will arrive after the status block.
3435  */
static irqreturn_t tg3_msi(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the cache lines the poll routine touches first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev);		/* schedule NAPI poll */

	return IRQ_RETVAL(1);
}
3456
/* Legacy INTx interrupt handler (untagged status blocks). */
static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		/*
		 * Writing any value to intr-mbox-0 clears PCI INTA# and
		 * chip-internal interrupt pending events.
		 * Writing non-zero to intr-mbox-0 additionally tells the
		 * NIC to stop sending us irqs, engaging "in-intr-handler"
		 * event coalescing.
		 */
		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			     0x00000001);
		if (tg3_irq_sync(tp))
			goto out;
		sblk->status &= ~SD_STATUS_UPDATED;
		if (likely(tg3_has_work(tp))) {
			prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
			netif_rx_schedule(dev);		/* schedule NAPI poll */
		} else {
			/* No work, shared interrupt perhaps?  re-enable
			 * interrupts, and flush that PCI write
			 */
			tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
				0x00000000);
		}
	} else {	/* shared interrupt */
		handled = 0;
	}
out:
	return IRQ_RETVAL(handled);
}
3499
/* INTx interrupt handler for chips using tagged status blocks: a new
 * status_tag (vs. tp->last_tag) indicates fresh work.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if ((sblk->status_tag != tp->last_tag) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		/*
		 * writing any value to intr-mbox-0 clears PCI INTA# and
		 * chip-internal interrupt pending events.
		 * writing non-zero to intr-mbox-0 additionally tells the
		 * NIC to stop sending us irqs, engaging "in-intr-handler"
		 * event coalescing.
		 */
		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			     0x00000001);
		if (tg3_irq_sync(tp))
			goto out;
		if (netif_rx_schedule_prep(dev)) {
			prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
			/* Update last_tag to mark that this status has been
			 * seen. Because interrupt may be shared, we may be
			 * racing with tg3_poll(), so only update last_tag
			 * if tg3_poll() is not scheduled.
			 */
			tp->last_tag = sblk->status_tag;
			__netif_rx_schedule(dev);
		}
	} else {	/* shared interrupt */
		handled = 0;
	}
out:
	return IRQ_RETVAL(handled);
}
3541
3542 /* ISR for interrupt test */
3543 static irqreturn_t tg3_test_isr(int irq, void *dev_id,
3544                 struct pt_regs *regs)
3545 {
3546         struct net_device *dev = dev_id;
3547         struct tg3 *tp = netdev_priv(dev);
3548         struct tg3_hw_status *sblk = tp->hw_status;
3549
3550         if ((sblk->status & SD_STATUS_UPDATED) ||
3551             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3552                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3553                              0x00000001);
3554                 return IRQ_RETVAL(1);
3555         }
3556         return IRQ_RETVAL(0);
3557 }
3558
3559 static int tg3_init_hw(struct tg3 *, int);
3560 static int tg3_halt(struct tg3 *, int, int);
3561
3562 /* Restart hardware after configuration changes, self-test, etc.
3563  * Invoked with tp->lock held.
3564  */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		/* Re-init failed: shut the chip down and close the
		 * device.  The lock is dropped around dev_close() and
		 * retaken so the caller still holds it on return.
		 */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		netif_poll_enable(tp->dev);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
3583
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: invoke the interrupt handler directly so facilities
 * like netconsole can make progress with normal irqs unavailable.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev, NULL);
}
#endif
3592
/* Workqueue handler: fully reset and re-initialize the chip after a
 * fatal error (e.g. TX timeout).  Runs in process context.
 */
static void tg3_reset_task(void *_data)
{
	struct tg3 *tp = _data;
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);
	tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;

	if (!netif_running(tp->dev)) {
		tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
		tg3_full_unlock(tp);
		return;
	}

	/* NOTE(review): lock is dropped around tg3_netif_stop() --
	 * presumably because it must run unlocked; confirm against
	 * tg3_netif_stop()'s definition.
	 */
	tg3_full_unlock(tp);

	tg3_netif_stop(tp);

	/* Retake the lock with the IRQ handler fully quiesced. */
	tg3_full_lock(tp, 1);

	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		/* TX recovery: switch to flushed mailbox writes to work
		 * around write reordering, then clear the pending flag.
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	if (tg3_init_hw(tp, 1))
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;

	tg3_full_unlock(tp);
}
3637
3638 static void tg3_tx_timeout(struct net_device *dev)
3639 {
3640         struct tg3 *tp = netdev_priv(dev);
3641
3642         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3643                dev->name);
3644
3645         schedule_work(&tp->reset_task);
3646 }
3647
3648 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3649 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3650 {
3651         u32 base = (u32) mapping & 0xffffffff;
3652
3653         return ((base > 0xffffdcc0) &&
3654                 (base + len + 8 < base));
3655 }
3656
3657 /* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	/* Only chips with the 40-bit DMA bug need checking, and only
	 * 64-bit highmem configs can produce such addresses.
	 */
	if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
		return (((u64) mapping + len) > DMA_40BIT_MASK);
	return 0;
#else
	return 0;
#endif
}
3669
3670 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3671
3672 /* Workaround 4GB and 40-bit hardware DMA bugs. */
/* Replace an already-queued skb whose mapping trips a DMA hardware bug
 * with a linearized copy in a single TX slot, unmapping the original
 * fragments from [*start, last_plus_one).  Returns 0 on success, -1 if
 * the copy fails allocation or also trips the 4G test (packet dropped).
 * The original skb is consumed either way.
 */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
				       u32 last_plus_one, u32 *start,
				       u32 base_flags, u32 mss)
{
	struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
	dma_addr_t new_addr = 0;
	u32 entry = *start;
	int i, ret = 0;

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		entry = *start;
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;
		} else {
			tg3_set_txd(tp, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
	}

	/* Now clean up the sw ring entries. */
	i = 0;
	while (entry != last_plus_one) {
		int len;

		if (i == 0)
			len = skb_headlen(skb);
		else
			len = skb_shinfo(skb)->frags[i-1].size;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
				 len, PCI_DMA_TODEVICE);
		if (i == 0) {
			/* First slot takes ownership of the new skb
			 * (NULL if the copy was dropped above).
			 */
			tp->tx_buffers[entry].skb = new_skb;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
		} else {
			tp->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	dev_kfree_skb(skb);

	return ret;
}
3729
3730 static void tg3_set_txd(struct tg3 *tp, int entry,
3731                         dma_addr_t mapping, int len, u32 flags,
3732                         u32 mss_and_is_end)
3733 {
3734         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3735         int is_end = (mss_and_is_end & 0x1);
3736         u32 mss = (mss_and_is_end >> 1);
3737         u32 vlan_tag = 0;
3738
3739         if (is_end)
3740                 flags |= TXD_FLAG_END;
3741         if (flags & TXD_FLAG_VLAN) {
3742                 vlan_tag = flags >> 16;
3743                 flags &= 0xffff;
3744         }
3745         vlan_tag |= (mss << TXD_MSS_SHIFT);
3746
3747         txd->addr_hi = ((u64) mapping >> 32);
3748         txd->addr_lo = ((u64) mapping & 0xffffffff);
3749         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3750         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3751 }
3752
3753 /* hard_start_xmit for devices that don't have any bugs and
3754  * support TG3_FLG2_HW_TSO_2 only.
3755  */
/* Queue @skb for transmission: map the head and all page fragments,
 * fill TX descriptors (with optional TSO/checksum/VLAN flags), and
 * kick the producer mailbox.  Returns NETDEV_TX_OK or NETDEV_TX_BUSY.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
#if TG3_TSO_SUPPORT != 0
	mss = 0;
	if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
	    (mss = skb_shinfo(skb)->gso_size) != 0) {
		int tcp_opt_len, ip_tcp_len;

		/* TSO requires a private, writable header. */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		/* Encode the header length into the upper MSS bits as
		 * the hardware TSO engine expects.
		 */
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
		else {
			tcp_opt_len = ((skb->h.th->doff - 5) * 4);
			ip_tcp_len = (skb->nh.iph->ihl * 4) +
				     sizeof(struct tcphdr);

			skb->nh.iph->check = 0;
			skb->nh.iph->tot_len = htons(mss + ip_tcp_len +
						     tcp_opt_len);
			mss |= (ip_tcp_len + tcp_opt_len) << 9;
		}

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		skb->h.th->check = 0;

	}
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#else
	mss = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#endif
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			/* Only slot 0 owns the skb pointer. */
			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			tg3_set_txd(tp, entry, mapping, len,
				    base_flags, (i == last) | (mss << 1));

			entry = NEXT_TX(entry);
		}
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		/* Ring nearly full: stop the queue, but re-wake if the
		 * reclaim path freed space in the meantime.
		 */
		netif_stop_queue(dev);
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
3878
3879 #if TG3_TSO_SUPPORT != 0
3880 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
3881
3882 /* Use GSO to workaround a rare TSO bug that may be triggered when the
3883  * TSO header is greater than 80 bytes.
3884  */
/* Software-segment @skb and feed each resulting packet through the
 * normal dma-bug transmit path.  The original skb is always consumed
 * (freed) once we get past the ring-space check, even when
 * segmentation fails.  Returns NETDEV_TX_OK, or NETDEV_TX_BUSY when
 * the tx ring does not look big enough to hold all segments.
 */
3885 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
3886 {
3887         struct sk_buff *segs, *nskb;
3888
3889         /* Estimate the number of fragments in the worst case */
         /* Budget of 3 descriptors per segment — TODO confirm this bound
          * against the descriptor layout used by tg3_start_xmit_dma_bug().
          */
3890         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
3891                 netif_stop_queue(tp->dev);
3892                 return NETDEV_TX_BUSY;
3893         }
3894
         /* Segment in software, with TSO masked out of the feature set. */
3895         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
3896         if (unlikely(IS_ERR(segs)))
                 /* Segmentation failed: silently drop the oversized skb. */
3897                 goto tg3_tso_bug_end;
3898
         /* Unlink each segment and transmit it individually. */
3899         do {
3900                 nskb = segs;
3901                 segs = segs->next;
3902                 nskb->next = NULL;
3903                 tg3_start_xmit_dma_bug(nskb, tp->dev);
3904         } while (segs);
3905
3906 tg3_tso_bug_end:
         /* Free the original (unsegmented) skb on every exit path. */
3907         dev_kfree_skb(skb);
3908
3909         return NETDEV_TX_OK;
3910 }
3911 #endif
3912
3913 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
3914  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
3915  */
/* Every DMA mapping is additionally checked against the chip's
 * 4GB-boundary and 40-bit address errata; if any descriptor would trip
 * the bug the whole packet is rewritten via
 * tigon3_dma_hwbug_workaround().  Returns NETDEV_TX_OK (queued or
 * dropped) or NETDEV_TX_BUSY (ring full, caller requeues).
 */
3916 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
3917 {
3918         struct tg3 *tp = netdev_priv(dev);
3919         dma_addr_t mapping;
3920         u32 len, entry, base_flags, mss;
3921         int would_hit_hwbug;
3922
3923         len = skb_headlen(skb);
3924
3925         /* We are running in BH disabled context with netif_tx_lock
3926          * and TX reclaim runs via tp->poll inside of a software
3927          * interrupt.  Furthermore, IRQ processing runs lockless so we have
3928          * no IRQ context deadlocks to worry about either.  Rejoice!
3929          */
         /* Need one descriptor for the linear data plus one per fragment. */
3930         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3931                 if (!netif_queue_stopped(dev)) {
3932                         netif_stop_queue(dev);
3933
3934                         /* This is a hard error, log it. */
3935                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
3936                                "queue awake!\n", dev->name);
3937                 }
3938                 return NETDEV_TX_BUSY;
3939         }
3940
3941         entry = tp->tx_prod;
3942         base_flags = 0;
3943         if (skb->ip_summed == CHECKSUM_PARTIAL)
3944                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
         /* TSO setup: prime the IP/TCP headers and the descriptor mss
          * field for hardware or firmware segmentation.
          */
3945 #if TG3_TSO_SUPPORT != 0
3946         mss = 0;
3947         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3948             (mss = skb_shinfo(skb)->gso_size) != 0) {
3949                 int tcp_opt_len, ip_tcp_len, hdr_len;
3950
                 /* Headers are modified in place below; un-clone first. */
3951                 if (skb_header_cloned(skb) &&
3952                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3953                         dev_kfree_skb(skb);
3954                         goto out_unlock;
3955                 }
3956
3957                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3958                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3959
                 /* HW_TSO_1 parts mishandle TSO headers longer than 80
                  * bytes; punt those packets to software GSO.
                  */
3960                 hdr_len = ip_tcp_len + tcp_opt_len;
3961                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
3962                              (tp->tg3_flags2 & TG3_FLG2_HW_TSO_1_BUG))
3963                         return (tg3_tso_bug(tp, skb));
3964
3965                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3966                                TXD_FLAG_CPU_POST_DMA);
3967
                 /* Prepare the IP header as a per-segment template. */
3968                 skb->nh.iph->check = 0;
3969                 skb->nh.iph->tot_len = htons(mss + hdr_len);
                 /* HW TSO computes the TCP checksum itself; firmware TSO
                  * wants the pseudo-header checksum pre-seeded instead.
                  */
3970                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3971                         skb->h.th->check = 0;
3972                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3973                 }
3974                 else {
3975                         skb->h.th->check =
3976                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3977                                                    skb->nh.iph->daddr,
3978                                                    0, IPPROTO_TCP, 0);
3979                 }
3980
                 /* NOTE(review): the IP/TCP option-word count is encoded
                  * differently per chip family (into mss<<11 vs
                  * base_flags<<12) — descriptor format per chip docs.
                  */
3981                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3982                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3983                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3984                                 int tsflags;
3985
3986                                 tsflags = ((skb->nh.iph->ihl - 5) +
3987                                            (tcp_opt_len >> 2));
3988                                 mss |= (tsflags << 11);
3989                         }
3990                 } else {
3991                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3992                                 int tsflags;
3993
3994                                 tsflags = ((skb->nh.iph->ihl - 5) +
3995                                            (tcp_opt_len >> 2));
3996                                 base_flags |= tsflags << 12;
3997                         }
3998                 }
3999         }
4000 #else
4001         mss = 0;
4002 #endif
4003 #if TG3_VLAN_TAG_USED
4004         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4005                 base_flags |= (TXD_FLAG_VLAN |
4006                                (vlan_tx_tag_get(skb) << 16));
4007 #endif
4008
4009         /* Queue skb data, a.k.a. the main skb fragment. */
4010         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4011
4012         tp->tx_buffers[entry].skb = skb;
4013         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4014
4015         would_hit_hwbug = 0;
4016
         /* Chip erratum: a DMA buffer must not cross a 4GB boundary. */
4017         if (tg3_4g_overflow_test(mapping, len))
4018                 would_hit_hwbug = 1;
4019
4020         tg3_set_txd(tp, entry, mapping, len, base_flags,
4021                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4022
4023         entry = NEXT_TX(entry);
4024
4025         /* Now loop through additional data fragments, and queue them. */
4026         if (skb_shinfo(skb)->nr_frags > 0) {
4027                 unsigned int i, last;
4028
4029                 last = skb_shinfo(skb)->nr_frags - 1;
4030                 for (i = 0; i <= last; i++) {
4031                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4032
4033                         len = frag->size;
4034                         mapping = pci_map_page(tp->pdev,
4035                                                frag->page,
4036                                                frag->page_offset,
4037                                                len, PCI_DMA_TODEVICE);
4038
4039                         tp->tx_buffers[entry].skb = NULL;
4040                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4041
4042                         if (tg3_4g_overflow_test(mapping, len))
4043                                 would_hit_hwbug = 1;
4044
                         /* Some parts can only DMA to 40-bit addresses. */
4045                         if (tg3_40bit_overflow_test(tp, mapping, len))
4046                                 would_hit_hwbug = 1;
4047
4048                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4049                                 tg3_set_txd(tp, entry, mapping, len,
4050                                             base_flags, (i == last)|(mss << 1));
4051                         else
4052                                 tg3_set_txd(tp, entry, mapping, len,
4053                                             base_flags, (i == last));
4054
4055                         entry = NEXT_TX(entry);
4056                 }
4057         }
4058
         /* Rewrite all of this packet's descriptors with safe bounce
          * buffers when any mapping would trigger the erratum.
          */
4059         if (would_hit_hwbug) {
4060                 u32 last_plus_one = entry;
4061                 u32 start;
4062
4063                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4064                 start &= (TG3_TX_RING_SIZE - 1);
4065
4066                 /* If the workaround fails due to memory/mapping
4067                  * failure, silently drop this packet.
4068                  */
4069                 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
4070                                                 &start, base_flags, mss))
4071                         goto out_unlock;
4072
4073                 entry = start;
4074         }
4075
4076         /* Packets are ready, update Tx producer idx local and on card. */
4077         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4078
4079         tp->tx_prod = entry;
         /* Stop the queue when nearly full; re-wake immediately if the
          * reclaim path already freed enough descriptors (race window).
          */
4080         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4081                 netif_stop_queue(dev);
4082                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH)
4083                         netif_wake_queue(tp->dev);
4084         }
4085
4086 out_unlock:
4087         mmiowb();
4088
4089         dev->trans_start = jiffies;
4090
4091         return NETDEV_TX_OK;
4092 }
4093
4094 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4095                                int new_mtu)
4096 {
4097         dev->mtu = new_mtu;
4098
4099         if (new_mtu > ETH_DATA_LEN) {
4100                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4101                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4102                         ethtool_op_set_tso(dev, 0);
4103                 }
4104                 else
4105                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4106         } else {
4107                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4108                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4109                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4110         }
4111 }
4112
/* net_device ->change_mtu hook.  Validates the requested MTU, and when
 * the interface is running performs a full netif stop + chip halt +
 * hardware restart so the rx rings are resized for the new packet
 * size.  Returns 0, -EINVAL for an out-of-range MTU, or the error
 * from tg3_restart_hw() (in which case the interface stays stopped).
 */
4113 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4114 {
4115         struct tg3 *tp = netdev_priv(dev);
4116         int err;
4117
4118         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4119                 return -EINVAL;
4120
4121         if (!netif_running(dev)) {
4122                 /* We'll just catch it later when the
4123                  * device is up'd.
4124                  */
4125                 tg3_set_mtu(dev, tp, new_mtu);
4126                 return 0;
4127         }
4128
         /* Quiesce the device; the halt/set/restart sequence below must
          * run with the full lock held and interrupts synchronized.
          */
4129         tg3_netif_stop(tp);
4130
4131         tg3_full_lock(tp, 1);
4132
4133         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4134
4135         tg3_set_mtu(dev, tp, new_mtu);
4136
4137         err = tg3_restart_hw(tp, 0);
4138
         /* Only resume the netif layer if the hardware came back up. */
4139         if (!err)
4140                 tg3_netif_start(tp);
4141
4142         tg3_full_unlock(tp);
4143
4144         return err;
4145 }
4146
4147 /* Free up pending packets in all rx/tx rings.
4148  *
4149  * The chip has been shut down and the driver detached from
4150  * the networking, so no interrupts or new tx packets will
4151  * end up in the driver.  tp->{tx,}lock is not held and we are not
4152  * in an interrupt context and thus may sleep.
4153  */
4154 static void tg3_free_rings(struct tg3 *tp)
4155 {
4156         struct ring_info *rxp;
4157         int i;
4158
4159         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4160                 rxp = &tp->rx_std_buffers[i];
4161
4162                 if (rxp->skb == NULL)
4163                         continue;
4164                 pci_unmap_single(tp->pdev,
4165                                  pci_unmap_addr(rxp, mapping),
4166                                  tp->rx_pkt_buf_sz - tp->rx_offset,
4167                                  PCI_DMA_FROMDEVICE);
4168                 dev_kfree_skb_any(rxp->skb);
4169                 rxp->skb = NULL;
4170         }
4171
4172         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4173                 rxp = &tp->rx_jumbo_buffers[i];
4174
4175                 if (rxp->skb == NULL)
4176                         continue;
4177                 pci_unmap_single(tp->pdev,
4178                                  pci_unmap_addr(rxp, mapping),
4179                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4180                                  PCI_DMA_FROMDEVICE);
4181                 dev_kfree_skb_any(rxp->skb);
4182                 rxp->skb = NULL;
4183         }
4184
4185         for (i = 0; i < TG3_TX_RING_SIZE; ) {
4186                 struct tx_ring_info *txp;
4187                 struct sk_buff *skb;
4188                 int j;
4189
4190                 txp = &tp->tx_buffers[i];
4191                 skb = txp->skb;
4192
4193                 if (skb == NULL) {
4194                         i++;
4195                         continue;
4196                 }
4197
4198                 pci_unmap_single(tp->pdev,
4199                                  pci_unmap_addr(txp, mapping),
4200                                  skb_headlen(skb),
4201                                  PCI_DMA_TODEVICE);
4202                 txp->skb = NULL;
4203
4204                 i++;
4205
4206                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4207                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4208                         pci_unmap_page(tp->pdev,
4209                                        pci_unmap_addr(txp, mapping),
4210                                        skb_shinfo(skb)->frags[j].size,
4211                                        PCI_DMA_TODEVICE);
4212                         i++;
4213                 }
4214
4215                 dev_kfree_skb_any(skb);
4216         }
4217 }
4218
4219 /* Initialize tx/rx rings for packet processing.
4220  *
4221  * The chip has been shut down and the driver detached from
4222  * the networking, so no interrupts or new tx packets will
4223  * end up in the driver.  tp->{tx,}lock are held and thus
4224  * we may not sleep.
4225  */
/* Returns 0, or -ENOMEM when not even one std rx buffer (or, with the
 * jumbo ring enabled, not one jumbo buffer) could be allocated.  On
 * partial allocation failure the ring is shrunk rather than failed.
 */
4226 static int tg3_init_rings(struct tg3 *tp)
4227 {
4228         u32 i;
4229
4230         /* Free up all the SKBs. */
4231         tg3_free_rings(tp);
4232
4233         /* Zero out all descriptors. */
4234         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4235         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4236         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4237         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
4238
         /* 5780-class chips use bigger std buffers instead of a
          * separate jumbo ring when running with a jumbo MTU.
          */
4239         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
4240         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
4241             (tp->dev->mtu > ETH_DATA_LEN))
4242                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4243
4244         /* Initialize invariants of the rings, we only set this
4245          * stuff once.  This works because the card does not
4246          * write into the rx buffer posting rings.
4247          */
4248         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4249                 struct tg3_rx_buffer_desc *rxd;
4250
4251                 rxd = &tp->rx_std[i];
4252                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
4253                         << RXD_LEN_SHIFT;
4254                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4255                 rxd->opaque = (RXD_OPAQUE_RING_STD |
4256                                (i << RXD_OPAQUE_INDEX_SHIFT));
4257         }
4258
4259         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4260                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4261                         struct tg3_rx_buffer_desc *rxd;
4262
4263                         rxd = &tp->rx_jumbo[i];
4264                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4265                                 << RXD_LEN_SHIFT;
4266                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4267                                 RXD_FLAG_JUMBO;
4268                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4269                                (i << RXD_OPAQUE_INDEX_SHIFT));
4270                 }
4271         }
4272
4273         /* Now allocate fresh SKBs for each rx ring. */
         /* On allocation failure, fall back to a smaller ring instead
          * of failing — unless nothing at all could be allocated.
          */
4274         for (i = 0; i < tp->rx_pending; i++) {
4275                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
4276                         printk(KERN_WARNING PFX
4277                                "%s: Using a smaller RX standard ring, "
4278                                "only %d out of %d buffers were allocated "
4279                                "successfully.\n",
4280                                tp->dev->name, i, tp->rx_pending);
4281                         if (i == 0)
4282                                 return -ENOMEM;
4283                         tp->rx_pending = i;
4284                         break;
4285                 }
4286         }
4287
4288         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4289                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4290                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4291                                              -1, i) < 0) {
4292                                 printk(KERN_WARNING PFX
4293                                        "%s: Using a smaller RX jumbo ring, "
4294                                        "only %d out of %d buffers were "
4295                                        "allocated successfully.\n",
4296                                        tp->dev->name, i, tp->rx_jumbo_pending);
4297                                 if (i == 0) {
4298                                         tg3_free_rings(tp);
4299                                         return -ENOMEM;
4300                                 }
4301                                 tp->rx_jumbo_pending = i;
4302                                 break;
4303                         }
4304                 }
4305         }
4306         return 0;
4307 }
4308
4309 /*
4310  * Must not be invoked with interrupt sources disabled and
4311  * the hardware shutdown down.
4312  */
/* Release the DMA-coherent rings, status block and statistics block,
 * and the single kmalloc chunk holding all three host-side
 * bookkeeping arrays (rx std, rx jumbo, tx).  Each pointer is checked
 * and cleared so the function is safe to call on a partially
 * allocated state (it is the error path of tg3_alloc_consistent()).
 */
4313 static void tg3_free_consistent(struct tg3 *tp)
4314 {
         /* kfree(NULL) is a no-op; this one block also backs
          * rx_jumbo_buffers and tx_buffers.
          */
4315         kfree(tp->rx_std_buffers);
4316         tp->rx_std_buffers = NULL;
4317         if (tp->rx_std) {
4318                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4319                                     tp->rx_std, tp->rx_std_mapping);
4320                 tp->rx_std = NULL;
4321         }
4322         if (tp->rx_jumbo) {
4323                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4324                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
4325                 tp->rx_jumbo = NULL;
4326         }
4327         if (tp->rx_rcb) {
4328                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4329                                     tp->rx_rcb, tp->rx_rcb_mapping);
4330                 tp->rx_rcb = NULL;
4331         }
4332         if (tp->tx_ring) {
4333                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4334                         tp->tx_ring, tp->tx_desc_mapping);
4335                 tp->tx_ring = NULL;
4336         }
4337         if (tp->hw_status) {
4338                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4339                                     tp->hw_status, tp->status_mapping);
4340                 tp->hw_status = NULL;
4341         }
4342         if (tp->hw_stats) {
4343                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4344                                     tp->hw_stats, tp->stats_mapping);
4345                 tp->hw_stats = NULL;
4346         }
4347 }
4348
4349 /*
4350  * Must not be invoked with interrupt sources disabled and
4351  * the hardware shutdown down.  Can sleep.
4352  */
4353 static int tg3_alloc_consistent(struct tg3 *tp)
4354 {
4355         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
4356                                       (TG3_RX_RING_SIZE +
4357                                        TG3_RX_JUMBO_RING_SIZE)) +
4358                                      (sizeof(struct tx_ring_info) *
4359                                       TG3_TX_RING_SIZE),
4360                                      GFP_KERNEL);
4361         if (!tp->rx_std_buffers)
4362                 return -ENOMEM;
4363
4364         memset(tp->rx_std_buffers, 0,
4365                (sizeof(struct ring_info) *
4366                 (TG3_RX_RING_SIZE +
4367                  TG3_RX_JUMBO_RING_SIZE)) +
4368                (sizeof(struct tx_ring_info) *
4369                 TG3_TX_RING_SIZE));
4370
4371         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4372         tp->tx_buffers = (struct tx_ring_info *)
4373                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4374
4375         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4376                                           &tp->rx_std_mapping);
4377         if (!tp->rx_std)
4378                 goto err_out;
4379
4380         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4381                                             &tp->rx_jumbo_mapping);
4382
4383         if (!tp->rx_jumbo)
4384                 goto err_out;
4385
4386         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4387                                           &tp->rx_rcb_mapping);
4388         if (!tp->rx_rcb)
4389                 goto err_out;
4390
4391         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4392                                            &tp->tx_desc_mapping);
4393         if (!tp->tx_ring)
4394                 goto err_out;
4395
4396         tp->hw_status = pci_alloc_consistent(tp->pdev,
4397                                              TG3_HW_STATUS_SIZE,
4398                                              &tp->status_mapping);
4399         if (!tp->hw_status)
4400                 goto err_out;
4401
4402         tp->hw_stats = pci_alloc_consistent(tp->pdev,
4403                                             sizeof(struct tg3_hw_stats),
4404                                             &tp->stats_mapping);
4405         if (!tp->hw_stats)
4406                 goto err_out;
4407
4408         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4409         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4410
4411         return 0;
4412
4413 err_out:
4414         tg3_free_consistent(tp);
4415         return -ENOMEM;
4416 }
4417
4418 #define MAX_WAIT_CNT 1000
4419
4420 /* To stop a block, clear the enable bit and poll till it
4421  * clears.  tp->lock is held.
4422  */
4423 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4424 {
4425         unsigned int i;
4426         u32 val;
4427
4428         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4429                 switch (ofs) {
4430                 case RCVLSC_MODE:
4431                 case DMAC_MODE:
4432                 case MBFREE_MODE:
4433                 case BUFMGR_MODE:
4434                 case MEMARB_MODE:
4435                         /* We can't enable/disable these bits of the
4436                          * 5705/5750, just say success.
4437                          */
4438                         return 0;
4439
4440                 default:
4441                         break;
4442                 };
4443         }
4444
4445         val = tr32(ofs);
4446         val &= ~enable_bit;
4447         tw32_f(ofs, val);
4448
4449         for (i = 0; i < MAX_WAIT_CNT; i++) {
4450                 udelay(100);
4451                 val = tr32(ofs);
4452                 if ((val & enable_bit) == 0)
4453                         break;
4454         }
4455
4456         if (i == MAX_WAIT_CNT && !silent) {
4457                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4458                        "ofs=%lx enable_bit=%x\n",
4459                        ofs, enable_bit);
4460                 return -ENODEV;
4461         }
4462
4463         return 0;
4464 }
4465
4466 /* tp->lock is held. */
/* Orderly shutdown of the chip's DMA engines and MAC: disable
 * interrupts and the rx path, stop each functional block rx-side then
 * tx-side, reset the flow-through queues, and finally clear the
 * shared status/statistics blocks.  Errors from individual blocks are
 * OR-ed together; returns 0 or a negative errno.
 */
4467 static int tg3_abort_hw(struct tg3 *tp, int silent)
4468 {
4469         int i, err;
4470
4471         tg3_disable_ints(tp);
4472
         /* Stop accepting new rx traffic first. */
4473         tp->rx_mode &= ~RX_MODE_ENABLE;
4474         tw32_f(MAC_RX_MODE, tp->rx_mode);
4475         udelay(10);
4476
         /* Shut down the rx-side blocks. */
4477         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4478         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4479         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4480         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4481         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4482         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4483
         /* Shut down the tx-side blocks. */
4484         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4485         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4486         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4487         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4488         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4489         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4490         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4491
4492         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4493         tw32_f(MAC_MODE, tp->mac_mode);
4494         udelay(40);
4495
         /* MAC_TX_MODE has no tg3_stop_block() helper; poll it here. */
4496         tp->tx_mode &= ~TX_MODE_ENABLE;
4497         tw32_f(MAC_TX_MODE, tp->tx_mode);
4498
4499         for (i = 0; i < MAX_WAIT_CNT; i++) {
4500                 udelay(100);
4501                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4502                         break;
4503         }
4504         if (i >= MAX_WAIT_CNT) {
4505                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4506                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4507                        tp->dev->name, tr32(MAC_TX_MODE));
4508                 err |= -ENODEV;
4509         }
4510
4511         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4512         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4513         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4514
         /* Pulse a reset through the flow-through queues. */
4515         tw32(FTQ_RESET, 0xffffffff);
4516         tw32(FTQ_RESET, 0x00000000);
4517
4518         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4519         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4520
         /* Clear the DMA-shared status and statistics blocks, if
          * they have been allocated.
          */
4521         if (tp->hw_status)
4522                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4523         if (tp->hw_stats)
4524                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4525
4526         return err;
4527 }
4528
4529 /* tp->lock is held. */
4530 static int tg3_nvram_lock(struct tg3 *tp)
4531 {
4532         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4533                 int i;
4534
4535                 if (tp->nvram_lock_cnt == 0) {
4536                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4537                         for (i = 0; i < 8000; i++) {
4538                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4539                                         break;
4540                                 udelay(20);
4541                         }
4542                         if (i == 8000) {
4543                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4544                                 return -ENODEV;
4545                         }
4546                 }
4547                 tp->nvram_lock_cnt++;
4548         }
4549         return 0;
4550 }
4551
4552 /* tp->lock is held. */
4553 static void tg3_nvram_unlock(struct tg3 *tp)
4554 {
4555         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4556                 if (tp->nvram_lock_cnt > 0)
4557                         tp->nvram_lock_cnt--;
4558                 if (tp->nvram_lock_cnt == 0)
4559                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4560         }
4561 }
4562
4563 /* tp->lock is held. */
4564 static void tg3_enable_nvram_access(struct tg3 *tp)
4565 {
4566         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4567             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4568                 u32 nvaccess = tr32(NVRAM_ACCESS);
4569
4570                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4571         }
4572 }
4573
4574 /* tp->lock is held. */
4575 static void tg3_disable_nvram_access(struct tg3 *tp)
4576 {
4577         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4578             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4579                 u32 nvaccess = tr32(NVRAM_ACCESS);
4580
4581                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4582         }
4583 }
4584
4585 /* tp->lock is held. */
4586 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4587 {
4588         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4589                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4590
4591         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4592                 switch (kind) {
4593                 case RESET_KIND_INIT:
4594                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4595                                       DRV_STATE_START);
4596                         break;
4597
4598                 case RESET_KIND_SHUTDOWN:
4599                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4600                                       DRV_STATE_UNLOAD);
4601                         break;
4602
4603                 case RESET_KIND_SUSPEND:
4604                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4605                                       DRV_STATE_SUSPEND);
4606                         break;
4607
4608                 default:
4609                         break;
4610                 };
4611         }
4612 }
4613
4614 /* tp->lock is held. */
4615 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4616 {
4617         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4618                 switch (kind) {
4619                 case RESET_KIND_INIT:
4620                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4621                                       DRV_STATE_START_DONE);
4622                         break;
4623
4624                 case RESET_KIND_SHUTDOWN:
4625                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4626                                       DRV_STATE_UNLOAD_DONE);
4627                         break;
4628
4629                 default:
4630                         break;
4631                 };
4632         }
4633 }
4634
4635 /* tp->lock is held. */
4636 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4637 {
4638         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4639                 switch (kind) {
4640                 case RESET_KIND_INIT:
4641                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4642                                       DRV_STATE_START);
4643                         break;
4644
4645                 case RESET_KIND_SHUTDOWN:
4646                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4647                                       DRV_STATE_UNLOAD);
4648                         break;
4649
4650                 case RESET_KIND_SUSPEND:
4651                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4652                                       DRV_STATE_SUSPEND);
4653                         break;
4654
4655                 default:
4656                         break;
4657                 };
4658         }
4659 }
4660
4661 static void tg3_stop_fw(struct tg3 *);
4662
/* tp->lock is held.
 *
 * Perform a core-clock reset of the chip and restore enough PCI/host
 * state afterwards that registers, SRAM and the firmware mailboxes are
 * usable again.  Also reprobes the ASF enable state, since the reset
 * restarts the bootcode.  Always returns 0 at present.
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i;

	tg3_nvram_lock(tp);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* NOTE(review): clearing the fastboot program counter on these
	 * ASICs presumably forces the post-reset bootcode down the full
	 * boot path -- confirm against Broadcom documentation.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* NOTE(review): 0x7e2c and bit 29 below are
		 * undocumented PCIE workaround registers/bits --
		 * kept exactly as-is.
		 */
		if (tr32(0x7e2c) == 0x60) {
			tw32(0x7e2c, 0x20);
		}
		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	/* 5705+: ask the chip to keep GPHY power up across the reset. */
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliable (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (i = 0; i < 5000; i++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}
		/* Set PCIE max payload size and clear error status.  */
		pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
	}

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_restore_state(tp->pdev);

	/* Make sure PCI-X relaxed ordering bit is clear. */
	pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
	val &= ~PCIX_CAPS_RELAXED_ORDERING;
	pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
		u32 val;

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}

		val = tr32(MEMARB_MODE);
		tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	} else
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		/* NOTE(review): undocumented 5750 A3 workaround write. */
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		u32 val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Restore the MAC port mode matching the PHY type. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else
		tw32_f(MAC_MODE, 0);
	udelay(40);

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 &&
	    !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
		tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;

		printk(KERN_INFO PFX "%s: No firmware running.\n",
		       tp->dev->name);
	}

	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
		/* NOTE(review): 0x7c00 bit 25 is another undocumented
		 * PCIE workaround -- kept as-is.
		 */
		u32 val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	/* Reprobe ASF enable state.  */
	tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
	tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
	}

	return 0;
}
4873
4874 /* tp->lock is held. */
4875 static void tg3_stop_fw(struct tg3 *tp)
4876 {
4877         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4878                 u32 val;
4879                 int i;
4880
4881                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
4882                 val = tr32(GRC_RX_CPU_EVENT);
4883                 val |= (1 << 14);
4884                 tw32(GRC_RX_CPU_EVENT, val);
4885
4886                 /* Wait for RX cpu to ACK the event.  */
4887                 for (i = 0; i < 100; i++) {
4888                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
4889                                 break;
4890                         udelay(1);
4891                 }
4892         }
4893 }
4894
/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	/* Pause the ASF firmware and announce the reset before
	 * aborting the hardware and resetting the chip.
	 */
	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	/* Report the post-reset state via both handshake styles. */
	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	return err;
}
4915
4916 #define TG3_FW_RELEASE_MAJOR    0x0
4917 #define TG3_FW_RELASE_MINOR     0x0
4918 #define TG3_FW_RELEASE_FIX      0x0
4919 #define TG3_FW_START_ADDR       0x08000000
4920 #define TG3_FW_TEXT_ADDR        0x08000000
4921 #define TG3_FW_TEXT_LEN         0x9c0
4922 #define TG3_FW_RODATA_ADDR      0x080009c0
4923 #define TG3_FW_RODATA_LEN       0x60
4924 #define TG3_FW_DATA_ADDR        0x08000a40
4925 #define TG3_FW_DATA_LEN         0x20
4926 #define TG3_FW_SBSS_ADDR        0x08000a60
4927 #define TG3_FW_SBSS_LEN         0xc
4928 #define TG3_FW_BSS_ADDR         0x08000a70
4929 #define TG3_FW_BSS_LEN          0x10
4930
/* Firmware image loaded into the RX cpu by
 * tg3_load_5701_a0_firmware_fix().  Opaque instruction words derived
 * from Broadcom's proprietary firmware (see the copyright notice at
 * the top of this file) -- do not edit by hand.
 */
static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
	0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
	0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
	0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
	0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
	0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
	0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
	0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
	0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
	0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
	0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
	0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
	0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
	0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
	0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
	0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
	0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0,
	0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
	0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
	0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
	0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
	0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
	0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
	0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
	0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
	0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
	0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
	0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
	0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
	0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
	0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
	0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
	0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
	0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
	0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
	0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
	0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
	0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
	0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
	0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
	0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
	0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
	0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
	0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
	0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
	0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
	0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
	0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
	0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
	0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
	0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
	0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
	0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
	0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
	0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
	0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
	0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
	0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
	0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
	0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
	0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
	0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
	0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
	0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
5024
/* Read-only data section of the firmware image above (the words are
 * presumably packed ASCII string constants -- not decoded here).
 */
static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};
5032
5033 #if 0 /* All zeros, don't eat up space with it. */
5034 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
5035         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5036         0x00000000, 0x00000000, 0x00000000, 0x00000000
5037 };
5038 #endif
5039
5040 #define RX_CPU_SCRATCH_BASE     0x30000
5041 #define RX_CPU_SCRATCH_SIZE     0x04000
5042 #define TX_CPU_SCRATCH_BASE     0x34000
5043 #define TX_CPU_SCRATCH_SIZE     0x04000
5044
5045 /* tp->lock is held. */
5046 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5047 {
5048         int i;
5049
5050         BUG_ON(offset == TX_CPU_BASE &&
5051             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5052
5053         if (offset == RX_CPU_BASE) {
5054                 for (i = 0; i < 10000; i++) {
5055                         tw32(offset + CPU_STATE, 0xffffffff);
5056                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5057                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5058                                 break;
5059                 }
5060
5061                 tw32(offset + CPU_STATE, 0xffffffff);
5062                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
5063                 udelay(10);
5064         } else {
5065                 for (i = 0; i < 10000; i++) {
5066                         tw32(offset + CPU_STATE, 0xffffffff);
5067                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5068                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5069                                 break;
5070                 }
5071         }
5072
5073         if (i >= 10000) {
5074                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5075                        "and %s CPU\n",
5076                        tp->dev->name,
5077                        (offset == RX_CPU_BASE ? "RX" : "TX"));
5078                 return -ENODEV;
5079         }
5080
5081         /* Clear firmware's nvram arbitration. */
5082         if (tp->tg3_flags & TG3_FLAG_NVRAM)
5083                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
5084         return 0;
5085 }
5086
/* Describes one firmware image: base address, byte length and backing
 * words for each of its text, rodata and data sections.  A NULL *_data
 * pointer means the section is all zeros (see tg3_load_firmware_cpu()).
 */
struct fw_info {
	unsigned int text_base;		/* load address of .text */
	unsigned int text_len;		/* .text length in bytes */
	const u32 *text_data;		/* .text words, or NULL for zeros */
	unsigned int rodata_base;	/* load address of .rodata */
	unsigned int rodata_len;	/* .rodata length in bytes */
	const u32 *rodata_data;		/* .rodata words, or NULL for zeros */
	unsigned int data_base;		/* load address of .data */
	unsigned int data_len;		/* .data length in bytes */
	const u32 *data_data;		/* .data words, or NULL for zeros */
};
5098
5099 /* tp->lock is held. */
5100 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5101                                  int cpu_scratch_size, struct fw_info *info)
5102 {
5103         int err, lock_err, i;
5104         void (*write_op)(struct tg3 *, u32, u32);
5105
5106         if (cpu_base == TX_CPU_BASE &&
5107             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5108                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5109                        "TX cpu firmware on %s which is 5705.\n",
5110                        tp->dev->name);
5111                 return -EINVAL;
5112         }
5113
5114         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5115                 write_op = tg3_write_mem;
5116         else
5117                 write_op = tg3_write_indirect_reg32;
5118
5119         /* It is possible that bootcode is still loading at this point.
5120          * Get the nvram lock first before halting the cpu.
5121          */
5122         lock_err = tg3_nvram_lock(tp);
5123         err = tg3_halt_cpu(tp, cpu_base);
5124         if (!lock_err)
5125                 tg3_nvram_unlock(tp);
5126         if (err)
5127                 goto out;
5128
5129         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5130                 write_op(tp, cpu_scratch_base + i, 0);
5131         tw32(cpu_base + CPU_STATE, 0xffffffff);
5132         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5133         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5134                 write_op(tp, (cpu_scratch_base +
5135                               (info->text_base & 0xffff) +
5136                               (i * sizeof(u32))),
5137                          (info->text_data ?
5138                           info->text_data[i] : 0));
5139         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5140                 write_op(tp, (cpu_scratch_base +
5141                               (info->rodata_base & 0xffff) +
5142                               (i * sizeof(u32))),
5143                          (info->rodata_data ?
5144                           info->rodata_data[i] : 0));
5145         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5146                 write_op(tp, (cpu_scratch_base +
5147                               (info->data_base & 0xffff) +
5148                               (i * sizeof(u32))),
5149                          (info->data_data ?
5150                           info->data_data[i] : 0));
5151
5152         err = 0;
5153
5154 out:
5155         return err;
5156 }
5157
5158 /* tp->lock is held. */
5159 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5160 {
5161         struct fw_info info;
5162         int err, i;
5163
5164         info.text_base = TG3_FW_TEXT_ADDR;
5165         info.text_len = TG3_FW_TEXT_LEN;
5166         info.text_data = &tg3FwText[0];
5167         info.rodata_base = TG3_FW_RODATA_ADDR;
5168         info.rodata_len = TG3_FW_RODATA_LEN;
5169         info.rodata_data = &tg3FwRodata[0];
5170         info.data_base = TG3_FW_DATA_ADDR;
5171         info.data_len = TG3_FW_DATA_LEN;
5172         info.data_data = NULL;
5173
5174         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5175                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5176                                     &info);
5177         if (err)
5178                 return err;
5179
5180         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5181                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5182                                     &info);
5183         if (err)
5184                 return err;
5185
5186         /* Now startup only the RX cpu. */
5187         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5188         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5189
5190         for (i = 0; i < 5; i++) {
5191                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5192                         break;
5193                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5194                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5195                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5196                 udelay(1000);
5197         }
5198         if (i >= 5) {
5199                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5200                        "to set RX CPU PC, is %08x should be %08x\n",
5201                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5202                        TG3_FW_TEXT_ADDR);
5203                 return -ENODEV;
5204         }
5205         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5206         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5207
5208         return 0;
5209 }
5210
5211 #if TG3_TSO_SUPPORT != 0
5212
5213 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
5214 #define TG3_TSO_FW_RELASE_MINOR         0x6
5215 #define TG3_TSO_FW_RELEASE_FIX          0x0
5216 #define TG3_TSO_FW_START_ADDR           0x08000000
5217 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
5218 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
5219 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
5220 #define TG3_TSO_FW_RODATA_LEN           0x60
5221 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
5222 #define TG3_TSO_FW_DATA_LEN             0x30
5223 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
5224 #define TG3_TSO_FW_SBSS_LEN             0x2c
5225 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
5226 #define TG3_TSO_FW_BSS_LEN              0x894
5227
5228 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5229         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5230         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5231         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5232         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5233         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5234         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5235         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5236         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5237         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5238         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5239         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5240         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5241         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5242         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5243         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5244         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5245         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5246         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5247         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5248         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5249         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5250         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5251         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5252         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5253         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5254         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5255         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5256         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5257         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5258         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5259         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5260         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5261         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5262         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5263         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5264         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5265         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5266         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5267         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5268         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5269         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5270         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5271         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5272         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5273         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5274         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5275         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5276         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5277         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5278         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5279         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5280         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5281         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5282         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5283         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5284         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5285         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5286         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5287         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5288         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5289         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5290         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5291         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5292         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5293         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5294         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5295         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5296         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5297         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5298         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5299         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5300         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5301         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5302         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5303         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5304         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5305         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5306         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5307         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5308         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5309         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5310         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5311         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5312         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5313         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5314         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5315         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5316         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5317         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5318         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5319         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5320         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5321         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5322         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5323         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5324         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5325         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5326         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5327         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5328         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5329         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5330         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5331         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5332         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5333         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5334         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5335         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5336         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5337         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5338         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5339         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5340         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5341         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5342         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5343         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5344         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5345         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5346         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5347         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5348         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5349         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5350         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5351         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5352         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5353         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5354         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5355         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5356         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5357         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5358         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5359         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5360         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5361         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5362         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5363         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5364         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5365         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5366         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5367         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5368         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5369         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5370         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5371         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5372         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5373         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5374         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5375         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5376         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5377         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5378         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5379         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5380         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5381         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5382         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5383         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5384         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5385         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5386         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5387         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5388         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5389         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5390         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5391         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5392         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5393         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5394         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5395         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5396         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5397         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5398         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5399         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5400         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5401         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5402         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5403         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5404         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5405         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5406         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5407         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5408         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5409         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5410         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5411         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5412         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5413         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5414         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5415         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5416         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5417         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5418         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5419         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5420         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5421         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5422         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5423         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5424         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5425         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5426         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5427         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5428         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5429         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5430         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5431         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5432         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5433         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5434         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5435         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5436         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5437         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5438         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5439         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5440         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5441         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5442         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5443         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5444         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5445         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5446         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5447         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5448         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5449         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5450         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5451         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5452         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5453         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5454         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5455         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5456         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5457         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5458         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5459         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5460         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5461         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5462         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5463         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5464         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5465         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5466         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5467         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5468         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5469         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5470         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5471         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5472         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5473         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5474         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5475         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5476         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5477         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5478         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5479         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5480         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5481         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5482         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5483         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5484         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5485         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5486         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5487         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5488         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5489         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5490         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5491         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5492         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5493         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5494         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5495         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5496         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5497         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5498         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5499         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5500         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5501         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5502         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5503         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5504         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5505         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5506         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5507         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5508         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5509         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5510         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5511         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5512         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5513 };
5514
/*
 * Read-only data segment of the TSO firmware image.  The words are
 * ASCII tags packed big-endian into u32s, used by the firmware itself:
 * 0x4d61696e = "Main", 0x43707542 = "CpuB", 0x43707541 = "CpuA",
 * 0x73746b6f 0x66666c64 = "stkoffld", 0x53774576 0x656e7430 = "SwEvent0",
 * 0x66617461 0x6c457272 = "fatalErr".  Do not edit: this blob is loaded
 * verbatim into NIC-local memory alongside the firmware text.
 */
static const u32 tg3TsoFwRodata[] = {
        0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
        0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
        0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
        0x00000000,
};
5522
/*
 * Initialized data segment of the TSO firmware image.  The non-zero
 * words (0x73746b6f 0x66666c64 0x5f76312e 0x362e3000) decode to the
 * ASCII version string "stkoffld_v1.6.0"; the remaining words are zero
 * fill.  Do not edit: loaded verbatim into NIC-local memory.
 */
static const u32 tg3TsoFwData[] = {
        0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000,
};
5528
/* 5705 needs a special version of the TSO firmware.  */
/*
 * 5705 TSO firmware version (1.2.0).
 * NOTE(review): "RELASE" below is a misspelling of "RELEASE"; it is kept
 * as-is because other code may reference the macro under this spelling —
 * confirm all users before renaming.
 */
#define TG3_TSO5_FW_RELEASE_MAJOR       0x1
#define TG3_TSO5_FW_RELASE_MINOR        0x2
#define TG3_TSO5_FW_RELEASE_FIX         0x0
/*
 * Load layout of the 5705 TSO firmware image in NIC-local memory:
 * entry point, then each section's base address and byte length.
 * Note RODATA_ADDR == TEXT_ADDR + TEXT_LEN (0x10000 + 0xe90); the
 * sections are laid out back-to-back with alignment padding.
 */
#define TG3_TSO5_FW_START_ADDR          0x00010000
#define TG3_TSO5_FW_TEXT_ADDR           0x00010000
#define TG3_TSO5_FW_TEXT_LEN            0xe90
#define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
#define TG3_TSO5_FW_RODATA_LEN          0x50
#define TG3_TSO5_FW_DATA_ADDR           0x00010f00
#define TG3_TSO5_FW_DATA_LEN            0x20
#define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
#define TG3_TSO5_FW_SBSS_LEN            0x28
#define TG3_TSO5_FW_BSS_ADDR            0x00010f50
#define TG3_TSO5_FW_BSS_LEN             0x88
5544
5545 static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
5546         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5547         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5548         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5549         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5550         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5551         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5552         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5553         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5554         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5555         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5556         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5557         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5558         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5559         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5560         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5561         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5562         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5563         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5564         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5565         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5566         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5567         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5568         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5569         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5570         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5571         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5572         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5573         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5574         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5575         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5576         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5577         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5578         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5579         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5580         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5581         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5582         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5583         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5584         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5585         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5586         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5587         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5588         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5589         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5590         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5591         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5592         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5593         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5594         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5595         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5596         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5597         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5598         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5599         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5600         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5601         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5602         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5603         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5604         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5605         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5606         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5607         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5608         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5609         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5610         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5611         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5612         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5613         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5614         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5615         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5616         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5617         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5618         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5619         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5620         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5621         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5622         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5623         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5624         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5625         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5626         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5627         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5628         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5629         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5630         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5631         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5632         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5633         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5634         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5635         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5636         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5637         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5638         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5639         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5640         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5641         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5642         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5643         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5644         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5645         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5646         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5647         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5648         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5649         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5650         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5651         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5652         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5653         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5654         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5655         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5656         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5657         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5658         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5659         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5660         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5661         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5662         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5663         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5664         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5665         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5666         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5667         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5668         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5669         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5670         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5671         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5672         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5673         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5674         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5675         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5676         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5677         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5678         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5679         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5680         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5681         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5682         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5683         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5684         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5685         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5686         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5687         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5688         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5689         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5690         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5691         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5692         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5693         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5694         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5695         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5696         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5697         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5698         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5699         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5700         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5701         0x00000000, 0x00000000, 0x00000000,
5702 };
5703
/* Read-only data segment of the 5705 TSO firmware, loaded into NIC
 * memory at TG3_TSO5_FW_RODATA_ADDR by tg3_load_tso_firmware().  The
 * words appear to be ASCII tags used by the firmware (e.g. "Main",
 * "CpuB", "stkoffld", "fatalErr") — do not modify.
 */
static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
	0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x00000000,
};
5710
/* Initialized data segment of the 5705 TSO firmware, loaded at
 * TG3_TSO5_FW_DATA_ADDR by tg3_load_tso_firmware().  The words appear
 * to spell the ASCII version tag "stkoffld_v1.2.0" — do not modify.
 */
static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000,
};
5715
/* tp->lock is held.
 *
 * Download the TSO firmware into the appropriate on-chip CPU and
 * start it running.  Chips with TG3_FLG2_HW_TSO segment in hardware
 * and need no firmware at all.  On the 5705 the firmware runs on the
 * RX CPU with scratch space carved out of the MBUF pool; on all other
 * supported chips it runs on the TX CPU's dedicated scratch area.
 *
 * Returns 0 on success, a tg3_load_firmware_cpu() error code if the
 * image could not be written, or -ENODEV if the CPU's program counter
 * cannot be forced to the firmware entry point.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	/* Hardware-TSO chips need no firmware download. */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
		return 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		/* 5705: firmware runs on the RX CPU; scratch space is
		 * taken from the start of the MBUF pool, sized to hold
		 * all firmware segments including (zero-filled) sbss/bss.
		 */
		info.text_base = TG3_TSO5_FW_TEXT_ADDR;
		info.text_len = TG3_TSO5_FW_TEXT_LEN;
		info.text_data = &tg3Tso5FwText[0];
		info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
		info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
		info.rodata_data = &tg3Tso5FwRodata[0];
		info.data_base = TG3_TSO5_FW_DATA_ADDR;
		info.data_len = TG3_TSO5_FW_DATA_LEN;
		info.data_data = &tg3Tso5FwData[0];
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
		cpu_scratch_size = (info.text_len +
				    info.rodata_len +
				    info.data_len +
				    TG3_TSO5_FW_SBSS_LEN +
				    TG3_TSO5_FW_BSS_LEN);
	} else {
		/* All other chips: firmware runs on the TX CPU using
		 * its fixed scratch region.
		 */
		info.text_base = TG3_TSO_FW_TEXT_ADDR;
		info.text_len = TG3_TSO_FW_TEXT_LEN;
		info.text_data = &tg3TsoFwText[0];
		info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
		info.rodata_len = TG3_TSO_FW_RODATA_LEN;
		info.rodata_data = &tg3TsoFwRodata[0];
		info.data_base = TG3_TSO_FW_DATA_ADDR;
		info.data_len = TG3_TSO_FW_DATA_LEN;
		info.data_data = &tg3TsoFwData[0];
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC,    info.text_base);

	/* Retry up to five times: clear state, halt the CPU, and force
	 * its PC to the firmware entry point until it sticks.
	 */
	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.text_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC,    info.text_base);
		udelay(1000);
	}
	if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
		       "to set CPU PC, is %08x should be %08x\n",
		       tp->dev->name, tr32(cpu_base + CPU_PC),
		       info.text_base);
		return -ENODEV;
	}
	/* Clear the halt bit (CPU_MODE = 0) to let the firmware run. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
	return 0;
}
5787
5788 #endif /* TG3_TSO_SUPPORT != 0 */
5789
5790 /* tp->lock is held. */
5791 static void __tg3_set_mac_addr(struct tg3 *tp)
5792 {
5793         u32 addr_high, addr_low;
5794         int i;
5795
5796         addr_high = ((tp->dev->dev_addr[0] << 8) |
5797                      tp->dev->dev_addr[1]);
5798         addr_low = ((tp->dev->dev_addr[2] << 24) |
5799                     (tp->dev->dev_addr[3] << 16) |
5800                     (tp->dev->dev_addr[4] <<  8) |
5801                     (tp->dev->dev_addr[5] <<  0));
5802         for (i = 0; i < 4; i++) {
5803                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5804                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5805         }
5806
5807         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5808             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5809                 for (i = 0; i < 12; i++) {
5810                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5811                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5812                 }
5813         }
5814
5815         addr_high = (tp->dev->dev_addr[0] +
5816                      tp->dev->dev_addr[1] +
5817                      tp->dev->dev_addr[2] +
5818                      tp->dev->dev_addr[3] +
5819                      tp->dev->dev_addr[4] +
5820                      tp->dev->dev_addr[5]) &
5821                 TX_BACKOFF_SEED_MASK;
5822         tw32(MAC_TX_BACKOFF_SEED, addr_high);
5823 }
5824
5825 static int tg3_set_mac_addr(struct net_device *dev, void *p)
5826 {
5827         struct tg3 *tp = netdev_priv(dev);
5828         struct sockaddr *addr = p;
5829         int err = 0;
5830
5831         if (!is_valid_ether_addr(addr->sa_data))
5832                 return -EINVAL;
5833
5834         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5835
5836         if (!netif_running(dev))
5837                 return 0;
5838
5839         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5840                 /* Reset chip so that ASF can re-init any MAC addresses it
5841                  * needs.
5842                  */
5843                 tg3_netif_stop(tp);
5844                 tg3_full_lock(tp, 1);
5845
5846                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5847                 err = tg3_restart_hw(tp, 0);
5848                 if (!err)
5849                         tg3_netif_start(tp);
5850                 tg3_full_unlock(tp);
5851         } else {
5852                 spin_lock_bh(&tp->lock);
5853                 __tg3_set_mac_addr(tp);
5854                 spin_unlock_bh(&tp->lock);
5855         }
5856
5857         return err;
5858 }
5859
5860 /* tp->lock is held. */
5861 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5862                            dma_addr_t mapping, u32 maxlen_flags,
5863                            u32 nic_addr)
5864 {
5865         tg3_write_mem(tp,
5866                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5867                       ((u64) mapping >> 32));
5868         tg3_write_mem(tp,
5869                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
5870                       ((u64) mapping & 0xffffffff));
5871         tg3_write_mem(tp,
5872                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
5873                        maxlen_flags);
5874
5875         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5876                 tg3_write_mem(tp,
5877                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
5878                               nic_addr);
5879 }
5880
5881 static void __tg3_set_rx_mode(struct net_device *);
5882 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
5883 {
5884         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
5885         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
5886         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
5887         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
5888         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5889                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
5890                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
5891         }
5892         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
5893         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
5894         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5895                 u32 val = ec->stats_block_coalesce_usecs;
5896
5897                 if (!netif_carrier_ok(tp->dev))
5898                         val = 0;
5899
5900                 tw32(HOSTCC_STAT_COAL_TICKS, val);
5901         }
5902 }
5903
5904 /* tp->lock is held. */
5905 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
5906 {
5907         u32 val, rdmac_mode;
5908         int i, err, limit;
5909
5910         tg3_disable_ints(tp);
5911
5912         tg3_stop_fw(tp);
5913
5914         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
5915
5916         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
5917                 tg3_abort_hw(tp, 1);
5918         }
5919
5920         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && reset_phy)
5921                 tg3_phy_reset(tp);
5922
5923         err = tg3_chip_reset(tp);
5924         if (err)
5925                 return err;
5926
5927         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
5928
5929         /* This works around an issue with Athlon chipsets on
5930          * B3 tigon3 silicon.  This bit has no effect on any
5931          * other revision.  But do not set this on PCI Express
5932          * chips.
5933          */
5934         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
5935                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5936         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5937
5938         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5939             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5940                 val = tr32(TG3PCI_PCISTATE);
5941                 val |= PCISTATE_RETRY_SAME_DMA;
5942                 tw32(TG3PCI_PCISTATE, val);
5943         }
5944
5945         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5946                 /* Enable some hw fixes.  */
5947                 val = tr32(TG3PCI_MSI_DATA);
5948                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5949                 tw32(TG3PCI_MSI_DATA, val);
5950         }
5951
5952         /* Descriptor ring init may make accesses to the
5953          * NIC SRAM area to setup the TX descriptors, so we
5954          * can only do this after the hardware has been
5955          * successfully reset.
5956          */
5957         err = tg3_init_rings(tp);
5958         if (err)
5959                 return err;
5960
5961         /* This value is determined during the probe time DMA
5962          * engine test, tg3_test_dma.
5963          */
5964         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5965
5966         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5967                           GRC_MODE_4X_NIC_SEND_RINGS |
5968                           GRC_MODE_NO_TX_PHDR_CSUM |
5969                           GRC_MODE_NO_RX_PHDR_CSUM);
5970         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5971
5972         /* Pseudo-header checksum is done by hardware logic and not
5973          * the offload processers, so make the chip do the pseudo-
5974          * header checksums on receive.  For transmit it is more
5975          * convenient to do the pseudo-header checksum in software
5976          * as Linux does that on transmit for us in all cases.
5977          */
5978         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5979
5980         tw32(GRC_MODE,
5981              tp->grc_mode |
5982              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5983
5984         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
5985         val = tr32(GRC_MISC_CFG);
5986         val &= ~0xff;
5987         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5988         tw32(GRC_MISC_CFG, val);
5989
5990         /* Initialize MBUF/DESC pool. */
5991         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
5992                 /* Do nothing.  */
5993         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5994                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5995                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5996                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5997                 else
5998                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5999                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6000                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6001         }
6002 #if TG3_TSO_SUPPORT != 0
6003         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6004                 int fw_len;
6005
6006                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6007                           TG3_TSO5_FW_RODATA_LEN +
6008                           TG3_TSO5_FW_DATA_LEN +
6009                           TG3_TSO5_FW_SBSS_LEN +
6010                           TG3_TSO5_FW_BSS_LEN);
6011                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6012                 tw32(BUFMGR_MB_POOL_ADDR,
6013                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6014                 tw32(BUFMGR_MB_POOL_SIZE,
6015                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6016         }
6017 #endif
6018
6019         if (tp->dev->mtu <= ETH_DATA_LEN) {
6020                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6021                      tp->bufmgr_config.mbuf_read_dma_low_water);
6022                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6023                      tp->bufmgr_config.mbuf_mac_rx_low_water);
6024                 tw32(BUFMGR_MB_HIGH_WATER,
6025                      tp->bufmgr_config.mbuf_high_water);
6026         } else {
6027                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6028                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6029                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6030                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6031                 tw32(BUFMGR_MB_HIGH_WATER,
6032                      tp->bufmgr_config.mbuf_high_water_jumbo);
6033         }
6034         tw32(BUFMGR_DMA_LOW_WATER,
6035              tp->bufmgr_config.dma_low_water);
6036         tw32(BUFMGR_DMA_HIGH_WATER,
6037              tp->bufmgr_config.dma_high_water);
6038
6039         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6040         for (i = 0; i < 2000; i++) {
6041                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6042                         break;
6043                 udelay(10);
6044         }
6045         if (i >= 2000) {
6046                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6047                        tp->dev->name);
6048                 return -ENODEV;
6049         }
6050
6051         /* Setup replenish threshold. */
6052         val = tp->rx_pending / 8;
6053         if (val == 0)
6054                 val = 1;
6055         else if (val > tp->rx_std_max_post)
6056                 val = tp->rx_std_max_post;
6057
6058         tw32(RCVBDI_STD_THRESH, val);
6059
6060         /* Initialize TG3_BDINFO's at:
6061          *  RCVDBDI_STD_BD:     standard eth size rx ring
6062          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
6063          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
6064          *
6065          * like so:
6066          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
6067          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
6068          *                              ring attribute flags
6069          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
6070          *
6071          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6072          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6073          *
6074          * The size of each ring is fixed in the firmware, but the location is
6075          * configurable.
6076          */
6077         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6078              ((u64) tp->rx_std_mapping >> 32));
6079         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6080              ((u64) tp->rx_std_mapping & 0xffffffff));
6081         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6082              NIC_SRAM_RX_BUFFER_DESC);
6083
6084         /* Don't even try to program the JUMBO/MINI buffer descriptor
6085          * configs on 5705.
6086          */
6087         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6088                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6089                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6090         } else {
6091                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6092                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6093
6094                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6095                      BDINFO_FLAGS_DISABLED);
6096
6097                 /* Setup replenish threshold. */
6098                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6099
6100                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6101                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6102                              ((u64) tp->rx_jumbo_mapping >> 32));
6103                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6104                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6105                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6106                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6107                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6108                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6109                 } else {
6110                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6111                              BDINFO_FLAGS_DISABLED);
6112                 }
6113
6114         }
6115
6116         /* There is only one send ring on 5705/5750, no need to explicitly
6117          * disable the others.
6118          */
6119         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6120                 /* Clear out send RCB ring in SRAM. */
6121                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6122                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6123                                       BDINFO_FLAGS_DISABLED);
6124         }
6125
6126         tp->tx_prod = 0;
6127         tp->tx_cons = 0;
6128         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6129         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6130
6131         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6132                        tp->tx_desc_mapping,
6133                        (TG3_TX_RING_SIZE <<
6134                         BDINFO_FLAGS_MAXLEN_SHIFT),
6135                        NIC_SRAM_TX_BUFFER_DESC);
6136
6137         /* There is only one receive return ring on 5705/5750, no need
6138          * to explicitly disable the others.
6139          */
6140         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6141                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6142                      i += TG3_BDINFO_SIZE) {
6143                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6144                                       BDINFO_FLAGS_DISABLED);
6145                 }
6146         }
6147
6148         tp->rx_rcb_ptr = 0;
6149         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6150
6151         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6152                        tp->rx_rcb_mapping,
6153                        (TG3_RX_RCB_RING_SIZE(tp) <<
6154                         BDINFO_FLAGS_MAXLEN_SHIFT),
6155                        0);
6156
6157         tp->rx_std_ptr = tp->rx_pending;
6158         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6159                      tp->rx_std_ptr);
6160
6161         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6162                                                 tp->rx_jumbo_pending : 0;
6163         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6164                      tp->rx_jumbo_ptr);
6165
6166         /* Initialize MAC address and backoff seed. */
6167         __tg3_set_mac_addr(tp);
6168
6169         /* MTU + ethernet header + FCS + optional VLAN tag */
6170         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6171
6172         /* The slot time is changed by tg3_setup_phy if we
6173          * run at gigabit with half duplex.
6174          */
6175         tw32(MAC_TX_LENGTHS,
6176              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6177              (6 << TX_LENGTHS_IPG_SHIFT) |
6178              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6179
6180         /* Receive rules. */
6181         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6182         tw32(RCVLPC_CONFIG, 0x0181);
6183
6184         /* Calculate RDMAC_MODE setting early, we need it to determine
6185          * the RCVLPC_STATE_ENABLE mask.
6186          */
6187         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6188                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6189                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6190                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6191                       RDMAC_MODE_LNGREAD_ENAB);
6192         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6193                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
6194
6195         /* If statement applies to 5705 and 5750 PCI devices only */
6196         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6197              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6198             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6199                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6200                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6201                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6202                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6203                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6204                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6205                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6206                 }
6207         }
6208
6209         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6210                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6211
6212 #if TG3_TSO_SUPPORT != 0
6213         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6214                 rdmac_mode |= (1 << 27);
6215 #endif
6216
6217         /* Receive/send statistics. */
6218         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6219                 val = tr32(RCVLPC_STATS_ENABLE);
6220                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6221                 tw32(RCVLPC_STATS_ENABLE, val);
6222         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6223                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6224                 val = tr32(RCVLPC_STATS_ENABLE);
6225                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6226                 tw32(RCVLPC_STATS_ENABLE, val);
6227         } else {
6228                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6229         }
6230         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6231         tw32(SNDDATAI_STATSENAB, 0xffffff);
6232         tw32(SNDDATAI_STATSCTRL,
6233              (SNDDATAI_SCTRL_ENABLE |
6234               SNDDATAI_SCTRL_FASTUPD));
6235
6236         /* Setup host coalescing engine. */
6237         tw32(HOSTCC_MODE, 0);
6238         for (i = 0; i < 2000; i++) {
6239                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6240                         break;
6241                 udelay(10);
6242         }
6243
6244         __tg3_set_coalesce(tp, &tp->coal);
6245
6246         /* set status block DMA address */
6247         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6248              ((u64) tp->status_mapping >> 32));
6249         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6250              ((u64) tp->status_mapping & 0xffffffff));
6251
6252         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6253                 /* Status/statistics block address.  See tg3_timer,
6254                  * the tg3_periodic_fetch_stats call there, and
6255                  * tg3_get_stats to see how this works for 5705/5750 chips.
6256                  */
6257                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6258                      ((u64) tp->stats_mapping >> 32));
6259                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6260                      ((u64) tp->stats_mapping & 0xffffffff));
6261                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6262                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6263         }
6264
6265         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6266
6267         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6268         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6269         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6270                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6271
6272         /* Clear statistics/status block in chip, and status block in ram. */
6273         for (i = NIC_SRAM_STATS_BLK;
6274              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6275              i += sizeof(u32)) {
6276                 tg3_write_mem(tp, i, 0);
6277                 udelay(40);
6278         }
6279         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6280
6281         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6282                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6283                 /* reset to prevent losing 1st rx packet intermittently */
6284                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6285                 udelay(10);
6286         }
6287
6288         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6289                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6290         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6291         udelay(40);
6292
6293         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6294          * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
6295          * register to preserve the GPIO settings for LOMs. The GPIOs,
6296          * whether used as inputs or outputs, are set by boot code after
6297          * reset.
6298          */
6299         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
6300                 u32 gpio_mask;
6301
6302                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE2 |
6303                             GRC_LCLCTRL_GPIO_OUTPUT0 | GRC_LCLCTRL_GPIO_OUTPUT2;
6304
6305                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6306                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6307                                      GRC_LCLCTRL_GPIO_OUTPUT3;
6308
6309                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6310                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6311
6312                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6313
6314                 /* GPIO1 must be driven high for eeprom write protect */
6315                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6316                                        GRC_LCLCTRL_GPIO_OUTPUT1);
6317         }
6318         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6319         udelay(100);
6320
6321         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6322         tp->last_tag = 0;
6323
6324         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6325                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6326                 udelay(40);
6327         }
6328
6329         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6330                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6331                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6332                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6333                WDMAC_MODE_LNGREAD_ENAB);
6334
6335         /* If statement applies to 5705 and 5750 PCI devices only */
6336         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6337              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6338             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6339                 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
6340                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6341                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6342                         /* nothing */
6343                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6344                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6345                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6346                         val |= WDMAC_MODE_RX_ACCEL;
6347                 }
6348         }
6349
6350         /* Enable host coalescing bug fix */
6351         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6352             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
6353                 val |= (1 << 29);
6354
6355         tw32_f(WDMAC_MODE, val);
6356         udelay(40);
6357
6358         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6359                 val = tr32(TG3PCI_X_CAPS);
6360                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6361                         val &= ~PCIX_CAPS_BURST_MASK;
6362                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6363                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6364                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6365                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6366                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
6367                                 val |= (tp->split_mode_max_reqs <<
6368                                         PCIX_CAPS_SPLIT_SHIFT);
6369                 }
6370                 tw32(TG3PCI_X_CAPS, val);
6371         }
6372
6373         tw32_f(RDMAC_MODE, rdmac_mode);
6374         udelay(40);
6375
6376         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6377         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6378                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6379         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6380         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6381         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6382         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6383         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6384 #if TG3_TSO_SUPPORT != 0
6385         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6386                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6387 #endif
6388         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6389         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6390
6391         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6392                 err = tg3_load_5701_a0_firmware_fix(tp);
6393                 if (err)
6394                         return err;
6395         }
6396
6397 #if TG3_TSO_SUPPORT != 0
6398         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6399                 err = tg3_load_tso_firmware(tp);
6400                 if (err)
6401                         return err;
6402         }
6403 #endif
6404
6405         tp->tx_mode = TX_MODE_ENABLE;
6406         tw32_f(MAC_TX_MODE, tp->tx_mode);
6407         udelay(100);
6408
6409         tp->rx_mode = RX_MODE_ENABLE;
6410         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6411                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6412
6413         tw32_f(MAC_RX_MODE, tp->rx_mode);
6414         udelay(10);
6415
6416         if (tp->link_config.phy_is_low_power) {
6417                 tp->link_config.phy_is_low_power = 0;
6418                 tp->link_config.speed = tp->link_config.orig_speed;
6419                 tp->link_config.duplex = tp->link_config.orig_duplex;
6420                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6421         }
6422
6423         tp->mi_mode = MAC_MI_MODE_BASE;
6424         tw32_f(MAC_MI_MODE, tp->mi_mode);
6425         udelay(80);
6426
6427         tw32(MAC_LED_CTRL, tp->led_ctrl);
6428
6429         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6430         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6431                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6432                 udelay(10);
6433         }
6434         tw32_f(MAC_RX_MODE, tp->rx_mode);
6435         udelay(10);
6436
6437         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6438                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6439                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6440                         /* Set drive transmission level to 1.2V  */
6441                         /* only if the signal pre-emphasis bit is not set  */
6442                         val = tr32(MAC_SERDES_CFG);
6443                         val &= 0xfffff000;
6444                         val |= 0x880;
6445                         tw32(MAC_SERDES_CFG, val);
6446                 }
6447                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6448                         tw32(MAC_SERDES_CFG, 0x616000);
6449         }
6450
6451         /* Prevent chip from dropping frames when flow control
6452          * is enabled.
6453          */
6454         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6455
6456         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6457             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6458                 /* Use hardware link auto-negotiation */
6459                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6460         }
6461
6462         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6463             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6464                 u32 tmp;
6465
6466                 tmp = tr32(SERDES_RX_CTRL);
6467                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6468                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6469                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6470                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6471         }
6472
6473         err = tg3_setup_phy(tp, reset_phy);
6474         if (err)
6475                 return err;
6476
6477         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6478                 u32 tmp;
6479
6480                 /* Clear CRC stats. */
6481                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
6482                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
6483                         tg3_readphy(tp, 0x14, &tmp);
6484                 }
6485         }
6486
6487         __tg3_set_rx_mode(tp->dev);
6488
6489         /* Initialize receive rules. */
6490         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6491         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6492         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6493         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6494
6495         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6496             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6497                 limit = 8;
6498         else
6499                 limit = 16;
6500         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6501                 limit -= 4;
6502         switch (limit) {
6503         case 16:
6504                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6505         case 15:
6506                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6507         case 14:
6508                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6509         case 13:
6510                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6511         case 12:
6512                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6513         case 11:
6514                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6515         case 10:
6516                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6517         case 9:
6518                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6519         case 8:
6520                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6521         case 7:
6522                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6523         case 6:
6524                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6525         case 5:
6526                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6527         case 4:
6528                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6529         case 3:
6530                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6531         case 2:
6532         case 1:
6533
6534         default:
6535                 break;
6536         };
6537
6538         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6539
6540         return 0;
6541 }
6542
6543 /* Called at device open time to get the chip ready for
6544  * packet processing.  Invoked with tp->lock held.
6545  */
6546 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
6547 {
6548         int err;
6549
6550         /* Force the chip into D0. */
6551         err = tg3_set_power_state(tp, PCI_D0);
6552         if (err)
6553                 goto out;
6554
6555         tg3_switch_clocks(tp);
6556
6557         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6558
6559         err = tg3_reset_hw(tp, reset_phy);
6560
6561 out:
6562         return err;
6563 }
6564
/* Read the 32-bit hardware statistics register REG and add its value
 * into the 64-bit software counter PSTAT, propagating a carry into the
 * high word when the low word wraps (unsigned overflow is detected by
 * the sum being smaller than the addend).
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
6571
/* Accumulate the MAC TX/RX and receive-list-placement statistics
 * registers into the 64-bit counters in tp->hw_stats.  Called from the
 * once-per-second portion of tg3_timer() with tp->lock held.  Nothing
 * is gathered while the link is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	/* No carrier, nothing to count. */
	if (!netif_carrier_ok(tp->dev))
		return;

	/* Transmit MAC statistics. */
	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	/* Receive MAC statistics. */
	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	/* Receive list placement statistics. */
	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
6612
/* Driver watchdog timer, re-armed every tp->timer_offset jiffies.
 * Handles the non-tagged-status interrupt race workaround, the
 * once-per-second link polling and statistics fetch, and the
 * once-per-two-seconds ASF heartbeat.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* Interrupts are being synchronized (e.g. device reset in
	 * progress); skip this tick but keep the timer alive.
	 */
	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->hw_status->status & SD_STATUS_UPDATED) {
			/* Status block shows pending work: re-trigger the
			 * interrupt line so the ISR picks it up.
			 */
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			/* Otherwise force a coalescing-now event. */
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
		}

		/* If the write DMA engine stopped on its own, the chip is
		 * wedged; schedule a full reset from process context.
		 */
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
			tg3_periodic_fetch_stats(tp);

		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			/* Poll MAC_STATUS for a link/PHY event instead of
			 * relying on a link-change interrupt.
			 */
			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Link is up but the hardware reports a state
			 * change, or link is down but the PCS has sync /
			 * signal detect: renegotiate either way.
			 */
			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (! netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				/* Momentarily clear the port mode bits to
				 * kick the link state machine, unless a
				 * serdes autoneg is still counting down.
				 */
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_serdes_parallel_detect(tp);

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
			u32 val;

			/* Post the ALIVE3 command (4 bytes of data: a 5
			 * second timeout) and ring the RX CPU doorbell.
			 */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			/* 5 seconds timeout */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
			val = tr32(GRC_RX_CPU_EVENT);
			val |= (1 << 14);
			tw32(GRC_RX_CPU_EVENT, val);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
6732
6733 static int tg3_request_irq(struct tg3 *tp)
6734 {
6735         irqreturn_t (*fn)(int, void *, struct pt_regs *);
6736         unsigned long flags;
6737         struct net_device *dev = tp->dev;
6738
6739         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6740                 fn = tg3_msi;
6741                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
6742                         fn = tg3_msi_1shot;
6743                 flags = IRQF_SAMPLE_RANDOM;
6744         } else {
6745                 fn = tg3_interrupt;
6746                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6747                         fn = tg3_interrupt_tagged;
6748                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
6749         }
6750         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
6751 }
6752
/* Verify that the device can actually deliver an interrupt on the
 * currently configured IRQ line (used mainly to validate MSI).  The
 * production handler is temporarily replaced with tg3_test_isr, an
 * interrupt is forced via HOSTCC_MODE_NOW, and the interrupt mailbox
 * is polled for up to ~50ms.  The normal handler is restored before
 * returning.  Returns 0 if an interrupt was observed, -EIO if not,
 * -ENODEV if the device is down, or a request_irq() error.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err, i;
	u32 int_mbox = 0;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	/* Swap the production handler for the minimal test ISR. */
	free_irq(tp->pdev->irq, dev);

	err = request_irq(tp->pdev->irq, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
	if (err)
		return err;

	tp->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Force the coalescing engine to raise an interrupt now. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);

	/* A non-zero interrupt mailbox value indicates the interrupt
	 * fired (presumably written by tg3_test_isr -- not visible in
	 * this file section); poll 5 times at 10ms intervals.
	 */
	for (i = 0; i < 5; i++) {
		int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
					TG3_64BIT_REG_LOW);
		if (int_mbox != 0)
			break;
		msleep(10);
	}

	tg3_disable_ints(tp);

	/* Restore the normal interrupt handler. */
	free_irq(tp->pdev->irq, dev);

	err = tg3_request_irq(tp);

	if (err)
		return err;

	if (int_mbox != 0)
		return 0;

	return -EIO;
}
6799
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored; otherwise a negative errno.  On MSI failure
 * the chip is reset because the failed MSI cycle may have left the
 * bus in a bad state.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err;
	u16 pci_cmd;

	/* Nothing to test unless MSI is actually in use. */
	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the saved command word (re-enables SERR). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
	       "switching to INTx mode. Please report this failure to "
	       "the PCI maintainer and include system chipset information.\n",
		       tp->dev->name);

	free_irq(tp->pdev->irq, dev);
	pci_disable_msi(tp->pdev);

	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

	/* Re-register the handler; tg3_request_irq() now picks the
	 * INTx path since TG3_FLG2_USING_MSI is clear.
	 */
	err = tg3_request_irq(tp);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->pdev->irq, dev);

	return err;
}
6860
6861 static int tg3_open(struct net_device *dev)
6862 {
6863         struct tg3 *tp = netdev_priv(dev);
6864         int err;
6865
6866         tg3_full_lock(tp, 0);
6867
6868         err = tg3_set_power_state(tp, PCI_D0);
6869         if (err)
6870                 return err;
6871
6872         tg3_disable_ints(tp);
6873         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
6874
6875         tg3_full_unlock(tp);
6876
6877         /* The placement of this call is tied
6878          * to the setup and use of Host TX descriptors.
6879          */
6880         err = tg3_alloc_consistent(tp);
6881         if (err)
6882                 return err;
6883
6884         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
6885             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
6886             (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
6887             !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
6888               (tp->pdev_peer == tp->pdev))) {
6889                 /* All MSI supporting chips should support tagged
6890                  * status.  Assert that this is the case.
6891                  */
6892                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
6893                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
6894                                "Not using MSI.\n", tp->dev->name);
6895                 } else if (pci_enable_msi(tp->pdev) == 0) {
6896                         u32 msi_mode;
6897
6898                         msi_mode = tr32(MSGINT_MODE);
6899                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
6900                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
6901                 }
6902         }
6903         err = tg3_request_irq(tp);
6904
6905         if (err) {
6906                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6907                         pci_disable_msi(tp->pdev);
6908                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6909                 }
6910                 tg3_free_consistent(tp);
6911                 return err;
6912         }
6913
6914         tg3_full_lock(tp, 0);
6915
6916         err = tg3_init_hw(tp, 1);
6917         if (err) {
6918                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6919                 tg3_free_rings(tp);
6920         } else {
6921                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6922                         tp->timer_offset = HZ;
6923                 else
6924                         tp->timer_offset = HZ / 10;
6925
6926                 BUG_ON(tp->timer_offset > HZ);
6927                 tp->timer_counter = tp->timer_multiplier =
6928                         (HZ / tp->timer_offset);
6929                 tp->asf_counter = tp->asf_multiplier =
6930                         ((HZ / tp->timer_offset) * 2);
6931
6932                 init_timer(&tp->timer);
6933                 tp->timer.expires = jiffies + tp->timer_offset;
6934                 tp->timer.data = (unsigned long) tp;
6935                 tp->timer.function = tg3_timer;
6936         }
6937
6938         tg3_full_unlock(tp);
6939
6940         if (err) {
6941                 free_irq(tp->pdev->irq, dev);
6942                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6943                         pci_disable_msi(tp->pdev);
6944                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6945                 }
6946                 tg3_free_consistent(tp);
6947                 return err;
6948         }
6949
6950         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6951                 err = tg3_test_msi(tp);
6952
6953                 if (err) {
6954                         tg3_full_lock(tp, 0);
6955
6956                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6957                                 pci_disable_msi(tp->pdev);
6958                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
6959                         }
6960                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6961                         tg3_free_rings(tp);
6962                         tg3_free_consistent(tp);
6963
6964                         tg3_full_unlock(tp);
6965
6966                         return err;
6967                 }
6968
6969                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6970                         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
6971                                 u32 val = tr32(0x7c04);
6972
6973                                 tw32(0x7c04, val | (1 << 29));
6974                         }
6975                 }
6976         }
6977
6978         tg3_full_lock(tp, 0);
6979
6980         add_timer(&tp->timer);
6981         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
6982         tg3_enable_ints(tp);
6983
6984         tg3_full_unlock(tp);
6985
6986         netif_start_queue(dev);
6987
6988         return 0;
6989 }
6990
6991 #if 0
6992 /*static*/ void tg3_dump_state(struct tg3 *tp)
6993 {
6994         u32 val32, val32_2, val32_3, val32_4, val32_5;
6995         u16 val16;
6996         int i;
6997
6998         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
6999         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
7000         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
7001                val16, val32);
7002
7003         /* MAC block */
7004         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
7005                tr32(MAC_MODE), tr32(MAC_STATUS));
7006         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
7007                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
7008         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
7009                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
7010         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
7011                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
7012
7013         /* Send data initiator control block */
7014         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
7015                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
7016         printk("       SNDDATAI_STATSCTRL[%08x]\n",
7017                tr32(SNDDATAI_STATSCTRL));
7018
7019         /* Send data completion control block */
7020         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
7021
7022         /* Send BD ring selector block */
7023         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
7024                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
7025
7026         /* Send BD initiator control block */
7027         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
7028                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
7029
7030         /* Send BD completion control block */
7031         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
7032
7033         /* Receive list placement control block */
7034         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
7035                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
7036         printk("       RCVLPC_STATSCTRL[%08x]\n",
7037                tr32(RCVLPC_STATSCTRL));
7038
7039         /* Receive data and receive BD initiator control block */
7040         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
7041                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
7042
7043         /* Receive data completion control block */
7044         printk("DEBUG: RCVDCC_MODE[%08x]\n",
7045                tr32(RCVDCC_MODE));
7046
7047         /* Receive BD initiator control block */
7048         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
7049                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
7050
7051         /* Receive BD completion control block */
7052         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
7053                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
7054
7055         /* Receive list selector control block */
7056         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
7057                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
7058
7059         /* Mbuf cluster free block */
7060         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
7061                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
7062
7063         /* Host coalescing control block */
7064         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
7065                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
7066         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
7067                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7068                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7069         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
7070                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7071                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7072         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
7073                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
7074         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
7075                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
7076
7077         /* Memory arbiter control block */
7078         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
7079                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
7080
7081         /* Buffer manager control block */
7082         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
7083                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
7084         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
7085                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
7086         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
7087                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
7088                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
7089                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
7090
7091         /* Read DMA control block */
7092         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
7093                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
7094
7095         /* Write DMA control block */
7096         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
7097                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
7098
7099         /* DMA completion block */
7100         printk("DEBUG: DMAC_MODE[%08x]\n",
7101                tr32(DMAC_MODE));
7102
7103         /* GRC block */
7104         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
7105                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
7106         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
7107                tr32(GRC_LOCAL_CTRL));
7108
7109         /* TG3_BDINFOs */
7110         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
7111                tr32(RCVDBDI_JUMBO_BD + 0x0),
7112                tr32(RCVDBDI_JUMBO_BD + 0x4),
7113                tr32(RCVDBDI_JUMBO_BD + 0x8),
7114                tr32(RCVDBDI_JUMBO_BD + 0xc));
7115         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
7116                tr32(RCVDBDI_STD_BD + 0x0),
7117                tr32(RCVDBDI_STD_BD + 0x4),
7118                tr32(RCVDBDI_STD_BD + 0x8),
7119                tr32(RCVDBDI_STD_BD + 0xc));
7120         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
7121                tr32(RCVDBDI_MINI_BD + 0x0),
7122                tr32(RCVDBDI_MINI_BD + 0x4),
7123                tr32(RCVDBDI_MINI_BD + 0x8),
7124                tr32(RCVDBDI_MINI_BD + 0xc));
7125
7126         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
7127         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
7128         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
7129         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
7130         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
7131                val32, val32_2, val32_3, val32_4);
7132
7133         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
7134         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
7135         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
7136         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
7137         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
7138                val32, val32_2, val32_3, val32_4);
7139
7140         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
7141         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
7142         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
7143         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
7144         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
7145         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
7146                val32, val32_2, val32_3, val32_4, val32_5);
7147
7148         /* SW status block */
7149         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
7150                tp->hw_status->status,
7151                tp->hw_status->status_tag,
7152                tp->hw_status->rx_jumbo_consumer,
7153                tp->hw_status->rx_consumer,
7154                tp->hw_status->rx_mini_consumer,
7155                tp->hw_status->idx[0].rx_producer,
7156                tp->hw_status->idx[0].tx_consumer);
7157
7158         /* SW statistics block */
7159         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
7160                ((u32 *)tp->hw_stats)[0],
7161                ((u32 *)tp->hw_stats)[1],
7162                ((u32 *)tp->hw_stats)[2],
7163                ((u32 *)tp->hw_stats)[3]);
7164
7165         /* Mailboxes */
7166         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
7167                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
7168                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7169                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7170                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
7171
7172         /* NIC side send descriptors. */
7173         for (i = 0; i < 6; i++) {
7174                 unsigned long txd;
7175
7176                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7177                         + (i * sizeof(struct tg3_tx_buffer_desc));
7178                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7179                        i,
7180                        readl(txd + 0x0), readl(txd + 0x4),
7181                        readl(txd + 0x8), readl(txd + 0xc));
7182         }
7183
7184         /* NIC side RX descriptors. */
7185         for (i = 0; i < 6; i++) {
7186                 unsigned long rxd;
7187
7188                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7189                         + (i * sizeof(struct tg3_rx_buffer_desc));
7190                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7191                        i,
7192                        readl(rxd + 0x0), readl(rxd + 0x4),
7193                        readl(rxd + 0x8), readl(rxd + 0xc));
7194                 rxd += (4 * sizeof(u32));
7195                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7196                        i,
7197                        readl(rxd + 0x0), readl(rxd + 0x4),
7198                        readl(rxd + 0x8), readl(rxd + 0xc));
7199         }
7200
7201         for (i = 0; i < 6; i++) {
7202                 unsigned long rxd;
7203
7204                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7205                         + (i * sizeof(struct tg3_rx_buffer_desc));
7206                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7207                        i,
7208                        readl(rxd + 0x0), readl(rxd + 0x4),
7209                        readl(rxd + 0x8), readl(rxd + 0xc));
7210                 rxd += (4 * sizeof(u32));
7211                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7212                        i,
7213                        readl(rxd + 0x0), readl(rxd + 0x4),
7214                        readl(rxd + 0x8), readl(rxd + 0xc));
7215         }
7216 }
7217 #endif
7218
7219 static struct net_device_stats *tg3_get_stats(struct net_device *);
7220 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7221
/* net_device->stop() entry point: quiesce the chip, free rings, IRQ and
 * DMA memory, snapshot the running counters, and drop the device into
 * D3hot.  Called under rtnl_lock.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
		msleep(1);

	netif_stop_queue(dev);

	/* Stop the periodic timer first so it cannot fire and touch the
	 * hardware while we tear it down.
	 */
	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &=
		~(TG3_FLAG_INIT_COMPLETE |
		  TG3_FLAG_GOT_SERDES_FLOWCTL);

	tg3_full_unlock(tp);

	free_irq(tp->pdev->irq, dev);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		pci_disable_msi(tp->pdev);
		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	}

	/* Snapshot the accumulated totals before freeing the DMA'd stats
	 * block: tg3_get_stats()/tg3_get_estats() fold these "prev"
	 * values into future readings across a close/open cycle.
	 */
	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	tg3_set_power_state(tp, PCI_D3hot);

	netif_carrier_off(tp->dev);

	return 0;
}
7271
7272 static inline unsigned long get_stat64(tg3_stat64_t *val)
7273 {
7274         unsigned long ret;
7275
7276 #if (BITS_PER_LONG == 32)
7277         ret = val->low;
7278 #else
7279         ret = ((u64)val->high << 32) | ((u64)val->low);
7280 #endif
7281         return ret;
7282 }
7283
/* Return the cumulative RX CRC error count.
 *
 * On 5700/5701 copper parts the count is taken from a PHY counter
 * rather than the statistics block: writing bit 0x8000 to PHY register
 * 0x1e appears to expose a CRC counter at register 0x14.
 * NOTE(review): the 0x1e/0x14 register semantics are inferred from the
 * code itself -- confirm against the Broadcom PHY documentation.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, 0x1e, &val)) {
			tg3_writephy(tp, 0x1e, val | 0x8000);
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;	/* PHY read failed; add nothing */
		spin_unlock_bh(&tp->lock);

		/* Accumulate in software -- the reads above presumably
		 * clear the hardware counter (TODO confirm).
		 */
		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
7308
/* Fold one hardware counter into the ethtool stats snapshot, adding the
 * totals carried over from before the last close (estats_prev, saved in
 * tg3_close()).
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)

/* Build the full ethtool statistics structure from the chip's DMA'd
 * statistics block.  Returns the saved pre-close totals when the device
 * is closed (no hw_stats mapped).
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
7400
/* net_device->get_stats(): derive the generic netdev statistics from
 * the chip's hardware statistics block, adding the totals saved across
 * the last close (net_stats_prev, snapshotted in tg3_close()).
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	/* Device closed: no DMA'd stats block, report the saved totals. */
	if (!hw_stats)
		return old_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors need chip-specific handling (PHY counter on
	 * 5700/5701); see calc_crc_errors().
	 */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	return stats;
}
7460
7461 static inline u32 calc_crc(unsigned char *buf, int len)
7462 {
7463         u32 reg;
7464         u32 tmp;
7465         int j, k;
7466
7467         reg = 0xffffffff;
7468
7469         for (j = 0; j < len; j++) {
7470                 reg ^= buf[j];
7471
7472                 for (k = 0; k < 8; k++) {
7473                         tmp = reg & 0x01;
7474
7475                         reg >>= 1;
7476
7477                         if (tmp) {
7478                                 reg ^= 0xedb88320;
7479                         }
7480                 }
7481         }
7482
7483         return ~reg;
7484 }
7485
7486 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7487 {
7488         /* accept or reject all multicast frames */
7489         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7490         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7491         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7492         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7493 }
7494
/* Program the MAC RX filters (promiscuous flag, VLAN-tag stripping and
 * the 128-bit multicast hash) from dev->flags and the multicast list.
 * Caller must hold tg3_full_lock().
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
	} else if (dev->mc_count < 1) {
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		unsigned int i;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Hash each address to one of 128 filter bits:
			 * the low 7 bits of the inverted CRC select the
			 * register (top 2 bits) and bit position (low 5
			 * bits).
			 */
			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch MAC_RX_MODE when the mode word actually changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
7558
/* net_device->set_multicast_list() entry point: take the full lock and
 * reprogram the RX filters.  A closed device is left alone -- tg3_open()
 * establishes the RX mode when the interface comes up.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
7570
7571 #define TG3_REGDUMP_LEN         (32 * 1024)
7572
/* ethtool_ops->get_regs_len(): the dump is a fixed-size image of the
 * register map; tg3_get_regs() zero-fills the unused space.
 */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
7577
/* ethtool -d: dump register space into the caller's buffer.
 *
 * The buffer is zero-filled first; each GET_REG32_* macro then places
 * register values at their own register offset within the dump, so the
 * image mirrors the chip's register map layout.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

	/* Don't read registers from a powered-down chip; leave the dump
	 * zeroed instead.
	 */
	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

/* Read one register and advance the cursor. */
#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
/* Position the cursor at 'base' within the dump and read 'len' bytes
 * worth of consecutive 32-bit registers.
 */
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
/* Read a single isolated register at its own offset. */
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	/* NVRAM registers only exist (and are only safe to read) when
	 * the chip actually has NVRAM attached.
	 */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
7650
7651 static int tg3_get_eeprom_len(struct net_device *dev)
7652 {
7653         struct tg3 *tp = netdev_priv(dev);
7654
7655         return tp->nvram_size;
7656 }
7657
7658 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7659 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
7660
/* ethtool -e: read an arbitrary byte range from NVRAM.
 *
 * NVRAM is word (4-byte) addressed, so an unaligned head or tail is
 * handled by reading the containing word and copying just the wanted
 * slice.  eeprom->len is advanced incrementally so a partial count is
 * reported if a read fails mid-way.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, val, b_offset, b_count;

	/* NVRAM is not accessible while the chip is powered down. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		/* Swab the word to little-endian so the byte-wise copy
		 * below lays data[] out in NVRAM byte order.
		 */
		val = cpu_to_le32(val);
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read(tp, offset + i, &val);
		if (ret) {
			eeprom->len += i;
			return ret;
		}
		val = cpu_to_le32(val);
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read(tp, b_offset, &val);
		if (ret)
			return ret;
		val = cpu_to_le32(val);
		memcpy(pd, ((char*)&val), b_count);
		eeprom->len += b_count;
	}
	return 0;
}
7722
7723 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
7724
/* ethtool -E: write an arbitrary byte range to NVRAM.
 *
 * NVRAM writes are word (4-byte) based, so an unaligned head or tail is
 * first read back (into 'start'/'end') and merged with the caller's
 * bytes in a temporary buffer before one aligned block write.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len, start, end;
	u8 *buf;

	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	/* Caller must echo back the magic reported by tg3_get_eeprom(). */
	if (eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		start = cpu_to_le32(start);
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read(tp, offset+len-4, &end);
		if (ret)
			return ret;
		end = cpu_to_le32(end);
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Merge the preserved head/tail words with the caller's
		 * payload in a scratch buffer.
		 */
		buf = kmalloc(len, GFP_KERNEL);
		if (buf == 0)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
7783
7784 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7785 {
7786         struct tg3 *tp = netdev_priv(dev);
7787
7788         cmd->supported = (SUPPORTED_Autoneg);
7789
7790         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7791                 cmd->supported |= (SUPPORTED_1000baseT_Half |
7792                                    SUPPORTED_1000baseT_Full);
7793
7794         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
7795                 cmd->supported |= (SUPPORTED_100baseT_Half |
7796                                   SUPPORTED_100baseT_Full |
7797                                   SUPPORTED_10baseT_Half |
7798                                   SUPPORTED_10baseT_Full |
7799                                   SUPPORTED_MII);
7800                 cmd->port = PORT_TP;
7801         } else {
7802                 cmd->supported |= SUPPORTED_FIBRE;
7803                 cmd->port = PORT_FIBRE;
7804         }
7805
7806         cmd->advertising = tp->link_config.advertising;
7807         if (netif_running(dev)) {
7808                 cmd->speed = tp->link_config.active_speed;
7809                 cmd->duplex = tp->link_config.active_duplex;
7810         }
7811         cmd->phy_address = PHY_ADDR;
7812         cmd->transceiver = 0;
7813         cmd->autoneg = tp->link_config.autoneg;
7814         cmd->maxtxpkt = 0;
7815         cmd->maxrxpkt = 0;
7816         return 0;
7817 }
7818
7819 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7820 {
7821         struct tg3 *tp = netdev_priv(dev);
7822
7823         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
7824                 /* These are the only valid advertisement bits allowed.  */
7825                 if (cmd->autoneg == AUTONEG_ENABLE &&
7826                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7827                                           ADVERTISED_1000baseT_Full |
7828                                           ADVERTISED_Autoneg |
7829                                           ADVERTISED_FIBRE)))
7830                         return -EINVAL;
7831                 /* Fiber can only do SPEED_1000.  */
7832                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7833                          (cmd->speed != SPEED_1000))
7834                         return -EINVAL;
7835         /* Copper cannot force SPEED_1000.  */
7836         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7837                    (cmd->speed == SPEED_1000))
7838                 return -EINVAL;
7839         else if ((cmd->speed == SPEED_1000) &&
7840                  (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
7841                 return -EINVAL;
7842
7843         tg3_full_lock(tp, 0);
7844
7845         tp->link_config.autoneg = cmd->autoneg;
7846         if (cmd->autoneg == AUTONEG_ENABLE) {
7847                 tp->link_config.advertising = cmd->advertising;
7848                 tp->link_config.speed = SPEED_INVALID;
7849                 tp->link_config.duplex = DUPLEX_INVALID;
7850         } else {
7851                 tp->link_config.advertising = 0;
7852                 tp->link_config.speed = cmd->speed;
7853                 tp->link_config.duplex = cmd->duplex;
7854         }
7855
7856         if (netif_running(dev))
7857                 tg3_setup_phy(tp, 1);
7858
7859         tg3_full_unlock(tp);
7860
7861         return 0;
7862 }
7863
7864 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7865 {
7866         struct tg3 *tp = netdev_priv(dev);
7867
7868         strcpy(info->driver, DRV_MODULE_NAME);
7869         strcpy(info->version, DRV_MODULE_VERSION);
7870         strcpy(info->fw_version, tp->fw_ver);
7871         strcpy(info->bus_info, pci_name(tp->pdev));
7872 }
7873
7874 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7875 {
7876         struct tg3 *tp = netdev_priv(dev);
7877
7878         wol->supported = WAKE_MAGIC;
7879         wol->wolopts = 0;
7880         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7881                 wol->wolopts = WAKE_MAGIC;
7882         memset(&wol->sopass, 0, sizeof(wol->sopass));
7883 }
7884
7885 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7886 {
7887         struct tg3 *tp = netdev_priv(dev);
7888
7889         if (wol->wolopts & ~WAKE_MAGIC)
7890                 return -EINVAL;
7891         if ((wol->wolopts & WAKE_MAGIC) &&
7892             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
7893             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
7894                 return -EINVAL;
7895
7896         spin_lock_bh(&tp->lock);
7897         if (wol->wolopts & WAKE_MAGIC)
7898                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7899         else
7900                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7901         spin_unlock_bh(&tp->lock);
7902
7903         return 0;
7904 }
7905
7906 static u32 tg3_get_msglevel(struct net_device *dev)
7907 {
7908         struct tg3 *tp = netdev_priv(dev);
7909         return tp->msg_enable;
7910 }
7911
7912 static void tg3_set_msglevel(struct net_device *dev, u32 value)
7913 {
7914         struct tg3 *tp = netdev_priv(dev);
7915         tp->msg_enable = value;
7916 }
7917
#if TG3_TSO_SUPPORT != 0
/* ethtool ->set_tso: enable/disable TCP segmentation offload.
 * Chips without TSO capability only accept "off".  Chips with the
 * second-generation hardware TSO engine also toggle IPv6 TSO.
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE))
		return value ? -EINVAL : 0;

	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) {
		if (value)
			dev->features |= NETIF_F_TSO6;
		else
			dev->features &= ~NETIF_F_TSO6;
	}
	return ethtool_op_set_tso(dev, value);
}
#endif
7937
/* ethtool ->nway_reset: restart MII autonegotiation.
 * Returns -EAGAIN if the interface is down, -EINVAL on SERDES devices
 * (no MII PHY) or when autonegotiation is not enabled and parallel
 * detect is not in use.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 bmcr;
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	spin_lock_bh(&tp->lock);
	r = -EINVAL;
	/* BMCR is read twice; the first result is discarded.
	 * NOTE(review): presumably a dummy read to settle/flush the PHY --
	 * confirm it is intentional before removing the duplicate.
	 */
	tg3_readphy(tp, MII_BMCR, &bmcr);
	if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
	    ((bmcr & BMCR_ANENABLE) ||
	     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
		/* Kick off a fresh autonegotiation cycle. */
		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
					   BMCR_ANENABLE);
		r = 0;
	}
	spin_unlock_bh(&tp->lock);

	return r;
}
7964
7965 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7966 {
7967         struct tg3 *tp = netdev_priv(dev);
7968
7969         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7970         ering->rx_mini_max_pending = 0;
7971         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7972                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7973         else
7974                 ering->rx_jumbo_max_pending = 0;
7975
7976         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
7977
7978         ering->rx_pending = tp->rx_pending;
7979         ering->rx_mini_pending = 0;
7980         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7981                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7982         else
7983                 ering->rx_jumbo_pending = 0;
7984
7985         ering->tx_pending = tp->tx_pending;
7986 }
7987
7988 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7989 {
7990         struct tg3 *tp = netdev_priv(dev);
7991         int irq_sync = 0, err = 0;
7992
7993         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
7994             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
7995             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
7996                 return -EINVAL;
7997
7998         if (netif_running(dev)) {
7999                 tg3_netif_stop(tp);
8000                 irq_sync = 1;
8001         }
8002
8003         tg3_full_lock(tp, irq_sync);
8004
8005         tp->rx_pending = ering->rx_pending;
8006
8007         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
8008             tp->rx_pending > 63)
8009                 tp->rx_pending = 63;
8010         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
8011         tp->tx_pending = ering->tx_pending;
8012
8013         if (netif_running(dev)) {
8014                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8015                 err = tg3_restart_hw(tp, 1);
8016                 if (!err)
8017                         tg3_netif_start(tp);
8018         }
8019
8020         tg3_full_unlock(tp);
8021
8022         return err;
8023 }
8024
8025 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8026 {
8027         struct tg3 *tp = netdev_priv(dev);
8028
8029         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8030         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
8031         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
8032 }
8033
8034 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8035 {
8036         struct tg3 *tp = netdev_priv(dev);
8037         int irq_sync = 0, err = 0;
8038
8039         if (netif_running(dev)) {
8040                 tg3_netif_stop(tp);
8041                 irq_sync = 1;
8042         }
8043
8044         tg3_full_lock(tp, irq_sync);
8045
8046         if (epause->autoneg)
8047                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8048         else
8049                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8050         if (epause->rx_pause)
8051                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
8052         else
8053                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
8054         if (epause->tx_pause)
8055                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
8056         else
8057                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
8058
8059         if (netif_running(dev)) {
8060                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8061                 err = tg3_restart_hw(tp, 1);
8062                 if (!err)
8063                         tg3_netif_start(tp);
8064         }
8065
8066         tg3_full_unlock(tp);
8067
8068         return err;
8069 }
8070
8071 static u32 tg3_get_rx_csum(struct net_device *dev)
8072 {
8073         struct tg3 *tp = netdev_priv(dev);
8074         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8075 }
8076
8077 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8078 {
8079         struct tg3 *tp = netdev_priv(dev);
8080
8081         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8082                 if (data != 0)
8083                         return -EINVAL;
8084                 return 0;
8085         }
8086
8087         spin_lock_bh(&tp->lock);
8088         if (data)
8089                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8090         else
8091                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8092         spin_unlock_bh(&tp->lock);
8093
8094         return 0;
8095 }
8096
8097 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8098 {
8099         struct tg3 *tp = netdev_priv(dev);
8100
8101         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8102                 if (data != 0)
8103                         return -EINVAL;
8104                 return 0;
8105         }
8106
8107         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8108             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8109                 ethtool_op_set_tx_hw_csum(dev, data);
8110         else
8111                 ethtool_op_set_tx_csum(dev, data);
8112
8113         return 0;
8114 }
8115
8116 static int tg3_get_stats_count (struct net_device *dev)
8117 {
8118         return TG3_NUM_STATS;
8119 }
8120
8121 static int tg3_get_test_count (struct net_device *dev)
8122 {
8123         return TG3_NUM_TEST;
8124 }
8125
8126 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8127 {
8128         switch (stringset) {
8129         case ETH_SS_STATS:
8130                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8131                 break;
8132         case ETH_SS_TEST:
8133                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8134                 break;
8135         default:
8136                 WARN_ON(1);     /* we need a WARN() */
8137                 break;
8138         }
8139 }
8140
8141 static int tg3_phys_id(struct net_device *dev, u32 data)
8142 {
8143         struct tg3 *tp = netdev_priv(dev);
8144         int i;
8145
8146         if (!netif_running(tp->dev))
8147                 return -EAGAIN;
8148
8149         if (data == 0)
8150                 data = 2;
8151
8152         for (i = 0; i < (data * 2); i++) {
8153                 if ((i % 2) == 0)
8154                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8155                                            LED_CTRL_1000MBPS_ON |
8156                                            LED_CTRL_100MBPS_ON |
8157                                            LED_CTRL_10MBPS_ON |
8158                                            LED_CTRL_TRAFFIC_OVERRIDE |
8159                                            LED_CTRL_TRAFFIC_BLINK |
8160                                            LED_CTRL_TRAFFIC_LED);
8161
8162                 else
8163                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8164                                            LED_CTRL_TRAFFIC_OVERRIDE);
8165
8166                 if (msleep_interruptible(500))
8167                         break;
8168         }
8169         tw32(MAC_LED_CTRL, tp->led_ctrl);
8170         return 0;
8171 }
8172
8173 static void tg3_get_ethtool_stats (struct net_device *dev,
8174                                    struct ethtool_stats *estats, u64 *tmp_stats)
8175 {
8176         struct tg3 *tp = netdev_priv(dev);
8177         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8178 }
8179
8180 #define NVRAM_TEST_SIZE 0x100
8181 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
8182
/* NVRAM self-test: read back the image and verify its checksums.
 *
 * Legacy images (TG3_EEPROM_MAGIC) are validated with CRCs over the
 * bootstrap block (checksum word at offset 0x10) and the manufacturing
 * block at 0x74 (checksum word at 0xfc).  Selfboot format-1 images
 * (magic 0xa5xxxxxx with format bits 0x200000) are validated with a
 * byte checksum that must sum to zero; other selfboot formats are
 * skipped (return 0).
 *
 * Returns 0 on success, -EIO on read failure or checksum mismatch,
 * -ENOMEM if the temporary buffer cannot be allocated.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 *buf, csum, magic;
	int i, j, err = 0, size;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return -EIO;

	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & 0xff000000) == 0xa5000000) {
		if ((magic & 0xe00000) == 0x200000)
			size = NVRAM_SELFBOOT_FORMAT1_SIZE;
		else
			return 0;
	} else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	/* Read the image one 32-bit word at a time into buf (stored LE). */
	for (i = 0, j = 0; i < size; i += 4, j++) {
		u32 val;

		if ((err = tg3_nvram_read(tp, i, &val)) != 0)
			break;
		buf[j] = cpu_to_le32(val);
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		/* Valid selfboot image iff all bytes sum to zero (mod 256). */
		for (i = 0; i < size; i++)
			csum8 += buf8[i];

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if(csum != cpu_to_le32(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != cpu_to_le32(buf[0xfc/4]))
		 goto out;

	err = 0;

out:
	kfree(buf);
	return err;
}
8248
8249 #define TG3_SERDES_TIMEOUT_SEC  2
8250 #define TG3_COPPER_TIMEOUT_SEC  6
8251
8252 static int tg3_test_link(struct tg3 *tp)
8253 {
8254         int i, max;
8255
8256         if (!netif_running(tp->dev))
8257                 return -ENODEV;
8258
8259         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8260                 max = TG3_SERDES_TIMEOUT_SEC;
8261         else
8262                 max = TG3_COPPER_TIMEOUT_SEC;
8263
8264         for (i = 0; i < max; i++) {
8265                 if (netif_carrier_ok(tp->dev))
8266                         return 0;
8267
8268                 if (msleep_interruptible(1000))
8269                         break;
8270         }
8271
8272         return -EIO;
8273 }
8274
8275 /* Only test the commonly used registers */
/* Register self-test: for each table entry applicable to this chip,
 * verify that writing 0 and then all-ones to the register changes only
 * the bits declared writable (write_mask) and leaves the read-only bits
 * (read_mask) untouched.  The original register value is restored after
 * each probe.  Returns 0 on success, -EIO on the first failing offset.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;	/* chip-applicability flags below */
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
		u32 read_mask;	/* bits that must read back unchanged */
		u32 write_mask;	/* bits that must be host-writable */
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, 0x0000,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, 0x0000,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },	/* end marker */
	};

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		is_5705 = 1;
	else
		is_5705 = 0;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries that do not apply to this chip family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	printk(KERN_ERR PFX "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
8487
8488 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8489 {
8490         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8491         int i;
8492         u32 j;
8493
8494         for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
8495                 for (j = 0; j < len; j += 4) {
8496                         u32 val;
8497
8498                         tg3_write_mem(tp, offset + j, test_pattern[i]);
8499                         tg3_read_mem(tp, offset + j, &val);
8500                         if (val != test_pattern[i])
8501                                 return -EIO;
8502                 }
8503         }
8504         return 0;
8505 }
8506
8507 static int tg3_test_memory(struct tg3 *tp)
8508 {
8509         static struct mem_entry {
8510                 u32 offset;
8511                 u32 len;
8512         } mem_tbl_570x[] = {
8513                 { 0x00000000, 0x00b50},
8514                 { 0x00002000, 0x1c000},
8515                 { 0xffffffff, 0x00000}
8516         }, mem_tbl_5705[] = {
8517                 { 0x00000100, 0x0000c},
8518                 { 0x00000200, 0x00008},
8519                 { 0x00004000, 0x00800},
8520                 { 0x00006000, 0x01000},
8521                 { 0x00008000, 0x02000},
8522                 { 0x00010000, 0x0e000},
8523                 { 0xffffffff, 0x00000}
8524         }, mem_tbl_5755[] = {
8525                 { 0x00000200, 0x00008},
8526                 { 0x00004000, 0x00800},
8527                 { 0x00006000, 0x00800},
8528                 { 0x00008000, 0x02000},
8529                 { 0x00010000, 0x0c000},
8530                 { 0xffffffff, 0x00000}
8531         };
8532         struct mem_entry *mem_tbl;
8533         int err = 0;
8534         int i;
8535
8536         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8537                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8538                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8539                         mem_tbl = mem_tbl_5755;
8540                 else
8541                         mem_tbl = mem_tbl_5705;
8542         } else
8543                 mem_tbl = mem_tbl_570x;
8544
8545         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
8546                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
8547                     mem_tbl[i].len)) != 0)
8548                         break;
8549         }
8550
8551         return err;
8552 }
8553
8554 #define TG3_MAC_LOOPBACK        0
8555 #define TG3_PHY_LOOPBACK        1
8556
/* Loopback self-test: configure either internal MAC loopback or PHY
 * loopback, transmit a single self-addressed frame, and verify it comes
 * back on the standard RX ring with the expected length and payload.
 *
 * Returns 0 on success, -EIO when the frame is not seen or its contents
 * differ, -ENOMEM if the test skb cannot be allocated, -EINVAL for an
 * unknown loopback_mode.  Caller is expected to hold the device quiesced;
 * the received skb is left for tg3_free_rings() to unmap and free.
 */
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 desc_idx;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
			return 0;

		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY |
			   MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
		/* Put the PHY into loopback at 1000/full. */
		tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
					   BMCR_SPEED1000);
		udelay(40);
		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
			   MAC_MODE_LINK_POLARITY | MAC_MODE_PORT_MODE_GMII;
		/* BCM5401 needs inverted link polarity and LED mode tweak. */
		if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);
	}
	else
		return -EINVAL;

	err = -EIO;

	/* Build a max-size (non-jumbo) frame: our MAC as destination,
	 * a zeroed source/type area, then an incrementing byte pattern.
	 */
	tx_len = 1514;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + 4);

	for (i = 14; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);

	/* Force a coalescing pass so the ring indices below are current. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	     HOSTCC_MODE_NOW);

	udelay(10);

	rx_start_idx = tp->hw_status->idx[0].rx_producer;

	num_pkts = 0;

	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);

	tp->tx_prod++;
	num_pkts++;

	/* Ring the TX doorbell; the readback flushes the posted write. */
	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
		     tp->tx_prod);
	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);

	udelay(10);

	/* Poll up to 10 times for the frame to be sent and looped back. */
	for (i = 0; i < 10; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       HOSTCC_MODE_NOW);

		udelay(10);

		tx_idx = tp->hw_status->idx[0].tx_consumer;
		rx_idx = tp->hw_status->idx[0].rx_producer;
		if ((tx_idx == tp->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	if (tx_idx != tp->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Validate the RX descriptor: standard ring, no errors (except a
	 * tolerated odd-nibble MII report), and the original length.
	 */
	desc = &tp->rx_rcb[rx_start_idx];
	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
	if (opaque_key != RXD_OPAQUE_RING_STD)
		goto out;

	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
		goto out;

	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
	if (rx_len != tx_len)
		goto out;

	rx_skb = tp->rx_std_buffers[desc_idx].skb;

	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);

	/* Verify the payload pattern survived the round trip. */
	for (i = 14; i < tx_len; i++) {
		if (*(rx_skb->data + i) != (u8) (i & 0xff))
			goto out;
	}
	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
8690
8691 #define TG3_MAC_LOOPBACK_FAILED         1
8692 #define TG3_PHY_LOOPBACK_FAILED         2
8693 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
8694                                          TG3_PHY_LOOPBACK_FAILED)
8695
8696 static int tg3_test_loopback(struct tg3 *tp)
8697 {
8698         int err = 0;
8699
8700         if (!netif_running(tp->dev))
8701                 return TG3_LOOPBACK_FAILED;
8702
8703         err = tg3_reset_hw(tp, 1);
8704         if (err)
8705                 return TG3_LOOPBACK_FAILED;
8706
8707         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
8708                 err |= TG3_MAC_LOOPBACK_FAILED;
8709         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8710                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
8711                         err |= TG3_PHY_LOOPBACK_FAILED;
8712         }
8713
8714         return err;
8715 }
8716
/* ethtool ->self_test entry point.  Runs the NVRAM and link tests
 * always; when ETH_TEST_FL_OFFLINE is requested it also halts the chip
 * and runs the register, memory, loopback and interrupt tests, then
 * restarts the hardware.  Results are reported per-test in data[0..5]
 * (0 = pass), and ETH_TEST_FL_FAILED is set in etest->flags on any
 * failure.  A device in low-power state is temporarily brought to D0.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Quiesce the chip and its on-board CPUs before poking
		 * registers and memory directly.
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		/* data[4] carries the loopback failure bitmask directly. */
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* The interrupt test needs the lock dropped. */
		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Bring the hardware back to its normal running state. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			if (!tg3_restart_hw(tp, 1))
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

}
8789
8790 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8791 {
8792         struct mii_ioctl_data *data = if_mii(ifr);
8793         struct tg3 *tp = netdev_priv(dev);
8794         int err;
8795
8796         switch(cmd) {
8797         case SIOCGMIIPHY:
8798                 data->phy_id = PHY_ADDR;
8799
8800                 /* fallthru */
8801         case SIOCGMIIREG: {
8802                 u32 mii_regval;
8803
8804                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8805                         break;                  /* We have no PHY */
8806
8807                 if (tp->link_config.phy_is_low_power)
8808                         return -EAGAIN;
8809
8810                 spin_lock_bh(&tp->lock);
8811                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
8812                 spin_unlock_bh(&tp->lock);
8813
8814                 data->val_out = mii_regval;
8815
8816                 return err;
8817         }
8818
8819         case SIOCSMIIREG:
8820                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8821                         break;                  /* We have no PHY */
8822
8823                 if (!capable(CAP_NET_ADMIN))
8824                         return -EPERM;
8825
8826                 if (tp->link_config.phy_is_low_power)
8827                         return -EAGAIN;
8828
8829                 spin_lock_bh(&tp->lock);
8830                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
8831                 spin_unlock_bh(&tp->lock);
8832
8833                 return err;
8834
8835         default:
8836                 /* do nothing */
8837                 break;
8838         }
8839         return -EOPNOTSUPP;
8840 }
8841
8842 #if TG3_VLAN_TAG_USED
/* VLAN acceleration hook: record the new vlan_group and refresh the RX
 * mode so the chip's VLAN tag handling matches the new configuration.
 * The device is quiesced for the duration of the update.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev))
		tg3_netif_stop(tp);

	tg3_full_lock(tp, 0);

	tp->vlgrp = grp;

	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
	__tg3_set_rx_mode(dev);

	tg3_full_unlock(tp);

	if (netif_running(dev))
		tg3_netif_start(tp);
}
8862
/* VLAN acceleration hook: drop our reference to the per-vid device so
 * received frames with this tag are no longer delivered to it.  The
 * device is quiesced around the table update.
 */
static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev))
		tg3_netif_stop(tp);

	tg3_full_lock(tp, 0);
	if (tp->vlgrp)
		tp->vlgrp->vlan_devices[vid] = NULL;
	tg3_full_unlock(tp);

	if (netif_running(dev))
		tg3_netif_start(tp);
}
8878 #endif
8879
8880 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8881 {
8882         struct tg3 *tp = netdev_priv(dev);
8883
8884         memcpy(ec, &tp->coal, sizeof(*ec));
8885         return 0;
8886 }
8887
8888 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8889 {
8890         struct tg3 *tp = netdev_priv(dev);
8891         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
8892         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
8893
8894         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8895                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
8896                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
8897                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
8898                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
8899         }
8900
8901         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
8902             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
8903             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
8904             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
8905             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
8906             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
8907             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
8908             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
8909             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
8910             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
8911                 return -EINVAL;
8912
8913         /* No rx interrupts will be generated if both are zero */
8914         if ((ec->rx_coalesce_usecs == 0) &&
8915             (ec->rx_max_coalesced_frames == 0))
8916                 return -EINVAL;
8917
8918         /* No tx interrupts will be generated if both are zero */
8919         if ((ec->tx_coalesce_usecs == 0) &&
8920             (ec->tx_max_coalesced_frames == 0))
8921                 return -EINVAL;
8922
8923         /* Only copy relevant parameters, ignore all others. */
8924         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
8925         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
8926         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
8927         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
8928         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
8929         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
8930         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
8931         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
8932         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
8933
8934         if (netif_running(dev)) {
8935                 tg3_full_lock(tp, 0);
8936                 __tg3_set_coalesce(tp, &tp->coal);
8937                 tg3_full_unlock(tp);
8938         }
8939         return 0;
8940 }
8941
/* ethtool entry points exported by this driver.  The TSO handlers are
 * only compiled in when the kernel provides NETIF_F_TSO.
 */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.get_rx_csum		= tg3_get_rx_csum,
	.set_rx_csum		= tg3_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= tg3_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
#if TG3_TSO_SUPPORT != 0
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= tg3_set_tso,
#endif
	.self_test_count	= tg3_get_test_count,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.phys_id		= tg3_phys_id,
	.get_stats_count	= tg3_get_stats_count,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
8981
8982 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
8983 {
8984         u32 cursize, val, magic;
8985
8986         tp->nvram_size = EEPROM_CHIP_SIZE;
8987
8988         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8989                 return;
8990
8991         if ((magic != TG3_EEPROM_MAGIC) && ((magic & 0xff000000) != 0xa5000000))
8992                 return;
8993
8994         /*
8995          * Size the chip by reading offsets at increasing powers of two.
8996          * When we encounter our validation signature, we know the addressing
8997          * has wrapped around, and thus have our chip size.
8998          */
8999         cursize = 0x10;
9000
9001         while (cursize < tp->nvram_size) {
9002                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
9003                         return;
9004
9005                 if (val == magic)
9006                         break;
9007
9008                 cursize <<= 1;
9009         }
9010
9011         tp->nvram_size = cursize;
9012 }
9013
9014 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9015 {
9016         u32 val;
9017
9018         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
9019                 return;
9020
9021         /* Selfboot format */
9022         if (val != TG3_EEPROM_MAGIC) {
9023                 tg3_get_eeprom_size(tp);
9024                 return;
9025         }
9026
9027         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9028                 if (val != 0) {
9029                         tp->nvram_size = (val >> 16) * 1024;
9030                         return;
9031                 }
9032         }
9033         tp->nvram_size = 0x20000;
9034 }
9035
9036 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
9037 {
9038         u32 nvcfg1;
9039
9040         nvcfg1 = tr32(NVRAM_CFG1);
9041         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
9042                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9043         }
9044         else {
9045                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9046                 tw32(NVRAM_CFG1, nvcfg1);
9047         }
9048
9049         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
9050             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
9051                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
9052                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
9053                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9054                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9055                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9056                                 break;
9057                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
9058                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9059                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
9060                                 break;
9061                         case FLASH_VENDOR_ATMEL_EEPROM:
9062                                 tp->nvram_jedecnum = JEDEC_ATMEL;
9063                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9064                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9065                                 break;
9066                         case FLASH_VENDOR_ST:
9067                                 tp->nvram_jedecnum = JEDEC_ST;
9068                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
9069                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9070                                 break;
9071                         case FLASH_VENDOR_SAIFUN:
9072                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
9073                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
9074                                 break;
9075                         case FLASH_VENDOR_SST_SMALL:
9076                         case FLASH_VENDOR_SST_LARGE:
9077                                 tp->nvram_jedecnum = JEDEC_SST;
9078                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
9079                                 break;
9080                 }
9081         }
9082         else {
9083                 tp->nvram_jedecnum = JEDEC_ATMEL;
9084                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9085                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9086         }
9087 }
9088
/* Probe NVRAM type and page size on 5752 devices from NVRAM_CFG1. */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

	/* Vendor code selects the JEDEC id and flash/eeprom flags. */
	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			break;
	}

	/* Flash parts encode the page size in CFG1; EEPROMs do not. */
	if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
		switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
			case FLASH_5752PAGE_SIZE_256:
				tp->nvram_pagesize = 256;
				break;
			case FLASH_5752PAGE_SIZE_512:
				tp->nvram_pagesize = 512;
				break;
			case FLASH_5752PAGE_SIZE_1K:
				tp->nvram_pagesize = 1024;
				break;
			case FLASH_5752PAGE_SIZE_2K:
				tp->nvram_pagesize = 2048;
				break;
			case FLASH_5752PAGE_SIZE_4K:
				tp->nvram_pagesize = 4096;
				break;
			case FLASH_5752PAGE_SIZE_264:
				tp->nvram_pagesize = 264;
				break;
		}
	}
	else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
9149
/* Probe NVRAM type and page size on 5755 devices from NVRAM_CFG1. */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ:
			/* Atmel EEPROM: buffered, no flash interface;
			 * clear the compatibility bypass bit.
			 */
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

			nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
			tw32(NVRAM_CFG1, nvcfg1);
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_5755VENDOR_ATMEL_FLASH_1:
		case FLASH_5755VENDOR_ATMEL_FLASH_2:
		case FLASH_5755VENDOR_ATMEL_FLASH_3:
		case FLASH_5755VENDOR_ATMEL_FLASH_4:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 264;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
			break;
	}
}
9190
/* Probe NVRAM type and page size on 5787 devices from NVRAM_CFG1. */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
		case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
		case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
			/* EEPROM parts: buffered, no flash interface;
			 * clear the compatibility bypass bit.
			 */
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

			nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
			tw32(NVRAM_CFG1, nvcfg1);
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_5755VENDOR_ATMEL_FLASH_1:
		case FLASH_5755VENDOR_ATMEL_FLASH_2:
		case FLASH_5755VENDOR_ATMEL_FLASH_3:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 264;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
			break;
	}
}
9228
9229 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9230 static void __devinit tg3_nvram_init(struct tg3 *tp)
9231 {
9232         int j;
9233
9234         tw32_f(GRC_EEPROM_ADDR,
9235              (EEPROM_ADDR_FSM_RESET |
9236               (EEPROM_DEFAULT_CLOCK_PERIOD <<
9237                EEPROM_ADDR_CLKPERD_SHIFT)));
9238
9239         /* XXX schedule_timeout() ... */
9240         for (j = 0; j < 100; j++)
9241                 udelay(10);
9242
9243         /* Enable seeprom accesses. */
9244         tw32_f(GRC_LOCAL_CTRL,
9245              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9246         udelay(100);
9247
9248         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9249             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9250                 tp->tg3_flags |= TG3_FLAG_NVRAM;
9251
9252                 if (tg3_nvram_lock(tp)) {
9253                         printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
9254                                "tg3_nvram_init failed.\n", tp->dev->name);
9255                         return;
9256                 }
9257                 tg3_enable_nvram_access(tp);
9258
9259                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9260                         tg3_get_5752_nvram_info(tp);
9261                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9262                         tg3_get_5755_nvram_info(tp);
9263                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9264                         tg3_get_5787_nvram_info(tp);
9265                 else
9266                         tg3_get_nvram_info(tp);
9267
9268                 tg3_get_nvram_size(tp);
9269
9270                 tg3_disable_nvram_access(tp);
9271                 tg3_nvram_unlock(tp);
9272
9273         } else {
9274                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9275
9276                 tg3_get_eeprom_size(tp);
9277         }
9278 }
9279
/* Read one 32-bit word at @offset via the legacy SEEPROM interface
 * (used on chips without the NVRAM block).  @offset must be dword
 * aligned and within EEPROM_ADDR_ADDR_MASK.  Completion is polled for
 * up to 10000 * 100us.  Returns 0 on success, -EINVAL on a bad offset,
 * -EBUSY on timeout; *val is written only on success.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK ||
	    (offset % 4) != 0)
		return -EINVAL;

	/* Preserve the device-id field; clear address and read bits. */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll the state machine for read completion. */
	for (i = 0; i < 10000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		udelay(100);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	*val = tr32(GRC_EEPROM_DATA);
	return 0;
}
9313
9314 #define NVRAM_CMD_TIMEOUT 10000
9315
9316 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9317 {
9318         int i;
9319
9320         tw32(NVRAM_CMD, nvram_cmd);
9321         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9322                 udelay(10);
9323                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9324                         udelay(10);
9325                         break;
9326                 }
9327         }
9328         if (i == NVRAM_CMD_TIMEOUT) {
9329                 return -EBUSY;
9330         }
9331         return 0;
9332 }
9333
9334 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9335 {
9336         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9337             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9338             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9339             (tp->nvram_jedecnum == JEDEC_ATMEL))
9340
9341                 addr = ((addr / tp->nvram_pagesize) <<
9342                         ATMEL_AT45DB0X1B_PAGE_POS) +
9343                        (addr % tp->nvram_pagesize);
9344
9345         return addr;
9346 }
9347
9348 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9349 {
9350         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9351             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9352             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9353             (tp->nvram_jedecnum == JEDEC_ATMEL))
9354
9355                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9356                         tp->nvram_pagesize) +
9357                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
9358
9359         return addr;
9360 }
9361
/* Read one 32-bit word from NVRAM at @offset into @*val.
 *
 * Falls back to direct SEEPROM access on chips without the NVRAM
 * block.  Otherwise the offset is translated to the device's physical
 * addressing, the NVRAM is locked and access-enabled, and a single
 * read command is executed.  Returns 0 on success or a negative errno;
 * *val is written only on success.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	/* Buffered Atmel flash addresses by (page, byte), not linearly. */
	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = swab32(tr32(NVRAM_RDDATA));

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
9393
9394 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9395 {
9396         int err;
9397         u32 tmp;
9398
9399         err = tg3_nvram_read(tp, offset, &tmp);
9400         *val = swab32(tmp);
9401         return err;
9402 }
9403
/* Write @len bytes from @buf to the SEEPROM starting at @offset, one
 * dword at a time.  offset/len are assumed dword aligned.  Each word
 * write is polled for completion (up to 10000 * 100us); returns 0 on
 * success or -EBUSY if a write times out (remaining words are left
 * unwritten).
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr, data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		tw32(GRC_EEPROM_DATA, cpu_to_le32(data));

		/* Ack any stale completion bit before starting the write. */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll the state machine for write completion. */
		for (j = 0; j < 10000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			udelay(100);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
9445
9446 /* offset and length are dword aligned */
9447 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
9448                 u8 *buf)
9449 {
9450         int ret = 0;
9451         u32 pagesize = tp->nvram_pagesize;
9452         u32 pagemask = pagesize - 1;
9453         u32 nvram_cmd;
9454         u8 *tmp;
9455
9456         tmp = kmalloc(pagesize, GFP_KERNEL);
9457         if (tmp == NULL)
9458                 return -ENOMEM;
9459
9460         while (len) {
9461                 int j;
9462                 u32 phy_addr, page_off, size;
9463
9464                 phy_addr = offset & ~pagemask;
9465
9466                 for (j = 0; j < pagesize; j += 4) {
9467                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
9468                                                 (u32 *) (tmp + j))))
9469                                 break;
9470                 }
9471                 if (ret)
9472                         break;
9473
9474                 page_off = offset & pagemask;
9475                 size = pagesize;
9476                 if (len < size)
9477                         size = len;
9478
9479                 len -= size;
9480
9481                 memcpy(tmp + page_off, buf, size);
9482
9483                 offset = offset + (pagesize - page_off);
9484
9485                 tg3_enable_nvram_access(tp);
9486
9487                 /*
9488                  * Before we can erase the flash page, we need
9489                  * to issue a special "write enable" command.
9490                  */
9491                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9492
9493                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9494                         break;
9495
9496                 /* Erase the target page */
9497                 tw32(NVRAM_ADDR, phy_addr);
9498
9499                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
9500                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
9501
9502                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9503                         break;
9504
9505                 /* Issue another write enable to start the write. */
9506                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9507
9508                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
9509                         break;
9510
9511                 for (j = 0; j < pagesize; j += 4) {
9512                         u32 data;
9513
9514                         data = *((u32 *) (tmp + j));
9515                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
9516
9517                         tw32(NVRAM_ADDR, phy_addr + j);
9518
9519                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
9520                                 NVRAM_CMD_WR;
9521
9522                         if (j == 0)
9523                                 nvram_cmd |= NVRAM_CMD_FIRST;
9524                         else if (j == (pagesize - 4))
9525                                 nvram_cmd |= NVRAM_CMD_LAST;
9526
9527                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
9528                                 break;
9529                 }
9530                 if (ret)
9531                         break;
9532         }
9533
9534         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
9535         tg3_nvram_exec_cmd(tp, nvram_cmd);
9536
9537         kfree(tmp);
9538
9539         return ret;
9540 }
9541
/* offset and length are dword aligned */
/*
 * Write @len bytes from @buf to buffered flash or EEPROM, one dword
 * per command.  Returns 0 on success or the first error from
 * tg3_nvram_exec_cmd() (remaining words are left unwritten).
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 data, page_off, phy_addr, nvram_cmd;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, cpu_to_be32(data));

		/* Offset of this word within the current flash page. */
		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* Flag page/transfer boundaries for the controller. */
		if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* ST parts on chips other than 5752/5755/5787 get an
		 * explicit write-enable before the first word of a page.
		 */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
9592
/* Top-level NVRAM write entry point.  Dispatches to the EEPROM, buffered
 * or unbuffered flash writer as appropriate, bracketing the write with
 * write-protect release/restore and NVRAM lock/access enable.
 * offset and length are dword aligned.
 */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	/* Onboard parts are write-protected via GPIO1; deassert it for the
	 * duration of the write.
	 */
	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	}
	else {
		u32 grc_mode;

		/* NOTE(review): if tg3_nvram_lock() fails here we return
		 * without restoring GRC_LOCAL_CTRL below — verify whether
		 * the write-protect GPIO should be re-asserted on this path.
		 */
		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		    !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		/* Enable NVRAM writes in GRC mode for the duration. */
		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
			!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {

			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		}
		else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	/* Re-assert write protection if we released it above. */
	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}
9647
/* Maps a PCI subsystem vendor/device ID pair to the PHY ID used on that
 * board; phy_id == 0 means serdes (see tg3_phy_probe()).
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};
9652
/* Hardcoded fallback table, consulted by lookup_by_subsys() when the
 * eeprom carries no usable PHY ID.
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
9690
9691 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9692 {
9693         int i;
9694
9695         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9696                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
9697                      tp->pdev->subsystem_vendor) &&
9698                     (subsys_id_to_phy_id[i].subsys_devid ==
9699                      tp->pdev->subsystem_device))
9700                         return &subsys_id_to_phy_id[i];
9701         }
9702         return NULL;
9703 }
9704
/* Read the board configuration that bootcode left in NIC SRAM and fill in
 * tp->phy_id, tp->led_ctrl and assorted tg3_flags/tg3_flags2 bits (serdes,
 * ASF, WOL, write-protect).  Defaults are installed first so the function
 * is safe even when the SRAM signature is absent.
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;
	u16 pmcsr;

	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so need make sure we're in D0.
	 */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
	msleep(1);

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	/* Defaults, used if no valid SRAM signature is found below. */
	tp->phy_id = PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device by default.  */
	tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		/* CFG_2 only exists with new-enough bootcode on chips other
		 * than 5700/5701/5703; cfg2 stays 0 otherwise.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Repack the SRAM-format PHY ID into the driver's layout
		 * (same packing as the MII_PHYSID1/2 path in tg3_phy_probe).
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
				tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
			else
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}

		/* 5750+ parts carry extended (Shasta) LED modes in cfg2. */
		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		};

		/* Dell 5700/5701 boards override the LED mode. */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP)
			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
		else
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
		if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
			tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;

		if (cfg2 & (1 << 17))
			tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
	}
}
9856
/* Identify the PHY attached to the chip and set tp->phy_id plus the
 * serdes flags and default advertising mask.  Falls back, in order, to
 * the eeprom-provided ID and then the hardcoded subsystem-ID table when
 * the MII ID registers cannot be used.  Returns 0 or a negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			/* phy_id == 0 in the table also means serdes. */
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	/* For copper PHYs not owned by ASF firmware, reset the PHY and set
	 * up default autonegotiation advertisement.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl;

		/* BMSR is latched; read twice so the second read reflects
		 * the current link state.  Skip the reset if link is up.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			/* 5701 A0/B0 must run as link master. */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		/* Restart autoneg only if the PHY is not already
		 * advertising everything we want.
		 */
		if (!tg3_copper_is_advertising_all(tp)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	/* NOTE(review): this second 5401 DSP init looks redundant with the
	 * block above (err is 0 here if we got past it) — confirm whether
	 * the repeat is an intentional workaround before removing.
	 */
	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	/* Serdes links advertise fibre gigabit only. */
	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
9979
9980 static void __devinit tg3_read_partno(struct tg3 *tp)
9981 {
9982         unsigned char vpd_data[256];
9983         int i;
9984         u32 magic;
9985
9986         if (tg3_nvram_read_swab(tp, 0x0, &magic))
9987                 goto out_not_found;
9988
9989         if (magic == TG3_EEPROM_MAGIC) {
9990                 for (i = 0; i < 256; i += 4) {
9991                         u32 tmp;
9992
9993                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
9994                                 goto out_not_found;
9995
9996                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
9997                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
9998                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
9999                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
10000                 }
10001         } else {
10002                 int vpd_cap;
10003
10004                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
10005                 for (i = 0; i < 256; i += 4) {
10006                         u32 tmp, j = 0;
10007                         u16 tmp16;
10008
10009                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
10010                                               i);
10011                         while (j++ < 100) {
10012                                 pci_read_config_word(tp->pdev, vpd_cap +
10013                                                      PCI_VPD_ADDR, &tmp16);
10014                                 if (tmp16 & 0x8000)
10015                                         break;
10016                                 msleep(1);
10017                         }
10018                         if (!(tmp16 & 0x8000))
10019                                 goto out_not_found;
10020
10021                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
10022                                               &tmp);
10023                         tmp = cpu_to_le32(tmp);
10024                         memcpy(&vpd_data[i], &tmp, 4);
10025                 }
10026         }
10027
10028         /* Now parse and find the part number. */
10029         for (i = 0; i < 256; ) {
10030                 unsigned char val = vpd_data[i];
10031                 int block_end;
10032
10033                 if (val == 0x82 || val == 0x91) {
10034                         i = (i + 3 +
10035                              (vpd_data[i + 1] +
10036                               (vpd_data[i + 2] << 8)));
10037                         continue;
10038                 }
10039
10040                 if (val != 0x90)
10041                         goto out_not_found;
10042
10043                 block_end = (i + 3 +
10044                              (vpd_data[i + 1] +
10045                               (vpd_data[i + 2] << 8)));
10046                 i += 3;
10047                 while (i < block_end) {
10048                         if (vpd_data[i + 0] == 'P' &&
10049                             vpd_data[i + 1] == 'N') {
10050                                 int partno_len = vpd_data[i + 2];
10051
10052                                 if (partno_len > 24)
10053                                         goto out_not_found;
10054
10055                                 memcpy(tp->board_part_number,
10056                                        &vpd_data[i + 3],
10057                                        partno_len);
10058
10059                                 /* Success. */
10060                                 return;
10061                         }
10062                 }
10063
10064                 /* Part number not found. */
10065                 goto out_not_found;
10066         }
10067
10068 out_not_found:
10069         strcpy(tp->board_part_number, "none");
10070 }
10071
/* Extract the bootcode firmware version string from NVRAM into
 * tp->fw_ver (16 bytes).  Silently returns, leaving fw_ver untouched,
 * if the NVRAM layout does not match what is expected.
 */
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val, offset, start;

	if (tg3_nvram_read_swab(tp, 0, &val))
		return;

	if (val != TG3_EEPROM_MAGIC)
		return;

	/* Word 0x4 = image start address, word 0xc = directory offset. */
	if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
	    tg3_nvram_read_swab(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);
	if (tg3_nvram_read_swab(tp, offset, &val))
		return;

	/* 0x0c in the top 6 bits identifies the bootcode section. */
	if ((val & 0xfc000000) == 0x0c000000) {
		u32 ver_offset, addr;
		int i;

		if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
		    tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
			return;

		if (val != 0)
			return;

		/* Copy the 16-byte version string, 4 bytes at a time. */
		addr = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			if (tg3_nvram_read(tp, addr + i, &val))
				return;

			val = cpu_to_le32(val);
			memcpy(tp->fw_ver + i, &val, 4);
		}
	}
}
10111
10112 static int __devinit tg3_get_invariants(struct tg3 *tp)
10113 {
10114         static struct pci_device_id write_reorder_chipsets[] = {
10115                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10116                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
10117                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
10118                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
10119                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
10120                              PCI_DEVICE_ID_VIA_8385_0) },
10121                 { },
10122         };
10123         u32 misc_ctrl_reg;
10124         u32 cacheline_sz_reg;
10125         u32 pci_state_reg, grc_misc_cfg;
10126         u32 val;
10127         u16 pci_cmd;
10128         int err;
10129
10130         /* Force memory write invalidate off.  If we leave it on,
10131          * then on 5700_BX chips we have to enable a workaround.
10132          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10133          * to match the cacheline size.  The Broadcom driver have this
10134          * workaround but turns MWI off all the times so never uses
10135          * it.  This seems to suggest that the workaround is insufficient.
10136          */
10137         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10138         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10139         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10140
10141         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10142          * has the register indirect write enable bit set before
10143          * we try to access any of the MMIO registers.  It is also
10144          * critical that the PCI-X hw workaround situation is decided
10145          * before that as well.
10146          */
10147         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10148                               &misc_ctrl_reg);
10149
10150         tp->pci_chip_rev_id = (misc_ctrl_reg >>
10151                                MISC_HOST_CTRL_CHIPREV_SHIFT);
10152
10153         /* Wrong chip ID in 5752 A0. This code can be removed later
10154          * as A0 is not in production.
10155          */
10156         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10157                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10158
10159         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10160          * we need to disable memory and use config. cycles
10161          * only to access all registers. The 5702/03 chips
10162          * can mistakenly decode the special cycles from the
10163          * ICH chipsets as memory write cycles, causing corruption
10164          * of register and memory space. Only certain ICH bridges
10165          * will drive special cycles with non-zero data during the
10166          * address phase which can fall within the 5703's address
10167          * range. This is not an ICH bug as the PCI spec allows
10168          * non-zero address during special cycles. However, only
10169          * these ICH bridges are known to drive non-zero addresses
10170          * during special cycles.
10171          *
10172          * Since special cycles do not cross PCI bridges, we only
10173          * enable this workaround if the 5703 is on the secondary
10174          * bus of these ICH bridges.
10175          */
10176         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10177             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10178                 static struct tg3_dev_id {
10179                         u32     vendor;
10180                         u32     device;
10181                         u32     rev;
10182                 } ich_chipsets[] = {
10183                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10184                           PCI_ANY_ID },
10185                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10186                           PCI_ANY_ID },
10187                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10188                           0xa },
10189                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10190                           PCI_ANY_ID },
10191                         { },
10192                 };
10193                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
10194                 struct pci_dev *bridge = NULL;
10195
10196                 while (pci_id->vendor != 0) {
10197                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
10198                                                 bridge);
10199                         if (!bridge) {
10200                                 pci_id++;
10201                                 continue;
10202                         }
10203                         if (pci_id->rev != PCI_ANY_ID) {
10204                                 u8 rev;
10205
10206                                 pci_read_config_byte(bridge, PCI_REVISION_ID,
10207                                                      &rev);
10208                                 if (rev > pci_id->rev)
10209                                         continue;
10210                         }
10211                         if (bridge->subordinate &&
10212                             (bridge->subordinate->number ==
10213                              tp->pdev->bus->number)) {
10214
10215                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10216                                 pci_dev_put(bridge);
10217                                 break;
10218                         }
10219                 }
10220         }
10221
10222         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10223          * DMA addresses > 40-bit. This bridge may have other additional
10224          * 57xx devices behind it in some 4-port NIC designs for example.
10225          * Any tg3 device found behind the bridge will also need the 40-bit
10226          * DMA workaround.
10227          */
10228         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10229             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10230                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
10231                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10232                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
10233         }
10234         else {
10235                 struct pci_dev *bridge = NULL;
10236
10237                 do {
10238                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10239                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
10240                                                 bridge);
10241                         if (bridge && bridge->subordinate &&
10242                             (bridge->subordinate->number <=
10243                              tp->pdev->bus->number) &&
10244                             (bridge->subordinate->subordinate >=
10245                              tp->pdev->bus->number)) {
10246                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10247                                 pci_dev_put(bridge);
10248                                 break;
10249                         }
10250                 } while (bridge);
10251         }
10252
10253         /* Initialize misc host control in PCI block. */
10254         tp->misc_host_ctrl |= (misc_ctrl_reg &
10255                                MISC_HOST_CTRL_CHIPREV);
10256         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10257                                tp->misc_host_ctrl);
10258
10259         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10260                               &cacheline_sz_reg);
10261
10262         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
10263         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
10264         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
10265         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
10266
10267         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10268             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10269             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10270             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10271             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10272                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10273
10274         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10275             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10276                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
10277
10278         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
10279                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10280                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
10281                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
10282                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
10283                 } else {
10284                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 |
10285                                           TG3_FLG2_HW_TSO_1_BUG;
10286                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10287                                 ASIC_REV_5750 &&
10288                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
10289                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_1_BUG;
10290                 }
10291         }
10292
10293         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10294             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
10295             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
10296             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
10297             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)
10298                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10299
10300         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
10301                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
10302
10303         /* If we have an AMD 762 or VIA K8T800 chipset, write
10304          * reordering to the mailbox registers done by the host
10305          * controller can cause major troubles.  We read back from
10306          * every mailbox register write to force the writes to be
10307          * posted to the chip in order.
10308          */
10309         if (pci_dev_present(write_reorder_chipsets) &&
10310             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10311                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
10312
10313         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10314             tp->pci_lat_timer < 64) {
10315                 tp->pci_lat_timer = 64;
10316
10317                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
10318                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
10319                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
10320                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
10321
10322                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10323                                        cacheline_sz_reg);
10324         }
10325
10326         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10327                               &pci_state_reg);
10328
10329         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
10330                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
10331
10332                 /* If this is a 5700 BX chipset, and we are in PCI-X
10333                  * mode, enable register write workaround.
10334                  *
10335                  * The workaround is to use indirect register accesses
10336                  * for all chip writes not to mailbox registers.
10337                  */
10338                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
10339                         u32 pm_reg;
10340                         u16 pci_cmd;
10341
10342                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10343
10344                         /* The chip can have it's power management PCI config
10345                          * space registers clobbered due to this bug.
10346                          * So explicitly force the chip into D0 here.
10347                          */
10348                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10349                                               &pm_reg);
10350                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10351                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10352                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10353                                                pm_reg);
10354
10355                         /* Also, force SERR#/PERR# in PCI command. */
10356                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10357                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10358                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10359                 }
10360         }
10361
10362         /* 5700 BX chips need to have their TX producer index mailboxes
10363          * written twice to workaround a bug.
10364          */
10365         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10366                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10367
10368         /* Back to back register writes can cause problems on this chip,
10369          * the workaround is to read back all reg writes except those to
10370          * mailbox regs.  See tg3_write_indirect_reg32().
10371          *
10372          * PCI Express 5750_A0 rev chips need this workaround too.
10373          */
10374         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10375             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10376              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
10377                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
10378
10379         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10380                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10381         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10382                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10383
10384         /* Chip-specific fixup from Broadcom driver */
10385         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10386             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10387                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10388                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10389         }
10390
10391         /* Default fast path register access methods */
10392         tp->read32 = tg3_read32;
10393         tp->write32 = tg3_write32;
10394         tp->read32_mbox = tg3_read32;
10395         tp->write32_mbox = tg3_write32;
10396         tp->write32_tx_mbox = tg3_write32;
10397         tp->write32_rx_mbox = tg3_write32;
10398
10399         /* Various workaround register access methods */
10400         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10401                 tp->write32 = tg3_write_indirect_reg32;
10402         else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
10403                 tp->write32 = tg3_write_flush_reg32;
10404
10405         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10406             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10407                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10408                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10409                         tp->write32_rx_mbox = tg3_write_flush_reg32;
10410         }
10411
10412         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10413                 tp->read32 = tg3_read_indirect_reg32;
10414                 tp->write32 = tg3_write_indirect_reg32;
10415                 tp->read32_mbox = tg3_read_indirect_mbox;
10416                 tp->write32_mbox = tg3_write_indirect_mbox;
10417                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
10418                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
10419
10420                 iounmap(tp->regs);
10421                 tp->regs = NULL;
10422
10423                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10424                 pci_cmd &= ~PCI_COMMAND_MEMORY;
10425                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10426         }
10427
10428         if (tp->write32 == tg3_write_indirect_reg32 ||
10429             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10430              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10431               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
10432                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
10433
10434         /* Get eeprom hw config before calling tg3_set_power_state().
10435          * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
10436          * determined before calling tg3_set_power_state() so that
10437          * we know whether or not to switch out of Vaux power.
10438          * When the flag is set, it means that GPIO1 is used for eeprom
10439          * write protect and also implies that it is a LOM where GPIOs
10440          * are not used to switch power.
10441          */
10442         tg3_get_eeprom_hw_cfg(tp);
10443
10444         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10445          * GPIO1 driven high will bring 5700's external PHY out of reset.
10446          * It is also used as eeprom write protect on LOMs.
10447          */
10448         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10449         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10450             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10451                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10452                                        GRC_LCLCTRL_GPIO_OUTPUT1);
10453         /* Unused GPIO3 must be driven as output on 5752 because there
10454          * are no pull-up resistors on unused GPIO pins.
10455          */
10456         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10457                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
10458
10459         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10460                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
10461
10462         /* Force the chip into D0. */
10463         err = tg3_set_power_state(tp, PCI_D0);
10464         if (err) {
10465                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10466                        pci_name(tp->pdev));
10467                 return err;
10468         }
10469
10470         /* 5700 B0 chips do not support checksumming correctly due
10471          * to hardware bugs.
10472          */
10473         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10474                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10475
10476         /* Derive initial jumbo mode from MTU assigned in
10477          * ether_setup() via the alloc_etherdev() call
10478          */
10479         if (tp->dev->mtu > ETH_DATA_LEN &&
10480             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10481                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
10482
10483         /* Determine WakeOnLan speed to use. */
10484         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10485             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10486             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10487             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10488                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10489         } else {
10490                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10491         }
10492
10493         /* A few boards don't want Ethernet@WireSpeed phy feature */
10494         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10495             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10496              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
10497              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10498             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
10499                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10500
10501         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10502             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10503                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10504         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10505                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10506
10507         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10508                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10509                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10510                         tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
10511                 else
10512                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10513         }
10514
10515         tp->coalesce_mode = 0;
10516         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10517             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10518                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10519
10520         /* Initialize MAC MI mode, polling disabled. */
10521         tw32_f(MAC_MI_MODE, tp->mi_mode);
10522         udelay(80);
10523
10524         /* Initialize data/descriptor byte/word swapping. */
10525         val = tr32(GRC_MODE);
10526         val &= GRC_MODE_HOST_STACKUP;
10527         tw32(GRC_MODE, val | tp->grc_mode);
10528
10529         tg3_switch_clocks(tp);
10530
10531         /* Clear this out for sanity. */
10532         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10533
10534         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10535                               &pci_state_reg);
10536         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10537             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10538                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10539
10540                 if (chiprevid == CHIPREV_ID_5701_A0 ||
10541                     chiprevid == CHIPREV_ID_5701_B0 ||
10542                     chiprevid == CHIPREV_ID_5701_B2 ||
10543                     chiprevid == CHIPREV_ID_5701_B5) {
10544                         void __iomem *sram_base;
10545
10546                         /* Write some dummy words into the SRAM status block
10547                          * area, see if it reads back correctly.  If the return
10548                          * value is bad, force enable the PCIX workaround.
10549                          */
10550                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10551
10552                         writel(0x00000000, sram_base);
10553                         writel(0x00000000, sram_base + 4);
10554                         writel(0xffffffff, sram_base + 4);
10555                         if (readl(sram_base) != 0x00000000)
10556                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10557                 }
10558         }
10559
10560         udelay(50);
10561         tg3_nvram_init(tp);
10562
10563         grc_misc_cfg = tr32(GRC_MISC_CFG);
10564         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10565
10566         /* Broadcom's driver says that CIOBE multisplit has a bug */
10567 #if 0
10568         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
10569             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
10570                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
10571                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
10572         }
10573 #endif
10574         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10575             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
10576              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
10577                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
10578
10579         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10580             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
10581                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
10582         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
10583                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
10584                                       HOSTCC_MODE_CLRTICK_TXBD);
10585
10586                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
10587                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10588                                        tp->misc_host_ctrl);
10589         }
10590
10591         /* these are limited to 10/100 only */
10592         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10593              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
10594             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10595              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10596              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
10597               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
10598               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
10599             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10600              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
10601               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
10602                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
10603
10604         err = tg3_phy_probe(tp);
10605         if (err) {
10606                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10607                        pci_name(tp->pdev), err);
10608                 /* ... but do not return immediately ... */
10609         }
10610
10611         tg3_read_partno(tp);
10612         tg3_read_fw_ver(tp);
10613
10614         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10615                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10616         } else {
10617                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10618                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10619                 else
10620                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10621         }
10622
10623         /* 5700 {AX,BX} chips have a broken status block link
10624          * change bit implementation, so we must use the
10625          * status register in those cases.
10626          */
10627         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10628                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10629         else
10630                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10631
10632         /* The led_ctrl is set during tg3_phy_probe, here we might
10633          * have to force the link status polling mechanism based
10634          * upon subsystem IDs.
10635          */
10636         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10637             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10638                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10639                                   TG3_FLAG_USE_LINKCHG_REG);
10640         }
10641
10642         /* For all SERDES we poll the MAC status register. */
10643         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10644                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10645         else
10646                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10647
10648         /* All chips before 5787 can get confused if TX buffers
10649          * straddle the 4GB address boundary in some cases.
10650          */
10651         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10652             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
10653                 tp->dev->hard_start_xmit = tg3_start_xmit;
10654         else
10655                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
10656
10657         tp->rx_offset = 2;
10658         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10659             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10660                 tp->rx_offset = 0;
10661
10662         tp->rx_std_max_post = TG3_RX_RING_SIZE;
10663
10664         /* Increment the rx prod index on the rx std ring by at most
10665          * 8 for these chips to workaround hw errata.
10666          */
10667         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10668             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10669             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10670                 tp->rx_std_max_post = 8;
10671
10672         /* By default, disable wake-on-lan.  User can change this
10673          * using ETHTOOL_SWOL.
10674          */
10675         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10676
10677         return err;
10678 }
10679
10680 #ifdef CONFIG_SPARC64
10681 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
10682 {
10683         struct net_device *dev = tp->dev;
10684         struct pci_dev *pdev = tp->pdev;
10685         struct pcidev_cookie *pcp = pdev->sysdata;
10686
10687         if (pcp != NULL) {
10688                 unsigned char *addr;
10689                 int len;
10690
10691                 addr = of_get_property(pcp->prom_node, "local-mac-address",
10692                                         &len);
10693                 if (addr && len == 6) {
10694                         memcpy(dev->dev_addr, addr, 6);
10695                         memcpy(dev->perm_addr, dev->dev_addr, 6);
10696                         return 0;
10697                 }
10698         }
10699         return -ENODEV;
10700 }
10701
10702 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
10703 {
10704         struct net_device *dev = tp->dev;
10705
10706         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
10707         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
10708         return 0;
10709 }
10710 #endif
10711
/* Determine the device's MAC address, trying sources in decreasing
 * order of trustworthiness:
 *   1. sparc64 OpenFirmware property (CONFIG_SPARC64 only),
 *   2. the NIC SRAM mailbox filled in by boot firmware,
 *   3. NVRAM at mac_offset,
 *   4. the live MAC_ADDR_0 hardware registers,
 *   5. (sparc64) the system IDPROM as a last resort.
 *
 * Returns 0 with dev->dev_addr and dev->perm_addr populated, or
 * -EINVAL if no valid ethernet address could be found anywhere.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC64
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	/* Default NVRAM offset of the MAC address. */
	mac_offset = 0x7c;
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		/* Dual-MAC parts keep the second function's address at a
		 * different NVRAM offset.
		 */
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		/* NOTE(review): a *successful* lock is answered with an
		 * NVRAM reset rather than an unlock -- presumably
		 * intentional for these dual-function chips; confirm
		 * against the 5704 errata before changing.
		 */
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	}

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	/* 0x484b is ASCII "HK": the marker the bootcode leaves in the
	 * upper half of the mailbox when a valid address follows.
	 */
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		/* Note the NVRAM image stores the bytes in a different
		 * order than the SRAM mailbox above.
		 */
		if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
			dev->dev_addr[0] = ((hi >> 16) & 0xff);
			dev->dev_addr[1] = ((hi >> 24) & 0xff);
			dev->dev_addr[2] = ((lo >>  0) & 0xff);
			dev->dev_addr[3] = ((lo >>  8) & 0xff);
			dev->dev_addr[4] = ((lo >> 16) & 0xff);
			dev->dev_addr[5] = ((lo >> 24) & 0xff);
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			/* Registers hold the address big-endian style:
			 * last octet in the low byte of MAC_ADDR_0_LOW.
			 */
			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	/* Reject all-zero/multicast addresses from any of the sources. */
	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC64
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
10784
10785 #define BOUNDARY_SINGLE_CACHELINE       1
10786 #define BOUNDARY_MULTI_CACHELINE        2
10787
10788 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
10789 {
10790         int cacheline_size;
10791         u8 byte;
10792         int goal;
10793
10794         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
10795         if (byte == 0)
10796                 cacheline_size = 1024;
10797         else
10798                 cacheline_size = (int) byte * 4;
10799
10800         /* On 5703 and later chips, the boundary bits have no
10801          * effect.
10802          */
10803         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10804             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
10805             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10806                 goto out;
10807
10808 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
10809         goal = BOUNDARY_MULTI_CACHELINE;
10810 #else
10811 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
10812         goal = BOUNDARY_SINGLE_CACHELINE;
10813 #else
10814         goal = 0;
10815 #endif
10816 #endif
10817
10818         if (!goal)
10819                 goto out;
10820
10821         /* PCI controllers on most RISC systems tend to disconnect
10822          * when a device tries to burst across a cache-line boundary.
10823          * Therefore, letting tg3 do so just wastes PCI bandwidth.
10824          *
10825          * Unfortunately, for PCI-E there are only limited
10826          * write-side controls for this, and thus for reads
10827          * we will still get the disconnects.  We'll also waste
10828          * these PCI cycles for both read and write for chips
10829          * other than 5700 and 5701 which do not implement the
10830          * boundary bits.
10831          */
10832         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10833             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
10834                 switch (cacheline_size) {
10835                 case 16:
10836                 case 32:
10837                 case 64:
10838                 case 128:
10839                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10840                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
10841                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
10842                         } else {
10843                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10844                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10845                         }
10846                         break;
10847
10848                 case 256:
10849                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
10850                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
10851                         break;
10852
10853                 default:
10854                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
10855                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
10856                         break;
10857                 };
10858         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10859                 switch (cacheline_size) {
10860                 case 16:
10861                 case 32:
10862                 case 64:
10863                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10864                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10865                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
10866                                 break;
10867                         }
10868                         /* fallthrough */
10869                 case 128:
10870                 default:
10871                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
10872                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
10873                         break;
10874                 };
10875         } else {
10876                 switch (cacheline_size) {
10877                 case 16:
10878                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10879                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
10880                                         DMA_RWCTRL_WRITE_BNDRY_16);
10881                                 break;
10882                         }
10883                         /* fallthrough */
10884                 case 32:
10885                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10886                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
10887                                         DMA_RWCTRL_WRITE_BNDRY_32);
10888                                 break;
10889                         }
10890                         /* fallthrough */
10891                 case 64:
10892                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10893                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
10894                                         DMA_RWCTRL_WRITE_BNDRY_64);
10895                                 break;
10896                         }
10897                         /* fallthrough */
10898                 case 128:
10899                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
10900                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
10901                                         DMA_RWCTRL_WRITE_BNDRY_128);
10902                                 break;
10903                         }
10904                         /* fallthrough */
10905                 case 256:
10906                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
10907                                 DMA_RWCTRL_WRITE_BNDRY_256);
10908                         break;
10909                 case 512:
10910                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
10911                                 DMA_RWCTRL_WRITE_BNDRY_512);
10912                         break;
10913                 case 1024:
10914                 default:
10915                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
10916                                 DMA_RWCTRL_WRITE_BNDRY_1024);
10917                         break;
10918                 };
10919         }
10920
10921 out:
10922         return val;
10923 }
10924
/* Perform one raw DMA transfer between host memory and NIC SRAM using the
 * chip's internal buffer-descriptor engine, then poll for completion.
 *
 * @tp:        device state
 * @buf:       host-side test buffer (CPU address, unused here except as arg)
 * @buf_dma:   DMA (bus) address of the test buffer
 * @size:      transfer length in bytes
 * @to_device: nonzero = host-to-card (read DMA engine),
 *             zero = card-to-host (write DMA engine)
 *
 * Returns 0 when the completion FIFO reports the descriptor address,
 * -ENODEV if the transfer does not complete within 40 * 100us of polling.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce the completion FIFOs and DMA status registers before
	 * queueing the test descriptor.
	 */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Build the internal descriptor: host address split into hi/lo
	 * words, a fixed NIC mbuf location (0x2100 in SRAM), and length.
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		/* Host-to-card: enable the read-DMA engine. */
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		/* Card-to-host: enable the write-DMA engine. */
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM one 32-bit word at a time
	 * through the PCI memory window in config space.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick off the DMA by enqueueing the descriptor's SRAM address. */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll the matching completion FIFO, up to 40 * 100us total. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
11005
11006 #define TEST_BUFFER_SIZE        0x2000
11007
/* Tune TG3PCI_DMA_RW_CTRL for this chip/bus combination and, on 5700/5701
 * only, run a host<->NIC DMA loopback to detect the write-DMA boundary bug,
 * falling back to a conservative 16-byte write boundary if data corrupts.
 *
 * Returns 0 on success, -ENOMEM if the DMA test buffer cannot be allocated,
 * the error from tg3_do_test_dma() on transfer failure, or -ENODEV when
 * read-back data is corrupted even at the 16-byte write boundary.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret;

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Baseline PCI read/write command values, then fold in the DMA
	 * boundary bits computed for this bus type.
	 */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		/* Conventional PCI: 5705/5750 take different bits. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		/* PCI-X mode: per-ASIC settings below. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |= 0x009f0000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	/* 5703/5704: clear the low nibble of the control word. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants.  */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 need the loopback test; everything else is done. */
	ret = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		/* Fill the buffer with a known pattern (the word index). */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);

			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			/* Corruption: retry once with the conservative
			 * 16-byte write boundary before giving up.
			 */
			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		static struct pci_device_id dma_wait_state_chipsets[] = {
			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
			{ },
		};

		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		}
		else
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
11188
11189 static void __devinit tg3_init_link_config(struct tg3 *tp)
11190 {
11191         tp->link_config.advertising =
11192                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11193                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11194                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11195                  ADVERTISED_Autoneg | ADVERTISED_MII);
11196         tp->link_config.speed = SPEED_INVALID;
11197         tp->link_config.duplex = DUPLEX_INVALID;
11198         tp->link_config.autoneg = AUTONEG_ENABLE;
11199         tp->link_config.active_speed = SPEED_INVALID;
11200         tp->link_config.active_duplex = DUPLEX_INVALID;
11201         tp->link_config.phy_is_low_power = 0;
11202         tp->link_config.orig_speed = SPEED_INVALID;
11203         tp->link_config.orig_duplex = DUPLEX_INVALID;
11204         tp->link_config.orig_autoneg = AUTONEG_INVALID;
11205 }
11206
11207 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
11208 {
11209         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11210                 tp->bufmgr_config.mbuf_read_dma_low_water =
11211                         DEFAULT_MB_RDMA_LOW_WATER_5705;
11212                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11213                         DEFAULT_MB_MACRX_LOW_WATER_5705;
11214                 tp->bufmgr_config.mbuf_high_water =
11215                         DEFAULT_MB_HIGH_WATER_5705;
11216
11217                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11218                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
11219                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11220                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
11221                 tp->bufmgr_config.mbuf_high_water_jumbo =
11222                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
11223         } else {
11224                 tp->bufmgr_config.mbuf_read_dma_low_water =
11225                         DEFAULT_MB_RDMA_LOW_WATER;
11226                 tp->bufmgr_config.mbuf_mac_rx_low_water =
11227                         DEFAULT_MB_MACRX_LOW_WATER;
11228                 tp->bufmgr_config.mbuf_high_water =
11229                         DEFAULT_MB_HIGH_WATER;
11230
11231                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11232                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
11233                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11234                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
11235                 tp->bufmgr_config.mbuf_high_water_jumbo =
11236                         DEFAULT_MB_HIGH_WATER_JUMBO;
11237         }
11238
11239         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
11240         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
11241 }
11242
11243 static char * __devinit tg3_phy_string(struct tg3 *tp)
11244 {
11245         switch (tp->phy_id & PHY_ID_MASK) {
11246         case PHY_ID_BCM5400:    return "5400";
11247         case PHY_ID_BCM5401:    return "5401";
11248         case PHY_ID_BCM5411:    return "5411";
11249         case PHY_ID_BCM5701:    return "5701";
11250         case PHY_ID_BCM5703:    return "5703";
11251         case PHY_ID_BCM5704:    return "5704";
11252         case PHY_ID_BCM5705:    return "5705";
11253         case PHY_ID_BCM5750:    return "5750";
11254         case PHY_ID_BCM5752:    return "5752";
11255         case PHY_ID_BCM5714:    return "5714";
11256         case PHY_ID_BCM5780:    return "5780";
11257         case PHY_ID_BCM5755:    return "5755";
11258         case PHY_ID_BCM5787:    return "5787";
11259         case PHY_ID_BCM8002:    return "8002/serdes";
11260         case 0:                 return "serdes";
11261         default:                return "unknown";
11262         };
11263 }
11264
11265 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
11266 {
11267         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11268                 strcpy(str, "PCI Express");
11269                 return str;
11270         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11271                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
11272
11273                 strcpy(str, "PCIX:");
11274
11275                 if ((clock_ctrl == 7) ||
11276                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
11277                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
11278                         strcat(str, "133MHz");
11279                 else if (clock_ctrl == 0)
11280                         strcat(str, "33MHz");
11281                 else if (clock_ctrl == 2)
11282                         strcat(str, "50MHz");
11283                 else if (clock_ctrl == 4)
11284                         strcat(str, "66MHz");
11285                 else if (clock_ctrl == 6)
11286                         strcat(str, "100MHz");
11287         } else {
11288                 strcpy(str, "PCI:");
11289                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
11290                         strcat(str, "66MHz");
11291                 else
11292                         strcat(str, "33MHz");
11293         }
11294         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
11295                 strcat(str, ":32-bit");
11296         else
11297                 strcat(str, ":64-bit");
11298         return str;
11299 }
11300
11301 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
11302 {
11303         struct pci_dev *peer;
11304         unsigned int func, devnr = tp->pdev->devfn & ~7;
11305
11306         for (func = 0; func < 8; func++) {
11307                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
11308                 if (peer && peer != tp->pdev)
11309                         break;
11310                 pci_dev_put(peer);
11311         }
11312         /* 5704 can be configured in single-port mode, set peer to
11313          * tp->pdev in that case.
11314          */
11315         if (!peer) {
11316                 peer = tp->pdev;
11317                 return peer;
11318         }
11319
11320         /*
11321          * We don't need to keep the refcount elevated; there's no way
11322          * to remove one half of this device without removing the other
11323          */
11324         pci_dev_put(peer);
11325
11326         return peer;
11327 }
11328
11329 static void __devinit tg3_init_coal(struct tg3 *tp)
11330 {
11331         struct ethtool_coalesce *ec = &tp->coal;
11332
11333         memset(ec, 0, sizeof(*ec));
11334         ec->cmd = ETHTOOL_GCOALESCE;
11335         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
11336         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
11337         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
11338         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
11339         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
11340         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
11341         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
11342         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
11343         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
11344
11345         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
11346                                  HOSTCC_MODE_CLRTICK_TXBD)) {
11347                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
11348                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
11349                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
11350                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
11351         }
11352
11353         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11354                 ec->rx_coalesce_usecs_irq = 0;
11355                 ec->tx_coalesce_usecs_irq = 0;
11356                 ec->stats_block_coalesce_usecs = 0;
11357         }
11358 }
11359
/* PCI probe entry point: bring up one Tigon3 device.
 *
 * Enables and maps the device, allocates the net_device, fills in driver
 * state and netdev callbacks, reads chip invariants, configures DMA
 * masks, runs the DMA self-test, and finally registers the netdev and
 * prints the probe banner.  Error paths unwind in strict reverse order
 * of acquisition via the err_out_* labels at the bottom.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;
	unsigned long tg3reg_base, tg3reg_len;
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	char str[40];
	u64 dma_mask, persist_dma_mask;

	/* Print the driver banner once, on the first device probed. */
	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	/* Registers live in BAR 0; it must be a memory BAR. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	tg3reg_base = pci_resource_start(pdev, 0);
	tg3reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = tg3_vlan_rx_register;
	dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
#endif

	/* Initialize driver-private state with compile-time defaults. */
	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->mac_mode = TG3_DEF_MAC_MODE;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	tp->mi_mode = MAC_MI_MODE_BASE;
	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task, tp);

	tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
	if (tp->regs == 0UL) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	tp->tx_pending = TG3_DEF_TX_RING_PENDING;

	/* Wire up the netdev operations. */
	dev->open = tg3_open;
	dev->stop = tg3_close;
	dev->get_stats = tg3_get_stats;
	dev->set_multicast_list = tg3_set_rx_mode;
	dev->set_mac_address = tg3_set_mac_addr;
	dev->do_ioctl = tg3_ioctl;
	dev->tx_timeout = tg3_tx_timeout;
	dev->poll = tg3_poll;
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->weight = 64;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->change_mtu = tg3_change_mtu;
	dev->irq = pdev->irq;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = tg3_poll_controller;
#endif

	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		persist_dma_mask = dma_mask = DMA_32BIT_MASK;
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_64BIT_MASK;
#endif
	} else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (dma_mask > DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				printk(KERN_ERR PFX "Unable to obtain 64 bit "
				       "DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	/* Fall back to a 32-bit mask if the wide mask was refused. */
	if (err || dma_mask == DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_iounmap;
		}
	}

	tg3_init_bufmgr_config(tp);

#if TG3_TSO_SUPPORT != 0
	/* Decide TSO capability: hardware TSO always qualifies; firmware
	 * TSO is disabled on 5700/5701/5705-A0 and whenever ASF firmware
	 * is active (it shares the firmware resources).
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		dev->features |= NETIF_F_TSO;
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
			dev->features |= NETIF_F_TSO6;
	}

#endif

	/* 5705-A1 without TSO on a slow bus gets a smaller RX ring. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	/* Dual-port chips: find the other function of this device. */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
		tp->pdev_peer = tg3_find_peer(tp);

	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		pci_save_state(tp->pdev);
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_iounmap;
	}

	/* Tigon3 can do ipv4 only... and some chips have buggy
	 * checksumming.
	 */
	if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
			dev->features |= NETIF_F_HW_CSUM;
		else
			dev->features |= NETIF_F_IP_CSUM;
		dev->features |= NETIF_F_SG;
		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	} else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;

	tg3_init_coal(tp);

	/* Now that we have fully setup the chip, save away a snapshot
	 * of the PCI config space.  We need to restore this after
	 * GRC_MISC_CFG core clock resets and some resume events.
	 */
	pci_save_state(tp->pdev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	pci_set_drvdata(pdev, dev);

	/* Probe banner: chip, bus, MAC address, feature flags. */
	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_phy_string(tp),
	       tg3_bus_string(tp, str),
	       (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");

	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i],
		       i == 5 ? '\n' : ':');

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
	       "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
	       "TSOcap[%d] \n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
	       dev->name, tp->dma_rwctrl,
	       (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
	        (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));

	netif_carrier_off(tp->dev);

	return 0;

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
11686
11687 static void __devexit tg3_remove_one(struct pci_dev *pdev)
11688 {
11689         struct net_device *dev = pci_get_drvdata(pdev);
11690
11691         if (dev) {
11692                 struct tg3 *tp = netdev_priv(dev);
11693
11694                 flush_scheduled_work();
11695                 unregister_netdev(dev);
11696                 if (tp->regs) {
11697                         iounmap(tp->regs);
11698                         tp->regs = NULL;
11699                 }
11700                 free_netdev(dev);
11701                 pci_release_regions(pdev);
11702                 pci_disable_device(pdev);
11703                 pci_set_drvdata(pdev, NULL);
11704         }
11705 }
11706
/* PCI power-management suspend hook: quiesce the interface, halt the
 * chip, and drop it into the requested low-power state.  On failure to
 * enter that state the hardware is restarted so the device remains
 * usable, but the original error is still returned to abort the suspend.
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Nothing to quiesce if the interface was never brought up. */
	if (!netif_running(dev))
		return 0;

	/* Finish any scheduled reset task before tearing the device down. */
	flush_scheduled_work();
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
	if (err) {
		/* Power transition failed: bring the chip back up so the
		 * interface keeps working, then return the original error.
		 */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		if (tg3_restart_hw(tp, 1))
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);
	}

	return err;
}
11752
11753 static int tg3_resume(struct pci_dev *pdev)
11754 {
11755         struct net_device *dev = pci_get_drvdata(pdev);
11756         struct tg3 *tp = netdev_priv(dev);
11757         int err;
11758
11759         if (!netif_running(dev))
11760                 return 0;
11761
11762         pci_restore_state(tp->pdev);
11763
11764         err = tg3_set_power_state(tp, PCI_D0);
11765         if (err)
11766                 return err;
11767
11768         netif_device_attach(dev);
11769
11770         tg3_full_lock(tp, 0);
11771
11772         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11773         err = tg3_restart_hw(tp, 1);
11774         if (err)
11775                 goto out;
11776
11777         tp->timer.expires = jiffies + tp->timer_offset;
11778         add_timer(&tp->timer);
11779
11780         tg3_netif_start(tp);
11781
11782 out:
11783         tg3_full_unlock(tp);
11784
11785         return err;
11786 }
11787
/* PCI driver glue: binds the probe/remove and power-management hooks
 * to the device IDs listed in tg3_pci_tbl.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};
11796
/* Module entry point: register the PCI driver; per-device setup
 * happens in the probe callback (tg3_init_one).
 */
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}
11801
/* Module exit point: detach all bound devices and unregister the driver. */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);