tg3: 5785 enhancements
firefly-linux-kernel-4.4.55.git: drivers/net/tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2007 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/phy.h>
36 #include <linux/brcmphy.h>
37 #include <linux/if_vlan.h>
38 #include <linux/ip.h>
39 #include <linux/tcp.h>
40 #include <linux/workqueue.h>
41 #include <linux/prefetch.h>
42 #include <linux/dma-mapping.h>
43
44 #include <net/checksum.h>
45 #include <net/ip.h>
46
47 #include <asm/system.h>
48 #include <asm/io.h>
49 #include <asm/byteorder.h>
50 #include <asm/uaccess.h>
51
52 #ifdef CONFIG_SPARC
53 #include <asm/idprom.h>
54 #include <asm/prom.h>
55 #endif
56
57 #define BAR_0   0
58 #define BAR_2   2
59
60 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
61 #define TG3_VLAN_TAG_USED 1
62 #else
63 #define TG3_VLAN_TAG_USED 0
64 #endif
65
66 #define TG3_TSO_SUPPORT 1
67
68 #include "tg3.h"
69
70 #define DRV_MODULE_NAME         "tg3"
71 #define PFX DRV_MODULE_NAME     ": "
72 #define DRV_MODULE_VERSION      "3.94"
73 #define DRV_MODULE_RELDATE      "August 14, 2008"
74
75 #define TG3_DEF_MAC_MODE        0
76 #define TG3_DEF_RX_MODE         0
77 #define TG3_DEF_TX_MODE         0
78 #define TG3_DEF_MSG_ENABLE        \
79         (NETIF_MSG_DRV          | \
80          NETIF_MSG_PROBE        | \
81          NETIF_MSG_LINK         | \
82          NETIF_MSG_TIMER        | \
83          NETIF_MSG_IFDOWN       | \
84          NETIF_MSG_IFUP         | \
85          NETIF_MSG_RX_ERR       | \
86          NETIF_MSG_TX_ERR)
87
88 /* length of time before we decide the hardware is borked,
89  * and dev->tx_timeout() should be called to fix the problem
90  */
91 #define TG3_TX_TIMEOUT                  (5 * HZ)
92
93 /* hardware minimum and maximum for a single frame's data payload */
94 #define TG3_MIN_MTU                     60
95 #define TG3_MAX_MTU(tp) \
96         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
97
98 /* These numbers seem to be hard coded in the NIC firmware somehow.
99  * You can't change the ring sizes, but you can change where you place
100  * them in the NIC onboard memory.
101  */
102 #define TG3_RX_RING_SIZE                512
103 #define TG3_DEF_RX_RING_PENDING         200
104 #define TG3_RX_JUMBO_RING_SIZE          256
105 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
106
107 /* Do not place this n-ring entries value into the tp struct itself,
108  * we really want to expose these constants to GCC so that modulo et
109  * al.  operations are done with shifts and masks instead of with
110  * hw multiply/modulo instructions.  Another solution would be to
111  * replace things like '% foo' with '& (foo - 1)'.
112  */
113 #define TG3_RX_RCB_RING_SIZE(tp)        \
114         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
115
116 #define TG3_TX_RING_SIZE                512
117 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
118
119 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
120                                  TG3_RX_RING_SIZE)
121 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
122                                  TG3_RX_JUMBO_RING_SIZE)
123 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
124                                    TG3_RX_RCB_RING_SIZE(tp))
125 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
126                                  TG3_TX_RING_SIZE)
127 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
128
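/*
 * Illustrative sketch (not part of the original file): the comment above
 * on TG3_RX_RCB_RING_SIZE explains why the ring sizes stay compile-time
 * constants, and NEXT_TX() above is the '% foo' -> '& (foo - 1)' rewrite
 * in action.  The same trick, spelled out for a hypothetical power-of-two
 * ring size:
 */
static inline u32 tg3_example_next_idx(u32 idx, u32 ring_size)
{
	/* valid only when ring_size is a power of two */
	return (idx + 1) & (ring_size - 1);
}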
129 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
130 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
131
132 /* minimum number of free TX descriptors required to wake up TX process */
133 #define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)
134
135 /* number of ETHTOOL_GSTATS u64's */
136 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
137
138 #define TG3_NUM_TEST            6
139
140 static char version[] __devinitdata =
141         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
142
143 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
144 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
145 MODULE_LICENSE("GPL");
146 MODULE_VERSION(DRV_MODULE_VERSION);
147
148 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
149 module_param(tg3_debug, int, 0);
150 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
151
152 static struct pci_device_id tg3_pci_tbl[] = {
153         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
154         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
155         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
156         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
157         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
158         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
159         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
160         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
161         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
162         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
163         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
164         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
165         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
166         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
167         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
168         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
169         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
170         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
171         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
172         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
173         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
174         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
175         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
176         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
177         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
178         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
179         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
180         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
181         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
182         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
183         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
184         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
185         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
186         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
187         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
188         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
189         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
190         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
191         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
192         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
193         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
194         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
195         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
196         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
197         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
198         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
199         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
200         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
201         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
202         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
203         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
204         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
205         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
206         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
207         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
208         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
209         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
210         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
211         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
212         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
213         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5785)},
214         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
215         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
216         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
217         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
218         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
219         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
220         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
221         {}
222 };
223
224 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
225
226 static const struct {
227         const char string[ETH_GSTRING_LEN];
228 } ethtool_stats_keys[TG3_NUM_STATS] = {
229         { "rx_octets" },
230         { "rx_fragments" },
231         { "rx_ucast_packets" },
232         { "rx_mcast_packets" },
233         { "rx_bcast_packets" },
234         { "rx_fcs_errors" },
235         { "rx_align_errors" },
236         { "rx_xon_pause_rcvd" },
237         { "rx_xoff_pause_rcvd" },
238         { "rx_mac_ctrl_rcvd" },
239         { "rx_xoff_entered" },
240         { "rx_frame_too_long_errors" },
241         { "rx_jabbers" },
242         { "rx_undersize_packets" },
243         { "rx_in_length_errors" },
244         { "rx_out_length_errors" },
245         { "rx_64_or_less_octet_packets" },
246         { "rx_65_to_127_octet_packets" },
247         { "rx_128_to_255_octet_packets" },
248         { "rx_256_to_511_octet_packets" },
249         { "rx_512_to_1023_octet_packets" },
250         { "rx_1024_to_1522_octet_packets" },
251         { "rx_1523_to_2047_octet_packets" },
252         { "rx_2048_to_4095_octet_packets" },
253         { "rx_4096_to_8191_octet_packets" },
254         { "rx_8192_to_9022_octet_packets" },
255
256         { "tx_octets" },
257         { "tx_collisions" },
258
259         { "tx_xon_sent" },
260         { "tx_xoff_sent" },
261         { "tx_flow_control" },
262         { "tx_mac_errors" },
263         { "tx_single_collisions" },
264         { "tx_mult_collisions" },
265         { "tx_deferred" },
266         { "tx_excessive_collisions" },
267         { "tx_late_collisions" },
268         { "tx_collide_2times" },
269         { "tx_collide_3times" },
270         { "tx_collide_4times" },
271         { "tx_collide_5times" },
272         { "tx_collide_6times" },
273         { "tx_collide_7times" },
274         { "tx_collide_8times" },
275         { "tx_collide_9times" },
276         { "tx_collide_10times" },
277         { "tx_collide_11times" },
278         { "tx_collide_12times" },
279         { "tx_collide_13times" },
280         { "tx_collide_14times" },
281         { "tx_collide_15times" },
282         { "tx_ucast_packets" },
283         { "tx_mcast_packets" },
284         { "tx_bcast_packets" },
285         { "tx_carrier_sense_errors" },
286         { "tx_discards" },
287         { "tx_errors" },
288
289         { "dma_writeq_full" },
290         { "dma_write_prioq_full" },
291         { "rxbds_empty" },
292         { "rx_discards" },
293         { "rx_errors" },
294         { "rx_threshold_hit" },
295
296         { "dma_readq_full" },
297         { "dma_read_prioq_full" },
298         { "tx_comp_queue_full" },
299
300         { "ring_set_send_prod_index" },
301         { "ring_status_update" },
302         { "nic_irqs" },
303         { "nic_avoided_irqs" },
304         { "nic_tx_threshold_hit" }
305 };
306
307 static const struct {
308         const char string[ETH_GSTRING_LEN];
309 } ethtool_test_keys[TG3_NUM_TEST] = {
310         { "nvram test     (online) " },
311         { "link test      (online) " },
312         { "register test  (offline)" },
313         { "memory test    (offline)" },
314         { "loopback test  (offline)" },
315         { "interrupt test (offline)" },
316 };
317
318 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
319 {
320         writel(val, tp->regs + off);
321 }
322
323 static u32 tg3_read32(struct tg3 *tp, u32 off)
324 {
325         return (readl(tp->regs + off));
326 }
327
328 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
329 {
330         writel(val, tp->aperegs + off);
331 }
332
333 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
334 {
335         return (readl(tp->aperegs + off));
336 }
337
338 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
339 {
340         unsigned long flags;
341
342         spin_lock_irqsave(&tp->indirect_lock, flags);
343         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
344         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
345         spin_unlock_irqrestore(&tp->indirect_lock, flags);
346 }
347
348 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
349 {
350         writel(val, tp->regs + off);
351         readl(tp->regs + off);
352 }
353
354 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
355 {
356         unsigned long flags;
357         u32 val;
358
359         spin_lock_irqsave(&tp->indirect_lock, flags);
360         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
361         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
362         spin_unlock_irqrestore(&tp->indirect_lock, flags);
363         return val;
364 }
365
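/*
 * Illustrative note (not part of the original file): the *_indirect_*
 * helpers above reach a register by writing its offset to the
 * TG3PCI_REG_BASE_ADDR PCI config register and then moving the data
 * through TG3PCI_REG_DATA, all under indirect_lock.  Presumably this
 * config-space path is what tp->write32/tp->read32 point at when flags
 * such as TG3_FLAG_PCIX_TARGET_HWBUG or TG3_FLG2_ICH_WORKAROUND select
 * the "non-posted" method in _tw32_flush() below.
 */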
366 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
367 {
368         unsigned long flags;
369
370         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
371                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
372                                        TG3_64BIT_REG_LOW, val);
373                 return;
374         }
375         if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
376                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
377                                        TG3_64BIT_REG_LOW, val);
378                 return;
379         }
380
381         spin_lock_irqsave(&tp->indirect_lock, flags);
382         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
383         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
384         spin_unlock_irqrestore(&tp->indirect_lock, flags);
385
386         /* In indirect mode when disabling interrupts, we also need
387          * to clear the interrupt bit in the GRC local ctrl register.
388          */
389         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
390             (val == 0x1)) {
391                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
392                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
393         }
394 }
395
396 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
397 {
398         unsigned long flags;
399         u32 val;
400
401         spin_lock_irqsave(&tp->indirect_lock, flags);
402         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
403         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
404         spin_unlock_irqrestore(&tp->indirect_lock, flags);
405         return val;
406 }
407
408 /* usec_wait specifies the wait time in usec when writing to certain registers
409  * where it is unsafe to read back the register without some delay.
410  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
411  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
412  */
413 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
414 {
415         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
416             (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
417                 /* Non-posted methods */
418                 tp->write32(tp, off, val);
419         else {
420                 /* Posted method */
421                 tg3_write32(tp, off, val);
422                 if (usec_wait)
423                         udelay(usec_wait);
424                 tp->read32(tp, off);
425         }
426         /* Wait again after the read for the posted method to guarantee that
427          * the wait time is met.
428          */
429         if (usec_wait)
430                 udelay(usec_wait);
431 }
432
433 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
434 {
435         tp->write32_mbox(tp, off, val);
436         if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
437             !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
438                 tp->read32_mbox(tp, off);
439 }
440
441 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
442 {
443         void __iomem *mbox = tp->regs + off;
444         writel(val, mbox);
445         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
446                 writel(val, mbox);
447         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
448                 readl(mbox);
449 }
450
451 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
452 {
453         return (readl(tp->regs + off + GRCMBOX_BASE));
454 }
455
456 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
457 {
458         writel(val, tp->regs + off + GRCMBOX_BASE);
459 }
460
461 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
462 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
463 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
464 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
465 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
466
467 #define tw32(reg,val)           tp->write32(tp, reg, val)
468 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
469 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
470 #define tr32(reg)               tp->read32(tp, reg)
471
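/*
 * Illustrative sketch (not part of the original file): how the flushed
 * write variants above are typically used.  GRC_LOCAL_CTRL is one of the
 * registers the comment before _tw32_flush() names as needing a delay
 * after GPIO power switching; the 100 usec value here is only an example.
 */
static inline void tg3_example_flushed_writes(struct tg3 *tp, u32 val)
{
	tw32_f(GRC_LOCAL_CTRL, val);		/* write, then read back to flush */
	tw32_wait_f(GRC_LOCAL_CTRL, val, 100);	/* write, flush, then wait 100 usec */
}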
472 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
473 {
474         unsigned long flags;
475
476         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
477             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
478                 return;
479
480         spin_lock_irqsave(&tp->indirect_lock, flags);
481         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
482                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
483                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
484
485                 /* Always leave this as zero. */
486                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
487         } else {
488                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
489                 tw32_f(TG3PCI_MEM_WIN_DATA, val);
490
491                 /* Always leave this as zero. */
492                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
493         }
494         spin_unlock_irqrestore(&tp->indirect_lock, flags);
495 }
496
497 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
498 {
499         unsigned long flags;
500
501         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
502             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
503                 *val = 0;
504                 return;
505         }
506
507         spin_lock_irqsave(&tp->indirect_lock, flags);
508         if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
509                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
510                 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
511
512                 /* Always leave this as zero. */
513                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
514         } else {
515                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
516                 *val = tr32(TG3PCI_MEM_WIN_DATA);
517
518                 /* Always leave this as zero. */
519                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
520         }
521         spin_unlock_irqrestore(&tp->indirect_lock, flags);
522 }
523
524 static void tg3_ape_lock_init(struct tg3 *tp)
525 {
526         int i;
527
528         /* Make sure the driver doesn't hold any stale locks. */
529         for (i = 0; i < 8; i++)
530                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
531                                 APE_LOCK_GRANT_DRIVER);
532 }
533
534 static int tg3_ape_lock(struct tg3 *tp, int locknum)
535 {
536         int i, off;
537         int ret = 0;
538         u32 status;
539
540         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
541                 return 0;
542
543         switch (locknum) {
544                 case TG3_APE_LOCK_GRC:
545                 case TG3_APE_LOCK_MEM:
546                         break;
547                 default:
548                         return -EINVAL;
549         }
550
551         off = 4 * locknum;
552
553         tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
554
555         /* Wait for up to 1 millisecond to acquire lock. */
556         for (i = 0; i < 100; i++) {
557                 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
558                 if (status == APE_LOCK_GRANT_DRIVER)
559                         break;
560                 udelay(10);
561         }
562
563         if (status != APE_LOCK_GRANT_DRIVER) {
564                 /* Revoke the lock request. */
565                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
566                                 APE_LOCK_GRANT_DRIVER);
567
568                 ret = -EBUSY;
569         }
570
571         return ret;
572 }
573
574 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
575 {
576         int off;
577
578         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
579                 return;
580
581         switch (locknum) {
582                 case TG3_APE_LOCK_GRC:
583                 case TG3_APE_LOCK_MEM:
584                         break;
585                 default:
586                         return;
587         }
588
589         off = 4 * locknum;
590         tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
591 }
592
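/*
 * Illustrative sketch (not part of the original driver): a hypothetical
 * caller bracketing access to an APE-shared resource with the lock
 * helpers above.  tg3_ape_lock() returns -EBUSY if the grant is not
 * seen within about a millisecond.
 */
static inline int tg3_example_under_ape_mem_lock(struct tg3 *tp)
{
	int err = tg3_ape_lock(tp, TG3_APE_LOCK_MEM);
	if (err)
		return err;

	/* ... touch whatever the MEM lock protects here ... */

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	return 0;
}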
593 static void tg3_disable_ints(struct tg3 *tp)
594 {
595         tw32(TG3PCI_MISC_HOST_CTRL,
596              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
597         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
598 }
599
600 static inline void tg3_cond_int(struct tg3 *tp)
601 {
602         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
603             (tp->hw_status->status & SD_STATUS_UPDATED))
604                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
605         else
606                 tw32(HOSTCC_MODE, tp->coalesce_mode |
607                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
608 }
609
610 static void tg3_enable_ints(struct tg3 *tp)
611 {
612         tp->irq_sync = 0;
613         wmb();
614
615         tw32(TG3PCI_MISC_HOST_CTRL,
616              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
617         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
618                        (tp->last_tag << 24));
619         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
620                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
621                                (tp->last_tag << 24));
622         tg3_cond_int(tp);
623 }
624
625 static inline unsigned int tg3_has_work(struct tg3 *tp)
626 {
627         struct tg3_hw_status *sblk = tp->hw_status;
628         unsigned int work_exists = 0;
629
630         /* check for phy events */
631         if (!(tp->tg3_flags &
632               (TG3_FLAG_USE_LINKCHG_REG |
633                TG3_FLAG_POLL_SERDES))) {
634                 if (sblk->status & SD_STATUS_LINK_CHG)
635                         work_exists = 1;
636         }
637         /* check for RX/TX work to do */
638         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
639             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
640                 work_exists = 1;
641
642         return work_exists;
643 }
644
645 /* tg3_restart_ints
646  *  similar to tg3_enable_ints, but it accurately determines whether there
647  *  is new work pending and can return without flushing the PIO write
648  *  which reenables interrupts
649  */
650 static void tg3_restart_ints(struct tg3 *tp)
651 {
652         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
653                      tp->last_tag << 24);
654         mmiowb();
655
656         /* When doing tagged status, this work check is unnecessary.
657          * The last_tag we write above tells the chip which piece of
658          * work we've completed.
659          */
660         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
661             tg3_has_work(tp))
662                 tw32(HOSTCC_MODE, tp->coalesce_mode |
663                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
664 }
665
666 static inline void tg3_netif_stop(struct tg3 *tp)
667 {
668         tp->dev->trans_start = jiffies; /* prevent tx timeout */
669         napi_disable(&tp->napi);
670         netif_tx_disable(tp->dev);
671 }
672
673 static inline void tg3_netif_start(struct tg3 *tp)
674 {
675         netif_wake_queue(tp->dev);
676         /* NOTE: unconditional netif_wake_queue is only appropriate
677          * so long as all callers are assured to have free tx slots
678          * (such as after tg3_init_hw)
679          */
680         napi_enable(&tp->napi);
681         tp->hw_status->status |= SD_STATUS_UPDATED;
682         tg3_enable_ints(tp);
683 }
684
685 static void tg3_switch_clocks(struct tg3 *tp)
686 {
687         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
688         u32 orig_clock_ctrl;
689
690         if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
691             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
692                 return;
693
694         orig_clock_ctrl = clock_ctrl;
695         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
696                        CLOCK_CTRL_CLKRUN_OENABLE |
697                        0x1f);
698         tp->pci_clock_ctrl = clock_ctrl;
699
700         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
701                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
702                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
703                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
704                 }
705         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
706                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
707                             clock_ctrl |
708                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
709                             40);
710                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
711                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
712                             40);
713         }
714         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
715 }
716
717 #define PHY_BUSY_LOOPS  5000
718
719 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
720 {
721         u32 frame_val;
722         unsigned int loops;
723         int ret;
724
725         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
726                 tw32_f(MAC_MI_MODE,
727                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
728                 udelay(80);
729         }
730
731         *val = 0x0;
732
733         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
734                       MI_COM_PHY_ADDR_MASK);
735         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
736                       MI_COM_REG_ADDR_MASK);
737         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
738
739         tw32_f(MAC_MI_COM, frame_val);
740
741         loops = PHY_BUSY_LOOPS;
742         while (loops != 0) {
743                 udelay(10);
744                 frame_val = tr32(MAC_MI_COM);
745
746                 if ((frame_val & MI_COM_BUSY) == 0) {
747                         udelay(5);
748                         frame_val = tr32(MAC_MI_COM);
749                         break;
750                 }
751                 loops -= 1;
752         }
753
754         ret = -EBUSY;
755         if (loops != 0) {
756                 *val = frame_val & MI_COM_DATA_MASK;
757                 ret = 0;
758         }
759
760         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
761                 tw32_f(MAC_MI_MODE, tp->mi_mode);
762                 udelay(80);
763         }
764
765         return ret;
766 }
767
768 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
769 {
770         u32 frame_val;
771         unsigned int loops;
772         int ret;
773
774         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
775             (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
776                 return 0;
777
778         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
779                 tw32_f(MAC_MI_MODE,
780                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
781                 udelay(80);
782         }
783
784         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
785                       MI_COM_PHY_ADDR_MASK);
786         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
787                       MI_COM_REG_ADDR_MASK);
788         frame_val |= (val & MI_COM_DATA_MASK);
789         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
790
791         tw32_f(MAC_MI_COM, frame_val);
792
793         loops = PHY_BUSY_LOOPS;
794         while (loops != 0) {
795                 udelay(10);
796                 frame_val = tr32(MAC_MI_COM);
797                 if ((frame_val & MI_COM_BUSY) == 0) {
798                         udelay(5);
799                         frame_val = tr32(MAC_MI_COM);
800                         break;
801                 }
802                 loops -= 1;
803         }
804
805         ret = -EBUSY;
806         if (loops != 0)
807                 ret = 0;
808
809         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
810                 tw32_f(MAC_MI_MODE, tp->mi_mode);
811                 udelay(80);
812         }
813
814         return ret;
815 }
816
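/*
 * Illustrative sketch (not part of the original file): a typical
 * read-modify-write of a PHY register built on the two accessors above.
 * The register and bits are whatever a hypothetical caller needs.
 */
static inline int tg3_example_phy_set_bits(struct tg3 *tp, int reg, u32 bits)
{
	u32 val;

	if (tg3_readphy(tp, reg, &val))
		return -EBUSY;

	return tg3_writephy(tp, reg, val | bits);
}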
817 static int tg3_bmcr_reset(struct tg3 *tp)
818 {
819         u32 phy_control;
820         int limit, err;
821
822         /* OK, reset it, and poll the BMCR_RESET bit until it
823          * clears or we time out.
824          */
825         phy_control = BMCR_RESET;
826         err = tg3_writephy(tp, MII_BMCR, phy_control);
827         if (err != 0)
828                 return -EBUSY;
829
830         limit = 5000;
831         while (limit--) {
832                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
833                 if (err != 0)
834                         return -EBUSY;
835
836                 if ((phy_control & BMCR_RESET) == 0) {
837                         udelay(40);
838                         break;
839                 }
840                 udelay(10);
841         }
842         if (limit <= 0)
843                 return -EBUSY;
844
845         return 0;
846 }
847
848 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
849 {
850         struct tg3 *tp = (struct tg3 *)bp->priv;
851         u32 val;
852
853         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
854                 return -EAGAIN;
855
856         if (tg3_readphy(tp, reg, &val))
857                 return -EIO;
858
859         return val;
860 }
861
862 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
863 {
864         struct tg3 *tp = (struct tg3 *)bp->priv;
865
866         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
867                 return -EAGAIN;
868
869         if (tg3_writephy(tp, reg, val))
870                 return -EIO;
871
872         return 0;
873 }
874
875 static int tg3_mdio_reset(struct mii_bus *bp)
876 {
877         return 0;
878 }
879
880 static void tg3_mdio_config_5785(struct tg3 *tp)
881 {
882         u32 val;
883         struct phy_device *phydev;
884
885         phydev = tp->mdio_bus->phy_map[PHY_ADDR];
886         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
887         case TG3_PHY_ID_BCM50610:
888                 val = MAC_PHYCFG2_50610_LED_MODES;
889                 break;
890         case TG3_PHY_ID_BCMAC131:
891                 val = MAC_PHYCFG2_AC131_LED_MODES;
892                 break;
893         case TG3_PHY_ID_RTL8211C:
894                 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
895                 break;
896         case TG3_PHY_ID_RTL8201E:
897                 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
898                 break;
899         default:
900                 return;
901         }
902
903         if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
904                 tw32(MAC_PHYCFG2, val);
905
906                 val = tr32(MAC_PHYCFG1);
907                 val &= ~MAC_PHYCFG1_RGMII_INT;
908                 tw32(MAC_PHYCFG1, val);
909
910                 return;
911         }
912
913         if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
914                 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
915                        MAC_PHYCFG2_FMODE_MASK_MASK |
916                        MAC_PHYCFG2_GMODE_MASK_MASK |
917                        MAC_PHYCFG2_ACT_MASK_MASK   |
918                        MAC_PHYCFG2_QUAL_MASK_MASK |
919                        MAC_PHYCFG2_INBAND_ENABLE;
920
921         tw32(MAC_PHYCFG2, val);
922
923         val = tr32(MAC_PHYCFG1) & ~(MAC_PHYCFG1_RGMII_EXT_RX_DEC |
924                                     MAC_PHYCFG1_RGMII_SND_STAT_EN);
925         if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
926                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
927                         val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
928                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
929                         val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
930         }
931         tw32(MAC_PHYCFG1, val | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV);
932
933         val = tr32(MAC_EXT_RGMII_MODE);
934         val &= ~(MAC_RGMII_MODE_RX_INT_B |
935                  MAC_RGMII_MODE_RX_QUALITY |
936                  MAC_RGMII_MODE_RX_ACTIVITY |
937                  MAC_RGMII_MODE_RX_ENG_DET |
938                  MAC_RGMII_MODE_TX_ENABLE |
939                  MAC_RGMII_MODE_TX_LOWPWR |
940                  MAC_RGMII_MODE_TX_RESET);
941         if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
942                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
943                         val |= MAC_RGMII_MODE_RX_INT_B |
944                                MAC_RGMII_MODE_RX_QUALITY |
945                                MAC_RGMII_MODE_RX_ACTIVITY |
946                                MAC_RGMII_MODE_RX_ENG_DET;
947                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
948                         val |= MAC_RGMII_MODE_TX_ENABLE |
949                                MAC_RGMII_MODE_TX_LOWPWR |
950                                MAC_RGMII_MODE_TX_RESET;
951         }
952         tw32(MAC_EXT_RGMII_MODE, val);
953 }
954
955 static void tg3_mdio_start(struct tg3 *tp)
956 {
957         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
958                 mutex_lock(&tp->mdio_bus->mdio_lock);
959                 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
960                 mutex_unlock(&tp->mdio_bus->mdio_lock);
961         }
962
963         tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
964         tw32_f(MAC_MI_MODE, tp->mi_mode);
965         udelay(80);
966
967         if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
968             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
969                 tg3_mdio_config_5785(tp);
970 }
971
972 static void tg3_mdio_stop(struct tg3 *tp)
973 {
974         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
975                 mutex_lock(&tp->mdio_bus->mdio_lock);
976                 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
977                 mutex_unlock(&tp->mdio_bus->mdio_lock);
978         }
979 }
980
981 static int tg3_mdio_init(struct tg3 *tp)
982 {
983         int i;
984         u32 reg;
985         struct phy_device *phydev;
986
987         tg3_mdio_start(tp);
988
989         if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
990             (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
991                 return 0;
992
993         tp->mdio_bus = mdiobus_alloc();
994         if (tp->mdio_bus == NULL)
995                 return -ENOMEM;
996
997         tp->mdio_bus->name     = "tg3 mdio bus";
998         snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
999                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1000         tp->mdio_bus->priv     = tp;
1001         tp->mdio_bus->parent   = &tp->pdev->dev;
1002         tp->mdio_bus->read     = &tg3_mdio_read;
1003         tp->mdio_bus->write    = &tg3_mdio_write;
1004         tp->mdio_bus->reset    = &tg3_mdio_reset;
1005         tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR);
1006         tp->mdio_bus->irq      = &tp->mdio_irq[0];
1007
1008         for (i = 0; i < PHY_MAX_ADDR; i++)
1009                 tp->mdio_bus->irq[i] = PHY_POLL;
1010
1011         /* The bus registration will look for all the PHYs on the mdio bus.
1012          * Unfortunately, it does not ensure the PHY is powered up before
1013          * accessing the PHY ID registers.  A chip reset is the
1014          * quickest way to bring the device back to an operational state.
1015          */
1016         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1017                 tg3_bmcr_reset(tp);
1018
1019         i = mdiobus_register(tp->mdio_bus);
1020         if (i) {
1021                 printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
1022                         tp->dev->name, i);
1023                 mdiobus_free(tp->mdio_bus);
1024                 return i;
1025         }
1026
1027         phydev = tp->mdio_bus->phy_map[PHY_ADDR];
1028
1029         if (!phydev || !phydev->drv) {
1030                 printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
1031                 mdiobus_unregister(tp->mdio_bus);
1032                 mdiobus_free(tp->mdio_bus);
1033                 return -ENODEV;
1034         }
1035
1036         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1037         case TG3_PHY_ID_BCM50610:
1038                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
1039                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1040                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
1041                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1042                 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
1043                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1044                 /* fallthru */
1045         case TG3_PHY_ID_RTL8211C:
1046                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1047                 break;
1048         case TG3_PHY_ID_RTL8201E:
1049         case TG3_PHY_ID_BCMAC131:
1050                 phydev->interface = PHY_INTERFACE_MODE_MII;
1051                 break;
1052         }
1053
1054         tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;
1055
1056         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1057                 tg3_mdio_config_5785(tp);
1058
1059         return 0;
1060 }
1061
1062 static void tg3_mdio_fini(struct tg3 *tp)
1063 {
1064         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1065                 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
1066                 mdiobus_unregister(tp->mdio_bus);
1067                 mdiobus_free(tp->mdio_bus);
1068                 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
1069         }
1070 }
1071
1072 /* tp->lock is held. */
1073 static inline void tg3_generate_fw_event(struct tg3 *tp)
1074 {
1075         u32 val;
1076
1077         val = tr32(GRC_RX_CPU_EVENT);
1078         val |= GRC_RX_CPU_DRIVER_EVENT;
1079         tw32_f(GRC_RX_CPU_EVENT, val);
1080
1081         tp->last_event_jiffies = jiffies;
1082 }
1083
1084 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1085
1086 /* tp->lock is held. */
1087 static void tg3_wait_for_event_ack(struct tg3 *tp)
1088 {
1089         int i;
1090         unsigned int delay_cnt;
1091         long time_remain;
1092
1093         /* If enough time has passed, no wait is necessary. */
1094         time_remain = (long)(tp->last_event_jiffies + 1 +
1095                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1096                       (long)jiffies;
1097         if (time_remain < 0)
1098                 return;
1099
1100         /* Check if we can shorten the wait time. */
1101         delay_cnt = jiffies_to_usecs(time_remain);
1102         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1103                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1104         delay_cnt = (delay_cnt >> 3) + 1;       /* poll loop below waits 8 usec per pass */
1105
1106         for (i = 0; i < delay_cnt; i++) {
1107                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1108                         break;
1109                 udelay(8);
1110         }
1111 }
1112
1113 /* tp->lock is held. */
1114 static void tg3_ump_link_report(struct tg3 *tp)
1115 {
1116         u32 reg;
1117         u32 val;
1118
1119         if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1120             !(tp->tg3_flags  & TG3_FLAG_ENABLE_ASF))
1121                 return;
1122
1123         tg3_wait_for_event_ack(tp);
1124
1125         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1126
1127         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1128
1129         val = 0;
1130         if (!tg3_readphy(tp, MII_BMCR, &reg))
1131                 val = reg << 16;
1132         if (!tg3_readphy(tp, MII_BMSR, &reg))
1133                 val |= (reg & 0xffff);
1134         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1135
1136         val = 0;
1137         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1138                 val = reg << 16;
1139         if (!tg3_readphy(tp, MII_LPA, &reg))
1140                 val |= (reg & 0xffff);
1141         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1142
1143         val = 0;
1144         if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
1145                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1146                         val = reg << 16;
1147                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1148                         val |= (reg & 0xffff);
1149         }
1150         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1151
1152         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1153                 val = reg << 16;
1154         else
1155                 val = 0;
1156         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1157
1158         tg3_generate_fw_event(tp);
1159 }
1160
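/*
 * Illustrative note (not part of the original file): the driver-to-firmware
 * handshake used by tg3_ump_link_report() above follows this pattern:
 *
 *	tg3_wait_for_event_ack(tp);                      wait out the previous event
 *	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, cmd);    command word
 *	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, len);
 *	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4 * i, data[i]);
 *	tg3_generate_fw_event(tp);                       raise GRC_RX_CPU_DRIVER_EVENT
 */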
1161 static void tg3_link_report(struct tg3 *tp)
1162 {
1163         if (!netif_carrier_ok(tp->dev)) {
1164                 if (netif_msg_link(tp))
1165                         printk(KERN_INFO PFX "%s: Link is down.\n",
1166                                tp->dev->name);
1167                 tg3_ump_link_report(tp);
1168         } else if (netif_msg_link(tp)) {
1169                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1170                        tp->dev->name,
1171                        (tp->link_config.active_speed == SPEED_1000 ?
1172                         1000 :
1173                         (tp->link_config.active_speed == SPEED_100 ?
1174                          100 : 10)),
1175                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1176                         "full" : "half"));
1177
1178                 printk(KERN_INFO PFX
1179                        "%s: Flow control is %s for TX and %s for RX.\n",
1180                        tp->dev->name,
1181                        (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
1182                        "on" : "off",
1183                        (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
1184                        "on" : "off");
1185                 tg3_ump_link_report(tp);
1186         }
1187 }
1188
1189 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1190 {
1191         u16 miireg;
1192
1193         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1194                 miireg = ADVERTISE_PAUSE_CAP;
1195         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1196                 miireg = ADVERTISE_PAUSE_ASYM;
1197         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1198                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1199         else
1200                 miireg = 0;
1201
1202         return miireg;
1203 }
1204
1205 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1206 {
1207         u16 miireg;
1208
1209         if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1210                 miireg = ADVERTISE_1000XPAUSE;
1211         else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1212                 miireg = ADVERTISE_1000XPSE_ASYM;
1213         else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1214                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1215         else
1216                 miireg = 0;
1217
1218         return miireg;
1219 }
1220
1221 static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
1222 {
1223         u8 cap = 0;
1224
1225         if (lcladv & ADVERTISE_PAUSE_CAP) {
1226                 if (lcladv & ADVERTISE_PAUSE_ASYM) {
1227                         if (rmtadv & LPA_PAUSE_CAP)
1228                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1229                         else if (rmtadv & LPA_PAUSE_ASYM)
1230                                 cap = TG3_FLOW_CTRL_RX;
1231                 } else {
1232                         if (rmtadv & LPA_PAUSE_CAP)
1233                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1234                 }
1235         } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
1236                 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
1237                         cap = TG3_FLOW_CTRL_TX;
1238         }
1239
1240         return cap;
1241 }
1242
1243 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1244 {
1245         u8 cap = 0;
1246
1247         if (lcladv & ADVERTISE_1000XPAUSE) {
1248                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1249                         if (rmtadv & LPA_1000XPAUSE)
1250                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1251                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1252                                 cap = TG3_FLOW_CTRL_RX;
1253                 } else {
1254                         if (rmtadv & LPA_1000XPAUSE)
1255                                 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1256                 }
1257         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1258                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1259                         cap = TG3_FLOW_CTRL_TX;
1260         }
1261
1262         return cap;
1263 }
1264
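/*
 * Illustrative summary (not part of the original file) of the standard
 * pause resolution implemented by the two tg3_resolve_flowctrl_*()
 * helpers above ("Pause"/"Asym" are the local and remote advertisements):
 *
 *	local Pause  local Asym  remote Pause  remote Asym   resolved
 *	     1           -            1             -         TX + RX
 *	     1           1            0             1         RX only
 *	     0           1            1             1         TX only
 *	   anything else                                      none
 */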
1265 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1266 {
1267         u8 autoneg;
1268         u8 flowctrl = 0;
1269         u32 old_rx_mode = tp->rx_mode;
1270         u32 old_tx_mode = tp->tx_mode;
1271
1272         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
1273                 autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg;
1274         else
1275                 autoneg = tp->link_config.autoneg;
1276
1277         if (autoneg == AUTONEG_ENABLE &&
1278             (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1279                 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
1280                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1281                 else
1282                         flowctrl = tg3_resolve_flowctrl_1000T(lcladv, rmtadv);
1283         } else
1284                 flowctrl = tp->link_config.flowctrl;
1285
1286         tp->link_config.active_flowctrl = flowctrl;
1287
1288         if (flowctrl & TG3_FLOW_CTRL_RX)
1289                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1290         else
1291                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1292
1293         if (old_rx_mode != tp->rx_mode)
1294                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1295
1296         if (flowctrl & TG3_FLOW_CTRL_TX)
1297                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1298         else
1299                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1300
1301         if (old_tx_mode != tp->tx_mode)
1302                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1303 }
1304
1305 static void tg3_adjust_link(struct net_device *dev)
1306 {
1307         u8 oldflowctrl, linkmesg = 0;
1308         u32 mac_mode, lcl_adv, rmt_adv;
1309         struct tg3 *tp = netdev_priv(dev);
1310         struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];
1311
1312         spin_lock(&tp->lock);
1313
1314         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1315                                     MAC_MODE_HALF_DUPLEX);
1316
1317         oldflowctrl = tp->link_config.active_flowctrl;
1318
1319         if (phydev->link) {
1320                 lcl_adv = 0;
1321                 rmt_adv = 0;
1322
1323                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1324                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1325                 else
1326                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1327
1328                 if (phydev->duplex == DUPLEX_HALF)
1329                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1330                 else {
1331                         lcl_adv = tg3_advert_flowctrl_1000T(
1332                                   tp->link_config.flowctrl);
1333
1334                         if (phydev->pause)
1335                                 rmt_adv = LPA_PAUSE_CAP;
1336                         if (phydev->asym_pause)
1337                                 rmt_adv |= LPA_PAUSE_ASYM;
1338                 }
1339
1340                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1341         } else
1342                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1343
1344         if (mac_mode != tp->mac_mode) {
1345                 tp->mac_mode = mac_mode;
1346                 tw32_f(MAC_MODE, tp->mac_mode);
1347                 udelay(40);
1348         }
1349
1350         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1351                 if (phydev->speed == SPEED_10)
1352                         tw32(MAC_MI_STAT,
1353                              MAC_MI_STAT_10MBPS_MODE |
1354                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1355                 else
1356                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1357         }
1358
1359         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1360                 tw32(MAC_TX_LENGTHS,
1361                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1362                       (6 << TX_LENGTHS_IPG_SHIFT) |
1363                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1364         else
1365                 tw32(MAC_TX_LENGTHS,
1366                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1367                       (6 << TX_LENGTHS_IPG_SHIFT) |
1368                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1369
1370         if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1371             (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1372             phydev->speed != tp->link_config.active_speed ||
1373             phydev->duplex != tp->link_config.active_duplex ||
1374             oldflowctrl != tp->link_config.active_flowctrl)
1375             linkmesg = 1;
1376
1377         tp->link_config.active_speed = phydev->speed;
1378         tp->link_config.active_duplex = phydev->duplex;
1379
1380         spin_unlock(&tp->lock);
1381
1382         if (linkmesg)
1383                 tg3_link_report(tp);
1384 }
1385
1386 static int tg3_phy_init(struct tg3 *tp)
1387 {
1388         struct phy_device *phydev;
1389
1390         if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
1391                 return 0;
1392
1393         /* Bring the PHY back to a known state. */
1394         tg3_bmcr_reset(tp);
1395
1396         phydev = tp->mdio_bus->phy_map[PHY_ADDR];
1397
1398         /* Attach the MAC to the PHY. */
1399         phydev = phy_connect(tp->dev, phydev->dev.bus_id, tg3_adjust_link,
1400                              phydev->dev_flags, phydev->interface);
1401         if (IS_ERR(phydev)) {
1402                 printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
1403                 return PTR_ERR(phydev);
1404         }
1405
1406         /* Mask with MAC supported features. */
1407         switch (phydev->interface) {
1408         case PHY_INTERFACE_MODE_GMII:
1409         case PHY_INTERFACE_MODE_RGMII:
1410                 phydev->supported &= (PHY_GBIT_FEATURES |
1411                                       SUPPORTED_Pause |
1412                                       SUPPORTED_Asym_Pause);
1413                 break;
1414         case PHY_INTERFACE_MODE_MII:
1415                 phydev->supported &= (PHY_BASIC_FEATURES |
1416                                       SUPPORTED_Pause |
1417                                       SUPPORTED_Asym_Pause);
1418                 break;
1419         default:
1420                 phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
1421                 return -EINVAL;
1422         }
1423
1424         tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;
1425
1426         phydev->advertising = phydev->supported;
1427
1428         return 0;
1429 }
1430
1431 static void tg3_phy_start(struct tg3 *tp)
1432 {
1433         struct phy_device *phydev;
1434
1435         if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1436                 return;
1437
1438         phydev = tp->mdio_bus->phy_map[PHY_ADDR];
1439
1440         if (tp->link_config.phy_is_low_power) {
1441                 tp->link_config.phy_is_low_power = 0;
1442                 phydev->speed = tp->link_config.orig_speed;
1443                 phydev->duplex = tp->link_config.orig_duplex;
1444                 phydev->autoneg = tp->link_config.orig_autoneg;
1445                 phydev->advertising = tp->link_config.orig_advertising;
1446         }
1447
1448         phy_start(phydev);
1449
1450         phy_start_aneg(phydev);
1451 }
1452
1453 static void tg3_phy_stop(struct tg3 *tp)
1454 {
1455         if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1456                 return;
1457
1458         phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]);
1459 }
1460
1461 static void tg3_phy_fini(struct tg3 *tp)
1462 {
1463         if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
1464                 phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
1465                 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
1466         }
1467 }
1468
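/* Write a PHY DSP register through the address/data port pair. */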
1469 static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1470 {
1471         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1472         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1473 }
1474
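/* Enable or disable automatic MDI/MDI-X crossover.  The 5906 uses the
 * EPHY shadow registers; other 5705+ copper devices use the AUX control
 * miscellaneous shadow register.
 */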
1475 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1476 {
1477         u32 phy;
1478
1479         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1480             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1481                 return;
1482
1483         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1484                 u32 ephy;
1485
1486                 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
1487                         tg3_writephy(tp, MII_TG3_EPHY_TEST,
1488                                      ephy | MII_TG3_EPHY_SHADOW_EN);
1489                         if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
1490                                 if (enable)
1491                                         phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
1492                                 else
1493                                         phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
1494                                 tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
1495                         }
1496                         tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
1497                 }
1498         } else {
1499                 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
1500                       MII_TG3_AUXCTL_SHDWSEL_MISC;
1501                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
1502                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
1503                         if (enable)
1504                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1505                         else
1506                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1507                         phy |= MII_TG3_AUXCTL_MISC_WREN;
1508                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1509                 }
1510         }
1511 }
1512
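/* Enable the PHY's Ethernet@Wirespeed feature (link at a reduced speed
 * over marginal cabling) unless it is disabled for this board.
 */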
1513 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1514 {
1515         u32 val;
1516
1517         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
1518                 return;
1519
1520         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
1521             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
1522                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
1523                              (val | (1 << 15) | (1 << 4)));
1524 }
1525
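/* Program PHY DSP coefficients from the chip's one-time-programmable
 * (OTP) configuration value.
 */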
1526 static void tg3_phy_apply_otp(struct tg3 *tp)
1527 {
1528         u32 otp, phy;
1529
1530         if (!tp->phy_otp)
1531                 return;
1532
1533         otp = tp->phy_otp;
1534
1535         /* Enable SM_DSP clock and tx 6dB coding. */
1536         phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1537               MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
1538               MII_TG3_AUXCTL_ACTL_TX_6DB;
1539         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1540
1541         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1542         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1543         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1544
1545         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1546               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1547         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1548
1549         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1550         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1551         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1552
1553         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1554         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1555
1556         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1557         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1558
1559         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1560               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1561         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1562
1563         /* Turn off SM_DSP clock. */
1564         phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1565               MII_TG3_AUXCTL_ACTL_TX_6DB;
1566         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1567 }
1568
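/* Poll PHY register 0x16 until the DSP macro busy bit (0x1000) clears.
 * Returns -EBUSY if the macro never completes.
 */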
1569 static int tg3_wait_macro_done(struct tg3 *tp)
1570 {
1571         int limit = 100;
1572
1573         while (limit--) {
1574                 u32 tmp32;
1575
1576                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
1577                         if ((tmp32 & 0x1000) == 0)
1578                                 break;
1579                 }
1580         }
1581         if (limit < 0)
1582                 return -EBUSY;
1583
1584         return 0;
1585 }
1586
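/* Write a known test pattern into each of the four DSP channels and
 * read it back to verify the PHY.  Sets *resetp when another PHY reset
 * is needed and returns -EBUSY on any failure.
 */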
1587 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1588 {
1589         static const u32 test_pat[4][6] = {
1590         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1591         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1592         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1593         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1594         };
1595         int chan;
1596
1597         for (chan = 0; chan < 4; chan++) {
1598                 int i;
1599
1600                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1601                              (chan * 0x2000) | 0x0200);
1602                 tg3_writephy(tp, 0x16, 0x0002);
1603
1604                 for (i = 0; i < 6; i++)
1605                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1606                                      test_pat[chan][i]);
1607
1608                 tg3_writephy(tp, 0x16, 0x0202);
1609                 if (tg3_wait_macro_done(tp)) {
1610                         *resetp = 1;
1611                         return -EBUSY;
1612                 }
1613
1614                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1615                              (chan * 0x2000) | 0x0200);
1616                 tg3_writephy(tp, 0x16, 0x0082);
1617                 if (tg3_wait_macro_done(tp)) {
1618                         *resetp = 1;
1619                         return -EBUSY;
1620                 }
1621
1622                 tg3_writephy(tp, 0x16, 0x0802);
1623                 if (tg3_wait_macro_done(tp)) {
1624                         *resetp = 1;
1625                         return -EBUSY;
1626                 }
1627
1628                 for (i = 0; i < 6; i += 2) {
1629                         u32 low, high;
1630
1631                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1632                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1633                             tg3_wait_macro_done(tp)) {
1634                                 *resetp = 1;
1635                                 return -EBUSY;
1636                         }
1637                         low &= 0x7fff;
1638                         high &= 0x000f;
1639                         if (low != test_pat[chan][i] ||
1640                             high != test_pat[chan][i+1]) {
1641                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1642                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1643                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1644
1645                                 return -EBUSY;
1646                         }
1647                 }
1648         }
1649
1650         return 0;
1651 }
1652
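/* Clear the test pattern out of all four DSP channels. */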
1653 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1654 {
1655         int chan;
1656
1657         for (chan = 0; chan < 4; chan++) {
1658                 int i;
1659
1660                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1661                              (chan * 0x2000) | 0x0200);
1662                 tg3_writephy(tp, 0x16, 0x0002);
1663                 for (i = 0; i < 6; i++)
1664                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1665                 tg3_writephy(tp, 0x16, 0x0202);
1666                 if (tg3_wait_macro_done(tp))
1667                         return -EBUSY;
1668         }
1669
1670         return 0;
1671 }
1672
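/* PHY reset workaround for 5703/5704/5705: reset the PHY, force
 * 1000/full master mode and verify the DSP channel test patterns,
 * retrying with a fresh reset until they check out.
 */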
1673 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1674 {
1675         u32 reg32, phy9_orig;
1676         int retries, do_phy_reset, err;
1677
1678         retries = 10;
1679         do_phy_reset = 1;
1680         do {
1681                 if (do_phy_reset) {
1682                         err = tg3_bmcr_reset(tp);
1683                         if (err)
1684                                 return err;
1685                         do_phy_reset = 0;
1686                 }
1687
1688                 /* Disable transmitter and interrupt.  */
1689                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1690                         continue;
1691
1692                 reg32 |= 0x3000;
1693                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1694
1695                 /* Set full-duplex, 1000 Mbps.  */
1696                 tg3_writephy(tp, MII_BMCR,
1697                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1698
1699                 /* Set to master mode.  */
1700                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1701                         continue;
1702
1703                 tg3_writephy(tp, MII_TG3_CTRL,
1704                              (MII_TG3_CTRL_AS_MASTER |
1705                               MII_TG3_CTRL_ENABLE_AS_MASTER));
1706
1707                 /* Enable SM_DSP_CLOCK and 6dB.  */
1708                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1709
1710                 /* Block the PHY control access.  */
1711                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1712                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1713
1714                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1715                 if (!err)
1716                         break;
1717         } while (--retries);
1718
1719         err = tg3_phy_reset_chanpat(tp);
1720         if (err)
1721                 return err;
1722
1723         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1724         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1725
1726         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1727         tg3_writephy(tp, 0x16, 0x0000);
1728
1729         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1730             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1731                 /* Set Extended packet length bit for jumbo frames */
1732                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1733         } else {
1735                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1736         }
1737
1738         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1739
1740         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1741                 reg32 &= ~0x3000;
1742                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1743         } else if (!err)
1744                 err = -EBUSY;
1745
1746         return err;
1747 }
1748
1749 /* This will reset the tigon3 PHY and reapply the chip-specific
1750  * workarounds that the reset wipes out.
1751  */
1752 static int tg3_phy_reset(struct tg3 *tp)
1753 {
1754         u32 cpmuctrl;
1755         u32 phy_status;
1756         int err;
1757
1758         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1759                 u32 val;
1760
1761                 val = tr32(GRC_MISC_CFG);
1762                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1763                 udelay(40);
1764         }
1765         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
1766         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1767         if (err != 0)
1768                 return -EBUSY;
1769
1770         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1771                 netif_carrier_off(tp->dev);
1772                 tg3_link_report(tp);
1773         }
1774
1775         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1776             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1777             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1778                 err = tg3_phy_reset_5703_4_5(tp);
1779                 if (err)
1780                         return err;
1781                 goto out;
1782         }
1783
1784         cpmuctrl = 0;
1785         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
1786             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
1787                 cpmuctrl = tr32(TG3_CPMU_CTRL);
1788                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
1789                         tw32(TG3_CPMU_CTRL,
1790                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
1791         }
1792
1793         err = tg3_bmcr_reset(tp);
1794         if (err)
1795                 return err;
1796
1797         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
1798                 u32 phy;
1799
1800                 phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1801                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
1802
1803                 tw32(TG3_CPMU_CTRL, cpmuctrl);
1804         }
1805
1806         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
1807             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
1808                 u32 val;
1809
1810                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1811                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1812                     CPMU_LSPD_1000MB_MACCLK_12_5) {
1813                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1814                         udelay(40);
1815                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1816                 }
1817
1818                 /* Disable GPHY autopowerdown. */
1819                 tg3_writephy(tp, MII_TG3_MISC_SHDW,
1820                              MII_TG3_MISC_SHDW_WREN |
1821                              MII_TG3_MISC_SHDW_APD_SEL |
1822                              MII_TG3_MISC_SHDW_APD_WKTM_84MS);
1823         }
1824
1825         tg3_phy_apply_otp(tp);
1826
1827 out:
1828         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1829                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1830                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1831                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1832                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1833                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1834                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1835         }
1836         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1837                 tg3_writephy(tp, 0x1c, 0x8d68);
1838                 tg3_writephy(tp, 0x1c, 0x8d68);
1839         }
1840         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1841                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1842                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1843                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1844                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1845                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1846                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1847                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1848                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1849         } else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1851                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1852                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1853                 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1854                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1855                         tg3_writephy(tp, MII_TG3_TEST1,
1856                                      MII_TG3_TEST1_TRIM_EN | 0x4);
1857                 } else
1858                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1859                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1860         }
1861         /* Set Extended packet length bit (bit 14) on all chips that */
1862         /* support jumbo frames */
1863         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1864                 /* Cannot do read-modify-write on 5401 */
1865                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1866         } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1867                 u32 phy_reg;
1868
1869                 /* Set bit 14 with read-modify-write to preserve other bits */
1870                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1871                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1872                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1873         }
1874
1875         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1876          * jumbo frames transmission.
1877          */
1878         if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1879                 u32 phy_reg;
1880
1881                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1882                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
1883                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1884         }
1885
1886         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1887                 /* adjust output voltage */
1888                 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
1889         }
1890
1891         tg3_phy_toggle_automdix(tp, 1);
1892         tg3_phy_set_wirespeed(tp);
1893         return 0;
1894 }
1895
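/* Drive the GPIOs that switch the board's auxiliary (Vaux) power,
 * coordinating with the peer port on 5704/5714 boards so that one port
 * does not cut power the other still needs for WOL or ASF.
 */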
1896 static void tg3_frob_aux_power(struct tg3 *tp)
1897 {
1898         struct tg3 *tp_peer = tp;
1899
1900         if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
1901                 return;
1902
1903         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1904             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1905                 struct net_device *dev_peer;
1906
1907                 dev_peer = pci_get_drvdata(tp->pdev_peer);
1908                 /* remove_one() may have been run on the peer. */
1909                 if (!dev_peer)
1910                         tp_peer = tp;
1911                 else
1912                         tp_peer = netdev_priv(dev_peer);
1913         }
1914
1915         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1916             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1917             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1918             (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1919                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1920                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1921                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1922                                     (GRC_LCLCTRL_GPIO_OE0 |
1923                                      GRC_LCLCTRL_GPIO_OE1 |
1924                                      GRC_LCLCTRL_GPIO_OE2 |
1925                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
1926                                      GRC_LCLCTRL_GPIO_OUTPUT1),
1927                                     100);
1928                 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
1929                         /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
1930                         u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
1931                                              GRC_LCLCTRL_GPIO_OE1 |
1932                                              GRC_LCLCTRL_GPIO_OE2 |
1933                                              GRC_LCLCTRL_GPIO_OUTPUT0 |
1934                                              GRC_LCLCTRL_GPIO_OUTPUT1 |
1935                                              tp->grc_local_ctrl;
1936                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1937
1938                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
1939                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1940
1941                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
1942                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1943                 } else {
1944                         u32 no_gpio2;
1945                         u32 grc_local_ctrl = 0;
1946
1947                         if (tp_peer != tp &&
1948                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1949                                 return;
1950
1951                         /* Workaround to prevent overdrawing Amps. */
1952                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1953                             ASIC_REV_5714) {
1954                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1955                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1956                                             grc_local_ctrl, 100);
1957                         }
1958
1959                         /* On 5753 and variants, GPIO2 cannot be used. */
1960                         no_gpio2 = tp->nic_sram_data_cfg &
1961                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
1962
1963                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1964                                          GRC_LCLCTRL_GPIO_OE1 |
1965                                          GRC_LCLCTRL_GPIO_OE2 |
1966                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
1967                                          GRC_LCLCTRL_GPIO_OUTPUT2;
1968                         if (no_gpio2) {
1969                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1970                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
1971                         }
1972                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1973                                                     grc_local_ctrl, 100);
1974
1975                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1976
1977                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1978                                                     grc_local_ctrl, 100);
1979
1980                         if (!no_gpio2) {
1981                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1982                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1983                                             grc_local_ctrl, 100);
1984                         }
1985                 }
1986         } else {
1987                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1988                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1989                         if (tp_peer != tp &&
1990                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1991                                 return;
1992
1993                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1994                                     (GRC_LCLCTRL_GPIO_OE1 |
1995                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1996
1997                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1998                                     GRC_LCLCTRL_GPIO_OE1, 100);
1999
2000                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2001                                     (GRC_LCLCTRL_GPIO_OE1 |
2002                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2003                 }
2004         }
2005 }
2006
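/* Decide whether the MAC_MODE link polarity bit should be set for the
 * given link speed, based on the LED mode and PHY type.
 */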
2007 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2008 {
2009         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2010                 return 1;
2011         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
2012                 if (speed != SPEED_10)
2013                         return 1;
2014         } else if (speed == SPEED_10)
2015                 return 1;
2016
2017         return 0;
2018 }
2019
2020 static int tg3_setup_phy(struct tg3 *, int);
2021
2022 #define RESET_KIND_SHUTDOWN     0
2023 #define RESET_KIND_INIT         1
2024 #define RESET_KIND_SUSPEND      2
2025
2026 static void tg3_write_sig_post_reset(struct tg3 *, int);
2027 static int tg3_halt_cpu(struct tg3 *, u32);
2028 static int tg3_nvram_lock(struct tg3 *);
2029 static void tg3_nvram_unlock(struct tg3 *);
2030
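/* Power down the PHY (or isolate the SerDes) before suspending,
 * skipping the power-down on chips where it is known to be unsafe.
 */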
2031 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2032 {
2033         u32 val;
2034
2035         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2036                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2037                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2038                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2039
2040                         sg_dig_ctrl |=
2041                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2042                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2043                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2044                 }
2045                 return;
2046         }
2047
2048         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2049                 tg3_bmcr_reset(tp);
2050                 val = tr32(GRC_MISC_CFG);
2051                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2052                 udelay(40);
2053                 return;
2054         } else if (do_low_power) {
2055                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2056                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2057
2058                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
2059                              MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
2060                              MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2061                              MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2062                              MII_TG3_AUXCTL_PCTL_VREG_11V);
2063         }
2064
2065         /* The PHY should not be powered down on some chips because
2066          * of bugs.
2067          */
2068         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2069             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2070             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2071              (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
2072                 return;
2073
2074         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2075             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2076                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2077                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2078                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2079                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2080         }
2081
2082         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2083 }
2084
2085 /* tp->lock is held. */
2086 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2087 {
2088         u32 addr_high, addr_low;
2089         int i;
2090
2091         addr_high = ((tp->dev->dev_addr[0] << 8) |
2092                      tp->dev->dev_addr[1]);
2093         addr_low = ((tp->dev->dev_addr[2] << 24) |
2094                     (tp->dev->dev_addr[3] << 16) |
2095                     (tp->dev->dev_addr[4] <<  8) |
2096                     (tp->dev->dev_addr[5] <<  0));
2097         for (i = 0; i < 4; i++) {
2098                 if (i == 1 && skip_mac_1)
2099                         continue;
2100                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2101                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2102         }
2103
2104         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2105             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2106                 for (i = 0; i < 12; i++) {
2107                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2108                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2109                 }
2110         }
2111
2112         addr_high = (tp->dev->dev_addr[0] +
2113                      tp->dev->dev_addr[1] +
2114                      tp->dev->dev_addr[2] +
2115                      tp->dev->dev_addr[3] +
2116                      tp->dev->dev_addr[4] +
2117                      tp->dev->dev_addr[5]) &
2118                 TX_BACKOFF_SEED_MASK;
2119         tw32(MAC_TX_BACKOFF_SEED, addr_high);
2120 }
2121
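/* Transition the device to the requested PCI power state.  For the
 * sleep states this saves the current link configuration, arms
 * wake-on-LAN if requested, slows the core clocks and powers down the
 * PHY when nothing needs to stay awake.
 */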
2122 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2123 {
2124         u32 misc_host_ctrl;
2125         bool device_should_wake, do_low_power;
2126
2127         /* Make sure register accesses (indirect or otherwise)
2128          * will function correctly.
2129          */
2130         pci_write_config_dword(tp->pdev,
2131                                TG3PCI_MISC_HOST_CTRL,
2132                                tp->misc_host_ctrl);
2133
2134         switch (state) {
2135         case PCI_D0:
2136                 pci_enable_wake(tp->pdev, state, false);
2137                 pci_set_power_state(tp->pdev, PCI_D0);
2138
2139                 /* Switch out of Vaux if it is a NIC */
2140                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2141                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2142
2143                 return 0;
2144
2145         case PCI_D1:
2146         case PCI_D2:
2147         case PCI_D3hot:
2148                 break;
2149
2150         default:
2151                 printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
2152                         tp->dev->name, state);
2153                 return -EINVAL;
2154         }
2155         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2156         tw32(TG3PCI_MISC_HOST_CTRL,
2157              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2158
2159         device_should_wake = pci_pme_capable(tp->pdev, state) &&
2160                              device_may_wakeup(&tp->pdev->dev) &&
2161                              (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2162
2163         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2164                 do_low_power = false;
2165                 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2166                     !tp->link_config.phy_is_low_power) {
2167                         struct phy_device *phydev;
2168                         u32 phyid, advertising;
2169
2170                         phydev = tp->mdio_bus->phy_map[PHY_ADDR];
2171
2172                         tp->link_config.phy_is_low_power = 1;
2173
2174                         tp->link_config.orig_speed = phydev->speed;
2175                         tp->link_config.orig_duplex = phydev->duplex;
2176                         tp->link_config.orig_autoneg = phydev->autoneg;
2177                         tp->link_config.orig_advertising = phydev->advertising;
2178
2179                         advertising = ADVERTISED_TP |
2180                                       ADVERTISED_Pause |
2181                                       ADVERTISED_Autoneg |
2182                                       ADVERTISED_10baseT_Half;
2183
2184                         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2185                             device_should_wake) {
2186                                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2187                                         advertising |=
2188                                                 ADVERTISED_100baseT_Half |
2189                                                 ADVERTISED_100baseT_Full |
2190                                                 ADVERTISED_10baseT_Full;
2191                                 else
2192                                         advertising |= ADVERTISED_10baseT_Full;
2193                         }
2194
2195                         phydev->advertising = advertising;
2196
2197                         phy_start_aneg(phydev);
2198
2199                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2200                         if (phyid != TG3_PHY_ID_BCMAC131) {
2201                                 phyid &= TG3_PHY_OUI_MASK;
2202                                 if (phyid == TG3_PHY_OUI_1 ||
2203                                     phyid == TG3_PHY_OUI_2 ||
2204                                     phyid == TG3_PHY_OUI_3)
2205                                         do_low_power = true;
2206                         }
2207                 }
2208         } else {
2209                 do_low_power = false;
2210
2211                 if (tp->link_config.phy_is_low_power == 0) {
2212                         tp->link_config.phy_is_low_power = 1;
2213                         tp->link_config.orig_speed = tp->link_config.speed;
2214                         tp->link_config.orig_duplex = tp->link_config.duplex;
2215                         tp->link_config.orig_autoneg = tp->link_config.autoneg;
2216                 }
2217
2218                 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2219                         tp->link_config.speed = SPEED_10;
2220                         tp->link_config.duplex = DUPLEX_HALF;
2221                         tp->link_config.autoneg = AUTONEG_ENABLE;
2222                         tg3_setup_phy(tp, 0);
2223                 }
2224         }
2225
2226         __tg3_set_mac_addr(tp, 0);
2227
2228         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2229                 u32 val;
2230
2231                 val = tr32(GRC_VCPU_EXT_CTRL);
2232                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2233         } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2234                 int i;
2235                 u32 val;
2236
2237                 for (i = 0; i < 200; i++) {
2238                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2239                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2240                                 break;
2241                         msleep(1);
2242                 }
2243         }
2244         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2245                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2246                                                      WOL_DRV_STATE_SHUTDOWN |
2247                                                      WOL_DRV_WOL |
2248                                                      WOL_SET_MAGIC_PKT);
2249
2250         if (device_should_wake) {
2251                 u32 mac_mode;
2252
2253                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
2254                         if (do_low_power) {
2255                                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2256                                 udelay(40);
2257                         }
2258
2259                         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2260                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
2261                         else
2262                                 mac_mode = MAC_MODE_PORT_MODE_MII;
2263
2264                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2265                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2266                             ASIC_REV_5700) {
2267                                 u32 speed = (tp->tg3_flags &
2268                                              TG3_FLAG_WOL_SPEED_100MB) ?
2269                                              SPEED_100 : SPEED_10;
2270                                 if (tg3_5700_link_polarity(tp, speed))
2271                                         mac_mode |= MAC_MODE_LINK_POLARITY;
2272                                 else
2273                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
2274                         }
2275                 } else {
2276                         mac_mode = MAC_MODE_PORT_MODE_TBI;
2277                 }
2278
2279                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2280                         tw32(MAC_LED_CTRL, tp->led_ctrl);
2281
2282                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2283                 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2284                     !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2285                     ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2286                      (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2287                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2288
2289                 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2290                         mac_mode |= tp->mac_mode &
2291                                     (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2292                         if (mac_mode & MAC_MODE_APE_TX_EN)
2293                                 mac_mode |= MAC_MODE_TDE_ENABLE;
2294                 }
2295
2296                 tw32_f(MAC_MODE, mac_mode);
2297                 udelay(100);
2298
2299                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2300                 udelay(10);
2301         }
2302
2303         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2304             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2305              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2306                 u32 base_val;
2307
2308                 base_val = tp->pci_clock_ctrl;
2309                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2310                              CLOCK_CTRL_TXCLK_DISABLE);
2311
2312                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2313                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
2314         } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2315                    (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2316                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2317                 /* do nothing */
2318         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2319                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2320                 u32 newbits1, newbits2;
2321
2322                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2323                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2324                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2325                                     CLOCK_CTRL_TXCLK_DISABLE |
2326                                     CLOCK_CTRL_ALTCLK);
2327                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2328                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2329                         newbits1 = CLOCK_CTRL_625_CORE;
2330                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2331                 } else {
2332                         newbits1 = CLOCK_CTRL_ALTCLK;
2333                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2334                 }
2335
2336                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2337                             40);
2338
2339                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2340                             40);
2341
2342                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2343                         u32 newbits3;
2344
2345                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2346                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2347                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2348                                             CLOCK_CTRL_TXCLK_DISABLE |
2349                                             CLOCK_CTRL_44MHZ_CORE);
2350                         } else {
2351                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2352                         }
2353
2354                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
2355                                     tp->pci_clock_ctrl | newbits3, 40);
2356                 }
2357         }
2358
2359         if (!(device_should_wake) &&
2360             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
2361             !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
2362                 tg3_power_down_phy(tp, do_low_power);
2363
2364         tg3_frob_aux_power(tp);
2365
2366         /* Workaround for unstable PLL clock */
2367         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2368             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2369                 u32 val = tr32(0x7d00);
2370
2371                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2372                 tw32(0x7d00, val);
2373                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2374                         int err;
2375
2376                         err = tg3_nvram_lock(tp);
2377                         tg3_halt_cpu(tp, RX_CPU_BASE);
2378                         if (!err)
2379                                 tg3_nvram_unlock(tp);
2380                 }
2381         }
2382
2383         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2384
2385         if (device_should_wake)
2386                 pci_enable_wake(tp->pdev, state, true);
2387
2388         /* Finally, set the new power state. */
2389         pci_set_power_state(tp->pdev, state);
2390
2391         return 0;
2392 }
2393
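/* Decode the speed/duplex encoding of the PHY auxiliary status
 * register into SPEED_* and DUPLEX_* values.
 */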
2394 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2395 {
2396         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2397         case MII_TG3_AUX_STAT_10HALF:
2398                 *speed = SPEED_10;
2399                 *duplex = DUPLEX_HALF;
2400                 break;
2401
2402         case MII_TG3_AUX_STAT_10FULL:
2403                 *speed = SPEED_10;
2404                 *duplex = DUPLEX_FULL;
2405                 break;
2406
2407         case MII_TG3_AUX_STAT_100HALF:
2408                 *speed = SPEED_100;
2409                 *duplex = DUPLEX_HALF;
2410                 break;
2411
2412         case MII_TG3_AUX_STAT_100FULL:
2413                 *speed = SPEED_100;
2414                 *duplex = DUPLEX_FULL;
2415                 break;
2416
2417         case MII_TG3_AUX_STAT_1000HALF:
2418                 *speed = SPEED_1000;
2419                 *duplex = DUPLEX_HALF;
2420                 break;
2421
2422         case MII_TG3_AUX_STAT_1000FULL:
2423                 *speed = SPEED_1000;
2424                 *duplex = DUPLEX_FULL;
2425                 break;
2426
2427         default:
2428                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2429                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2430                                  SPEED_10;
2431                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2432                                   DUPLEX_HALF;
2433                         break;
2434                 }
2435                 *speed = SPEED_INVALID;
2436                 *duplex = DUPLEX_INVALID;
2437                 break;
2438         }
2439 }
2440
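/* Program the autonegotiation advertisement (or force a specific
 * speed/duplex) and restart negotiation on a copper PHY.
 */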
2441 static void tg3_phy_copper_begin(struct tg3 *tp)
2442 {
2443         u32 new_adv;
2444         int i;
2445
2446         if (tp->link_config.phy_is_low_power) {
2447                 /* Entering low power mode.  Disable gigabit and
2448                  * 100baseT advertisements.
2449                  */
2450                 tg3_writephy(tp, MII_TG3_CTRL, 0);
2451
2452                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2453                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2454                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2455                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2456
2457                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2458         } else if (tp->link_config.speed == SPEED_INVALID) {
2459                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2460                         tp->link_config.advertising &=
2461                                 ~(ADVERTISED_1000baseT_Half |
2462                                   ADVERTISED_1000baseT_Full);
2463
2464                 new_adv = ADVERTISE_CSMA;
2465                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2466                         new_adv |= ADVERTISE_10HALF;
2467                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2468                         new_adv |= ADVERTISE_10FULL;
2469                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2470                         new_adv |= ADVERTISE_100HALF;
2471                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2472                         new_adv |= ADVERTISE_100FULL;
2473
2474                 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2475
2476                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2477
2478                 if (tp->link_config.advertising &
2479                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2480                         new_adv = 0;
2481                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2482                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2483                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2484                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2485                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2486                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2487                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2488                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2489                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
2490                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2491                 } else {
2492                         tg3_writephy(tp, MII_TG3_CTRL, 0);
2493                 }
2494         } else {
2495                 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2496                 new_adv |= ADVERTISE_CSMA;
2497
2498                 /* Asking for a specific link mode. */
2499                 if (tp->link_config.speed == SPEED_1000) {
2500                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2501
2502                         if (tp->link_config.duplex == DUPLEX_FULL)
2503                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2504                         else
2505                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2506                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2507                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2508                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2509                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
2510                 } else {
2511                         if (tp->link_config.speed == SPEED_100) {
2512                                 if (tp->link_config.duplex == DUPLEX_FULL)
2513                                         new_adv |= ADVERTISE_100FULL;
2514                                 else
2515                                         new_adv |= ADVERTISE_100HALF;
2516                         } else {
2517                                 if (tp->link_config.duplex == DUPLEX_FULL)
2518                                         new_adv |= ADVERTISE_10FULL;
2519                                 else
2520                                         new_adv |= ADVERTISE_10HALF;
2521                         }
2522                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
2523
2524                         new_adv = 0;
2525                 }
2526
2527                 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2528         }
2529
2530         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2531             tp->link_config.speed != SPEED_INVALID) {
2532                 u32 bmcr, orig_bmcr;
2533
2534                 tp->link_config.active_speed = tp->link_config.speed;
2535                 tp->link_config.active_duplex = tp->link_config.duplex;
2536
2537                 bmcr = 0;
2538                 switch (tp->link_config.speed) {
2539                 default:
2540                 case SPEED_10:
2541                         break;
2542
2543                 case SPEED_100:
2544                         bmcr |= BMCR_SPEED100;
2545                         break;
2546
2547                 case SPEED_1000:
2548                         bmcr |= TG3_BMCR_SPEED1000;
2549                         break;
2550                 }
2551
2552                 if (tp->link_config.duplex == DUPLEX_FULL)
2553                         bmcr |= BMCR_FULLDPLX;
2554
2555                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2556                     (bmcr != orig_bmcr)) {
2557                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2558                         for (i = 0; i < 1500; i++) {
2559                                 u32 tmp;
2560
2561                                 udelay(10);
2562                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2563                                     tg3_readphy(tp, MII_BMSR, &tmp))
2564                                         continue;
2565                                 if (!(tmp & BMSR_LSTATUS)) {
2566                                         udelay(40);
2567                                         break;
2568                                 }
2569                         }
2570                         tg3_writephy(tp, MII_BMCR, bmcr);
2571                         udelay(40);
2572                 }
2573         } else {
2574                 tg3_writephy(tp, MII_BMCR,
2575                              BMCR_ANENABLE | BMCR_ANRESTART);
2576         }
2577 }
2578
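/* One-time DSP fixups required by the BCM5401 PHY. */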
2579 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2580 {
2581         int err;
2582
2583         /* Turn off tap power management. */
2584         /* Set Extended packet length bit */
2585         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2586
2587         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2588         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2589
2590         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2591         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2592
2593         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2594         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2595
2596         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2597         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2598
2599         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2600         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2601
2602         udelay(40);
2603
2604         return err;
2605 }
2606
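/* Return 1 if the PHY is already advertising every mode in @mask. */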
2607 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2608 {
2609         u32 adv_reg, all_mask = 0;
2610
2611         if (mask & ADVERTISED_10baseT_Half)
2612                 all_mask |= ADVERTISE_10HALF;
2613         if (mask & ADVERTISED_10baseT_Full)
2614                 all_mask |= ADVERTISE_10FULL;
2615         if (mask & ADVERTISED_100baseT_Half)
2616                 all_mask |= ADVERTISE_100HALF;
2617         if (mask & ADVERTISED_100baseT_Full)
2618                 all_mask |= ADVERTISE_100FULL;
2619
2620         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2621                 return 0;
2622
2623         if ((adv_reg & all_mask) != all_mask)
2624                 return 0;
2625         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2626                 u32 tg3_ctrl;
2627
2628                 all_mask = 0;
2629                 if (mask & ADVERTISED_1000baseT_Half)
2630                         all_mask |= ADVERTISE_1000HALF;
2631                 if (mask & ADVERTISED_1000baseT_Full)
2632                         all_mask |= ADVERTISE_1000FULL;
2633
2634                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2635                         return 0;
2636
2637                 if ((tg3_ctrl & all_mask) != all_mask)
2638                         return 0;
2639         }
2640         return 1;
2641 }
2642
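/* Check that the advertised flow control settings match what was
 * requested.  Returns 0 when the link needs to be renegotiated,
 * 1 otherwise.
 */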
2643 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2644 {
2645         u32 curadv, reqadv;
2646
2647         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2648                 return 1;
2649
2650         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2651         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2652
2653         if (tp->link_config.active_duplex == DUPLEX_FULL) {
2654                 if (curadv != reqadv)
2655                         return 0;
2656
2657                 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2658                         tg3_readphy(tp, MII_LPA, rmtadv);
2659         } else {
2660                 /* Reprogram the advertisement register, even if it
2661                  * does not affect the current link.  If the link
2662                  * gets renegotiated in the future, we can save an
2663                  * additional renegotiation cycle by advertising
2664                  * it correctly in the first place.
2665                  */
2666                 if (curadv != reqadv) {
2667                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2668                                      ADVERTISE_PAUSE_ASYM);
2669                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2670                 }
2671         }
2672
2673         return 1;
2674 }
2675
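/* Bring the link up (or re-establish it) on a copper PHY: clear
 * pending PHY interrupts, reset the PHY if required, and read back the
 * negotiated speed and duplex.
 */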
2676 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2677 {
2678         int current_link_up;
2679         u32 bmsr, dummy;
2680         u32 lcl_adv, rmt_adv;
2681         u16 current_speed;
2682         u8 current_duplex;
2683         int i, err;
2684
2685         tw32(MAC_EVENT, 0);
2686
2687         tw32_f(MAC_STATUS,
2688              (MAC_STATUS_SYNC_CHANGED |
2689               MAC_STATUS_CFG_CHANGED |
2690               MAC_STATUS_MI_COMPLETION |
2691               MAC_STATUS_LNKSTATE_CHANGED));
2692         udelay(40);
2693
2694         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
2695                 tw32_f(MAC_MI_MODE,
2696                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
2697                 udelay(80);
2698         }
2699
2700         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
2701
2702         /* Some third-party PHYs need to be reset on link going
2703          * down.
2704          */
2705         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2706              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2707              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2708             netif_carrier_ok(tp->dev)) {
2709                 tg3_readphy(tp, MII_BMSR, &bmsr);
2710                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2711                     !(bmsr & BMSR_LSTATUS))
2712                         force_reset = 1;
2713         }
2714         if (force_reset)
2715                 tg3_phy_reset(tp);
2716
2717         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
2718                 tg3_readphy(tp, MII_BMSR, &bmsr);
2719                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
2720                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
2721                         bmsr = 0;
2722
2723                 if (!(bmsr & BMSR_LSTATUS)) {
2724                         err = tg3_init_5401phy_dsp(tp);
2725                         if (err)
2726                                 return err;
2727
2728                         tg3_readphy(tp, MII_BMSR, &bmsr);
2729                         for (i = 0; i < 1000; i++) {
2730                                 udelay(10);
2731                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2732                                     (bmsr & BMSR_LSTATUS)) {
2733                                         udelay(40);
2734                                         break;
2735                                 }
2736                         }
2737
2738                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
2739                             !(bmsr & BMSR_LSTATUS) &&
2740                             tp->link_config.active_speed == SPEED_1000) {
2741                                 err = tg3_phy_reset(tp);
2742                                 if (!err)
2743                                         err = tg3_init_5401phy_dsp(tp);
2744                                 if (err)
2745                                         return err;
2746                         }
2747                 }
2748         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2749                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2750                 /* 5701 {A0,B0} CRC bug workaround */
2751                 tg3_writephy(tp, 0x15, 0x0a75);
2752                 tg3_writephy(tp, 0x1c, 0x8c68);
2753                 tg3_writephy(tp, 0x1c, 0x8d68);
2754                 tg3_writephy(tp, 0x1c, 0x8c68);
2755         }
2756
2757         /* Clear pending interrupts... */
2758         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2759         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2760
2761         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
2762                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
2763         else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
2764                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
2765
2766         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2767             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2768                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
2769                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2770                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
2771                 else
2772                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
2773         }
2774
2775         current_link_up = 0;
2776         current_speed = SPEED_INVALID;
2777         current_duplex = DUPLEX_INVALID;
2778
2779         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2780                 u32 val;
2781
2782                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2783                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2784                 if (!(val & (1 << 10))) {
2785                         val |= (1 << 10);
2786                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2787                         goto relink;
2788                 }
2789         }
2790
2791         bmsr = 0;
2792         for (i = 0; i < 100; i++) {
2793                 tg3_readphy(tp, MII_BMSR, &bmsr);
2794                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2795                     (bmsr & BMSR_LSTATUS))
2796                         break;
2797                 udelay(40);
2798         }
2799
2800         if (bmsr & BMSR_LSTATUS) {
2801                 u32 aux_stat, bmcr;
2802
2803                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2804                 for (i = 0; i < 2000; i++) {
2805                         udelay(10);
2806                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2807                             aux_stat)
2808                                 break;
2809                 }
2810
2811                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2812                                              &current_speed,
2813                                              &current_duplex);
2814
2815                 bmcr = 0;
2816                 for (i = 0; i < 200; i++) {
2817                         tg3_readphy(tp, MII_BMCR, &bmcr);
2818                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
2819                                 continue;
2820                         if (bmcr && bmcr != 0x7fff)
2821                                 break;
2822                         udelay(10);
2823                 }
2824
2825                 lcl_adv = 0;
2826                 rmt_adv = 0;
2827
2828                 tp->link_config.active_speed = current_speed;
2829                 tp->link_config.active_duplex = current_duplex;
2830
2831                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2832                         if ((bmcr & BMCR_ANENABLE) &&
2833                             tg3_copper_is_advertising_all(tp,
2834                                                 tp->link_config.advertising)) {
2835                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
2836                                                                   &rmt_adv))
2837                                         current_link_up = 1;
2838                         }
2839                 } else {
2840                         if (!(bmcr & BMCR_ANENABLE) &&
2841                             tp->link_config.speed == current_speed &&
2842                             tp->link_config.duplex == current_duplex &&
2843                             tp->link_config.flowctrl ==
2844                             tp->link_config.active_flowctrl) {
2845                                 current_link_up = 1;
2846                         }
2847                 }
2848
2849                 if (current_link_up == 1 &&
2850                     tp->link_config.active_duplex == DUPLEX_FULL)
2851                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2852         }
2853
2854 relink:
2855         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
2856                 u32 tmp;
2857
2858                 tg3_phy_copper_begin(tp);
2859
2860                 tg3_readphy(tp, MII_BMSR, &tmp);
2861                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2862                     (tmp & BMSR_LSTATUS))
2863                         current_link_up = 1;
2864         }
2865
2866         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2867         if (current_link_up == 1) {
2868                 if (tp->link_config.active_speed == SPEED_100 ||
2869                     tp->link_config.active_speed == SPEED_10)
2870                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2871                 else
2872                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2873         } else
2874                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2875
2876         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2877         if (tp->link_config.active_duplex == DUPLEX_HALF)
2878                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2879
2880         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
2881                 if (current_link_up == 1 &&
2882                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
2883                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
2884                 else
2885                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2886         }
2887
2888         /* ??? Without this setting Netgear GA302T PHY does not
2889          * ??? send/receive packets...
2890          */
2891         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2892             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2893                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2894                 tw32_f(MAC_MI_MODE, tp->mi_mode);
2895                 udelay(80);
2896         }
2897
2898         tw32_f(MAC_MODE, tp->mac_mode);
2899         udelay(40);
2900
2901         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2902                 /* Polled via timer. */
2903                 tw32_f(MAC_EVENT, 0);
2904         } else {
2905                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2906         }
2907         udelay(40);
2908
2909         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2910             current_link_up == 1 &&
2911             tp->link_config.active_speed == SPEED_1000 &&
2912             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2913              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2914                 udelay(120);
2915                 tw32_f(MAC_STATUS,
2916                      (MAC_STATUS_SYNC_CHANGED |
2917                       MAC_STATUS_CFG_CHANGED));
2918                 udelay(40);
2919                 tg3_write_mem(tp,
2920                               NIC_SRAM_FIRMWARE_MBOX,
2921                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2922         }
2923
2924         if (current_link_up != netif_carrier_ok(tp->dev)) {
2925                 if (current_link_up)
2926                         netif_carrier_on(tp->dev);
2927                 else
2928                         netif_carrier_off(tp->dev);
2929                 tg3_link_report(tp);
2930         }
2931
2932         return 0;
2933 }
2934
2935 struct tg3_fiber_aneginfo {
2936         int state;
2937 #define ANEG_STATE_UNKNOWN              0
2938 #define ANEG_STATE_AN_ENABLE            1
2939 #define ANEG_STATE_RESTART_INIT         2
2940 #define ANEG_STATE_RESTART              3
2941 #define ANEG_STATE_DISABLE_LINK_OK      4
2942 #define ANEG_STATE_ABILITY_DETECT_INIT  5
2943 #define ANEG_STATE_ABILITY_DETECT       6
2944 #define ANEG_STATE_ACK_DETECT_INIT      7
2945 #define ANEG_STATE_ACK_DETECT           8
2946 #define ANEG_STATE_COMPLETE_ACK_INIT    9
2947 #define ANEG_STATE_COMPLETE_ACK         10
2948 #define ANEG_STATE_IDLE_DETECT_INIT     11
2949 #define ANEG_STATE_IDLE_DETECT          12
2950 #define ANEG_STATE_LINK_OK              13
2951 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
2952 #define ANEG_STATE_NEXT_PAGE_WAIT       15
2953
2954         u32 flags;
2955 #define MR_AN_ENABLE            0x00000001
2956 #define MR_RESTART_AN           0x00000002
2957 #define MR_AN_COMPLETE          0x00000004
2958 #define MR_PAGE_RX              0x00000008
2959 #define MR_NP_LOADED            0x00000010
2960 #define MR_TOGGLE_TX            0x00000020
2961 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
2962 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
2963 #define MR_LP_ADV_SYM_PAUSE     0x00000100
2964 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
2965 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2966 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2967 #define MR_LP_ADV_NEXT_PAGE     0x00001000
2968 #define MR_TOGGLE_RX            0x00002000
2969 #define MR_NP_RX                0x00004000
2970
2971 #define MR_LINK_OK              0x80000000
2972
2973         unsigned long link_time, cur_time;
2974
2975         u32 ability_match_cfg;
2976         int ability_match_count;
2977
2978         char ability_match, idle_match, ack_match;
2979
2980         u32 txconfig, rxconfig;
2981 #define ANEG_CFG_NP             0x00000080
2982 #define ANEG_CFG_ACK            0x00000040
2983 #define ANEG_CFG_RF2            0x00000020
2984 #define ANEG_CFG_RF1            0x00000010
2985 #define ANEG_CFG_PS2            0x00000001
2986 #define ANEG_CFG_PS1            0x00008000
2987 #define ANEG_CFG_HD             0x00004000
2988 #define ANEG_CFG_FD             0x00002000
2989 #define ANEG_CFG_INVAL          0x00001f06
2990
2991 };
2992 #define ANEG_OK         0
2993 #define ANEG_DONE       1
2994 #define ANEG_TIMER_ENAB 2
2995 #define ANEG_FAILED     -1
2996
2997 #define ANEG_STATE_SETTLE_TIME  10000
2998
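/* For reference, the usual progression through the states above when
 * autoneg succeeds (a summary derived from the switch statement below,
 * not a normative list) is:
 *
 *   AN_ENABLE -> RESTART_INIT -> RESTART -> ABILITY_DETECT_INIT ->
 *   ABILITY_DETECT -> ACK_DETECT_INIT -> ACK_DETECT ->
 *   COMPLETE_ACK_INIT -> COMPLETE_ACK -> IDLE_DETECT_INIT ->
 *   IDLE_DETECT -> LINK_OK
 *
 * ANEG_TIMER_ENAB tells the caller to keep ticking the machine;
 * ANEG_DONE and ANEG_FAILED are terminal.
 */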
2999 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3000                                    struct tg3_fiber_aneginfo *ap)
3001 {
3002         u16 flowctrl;
3003         unsigned long delta;
3004         u32 rx_cfg_reg;
3005         int ret;
3006
3007         if (ap->state == ANEG_STATE_UNKNOWN) {
3008                 ap->rxconfig = 0;
3009                 ap->link_time = 0;
3010                 ap->cur_time = 0;
3011                 ap->ability_match_cfg = 0;
3012                 ap->ability_match_count = 0;
3013                 ap->ability_match = 0;
3014                 ap->idle_match = 0;
3015                 ap->ack_match = 0;
3016         }
3017         ap->cur_time++;
3018
3019         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3020                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3021
3022                 if (rx_cfg_reg != ap->ability_match_cfg) {
3023                         ap->ability_match_cfg = rx_cfg_reg;
3024                         ap->ability_match = 0;
3025                         ap->ability_match_count = 0;
3026                 } else {
3027                         if (++ap->ability_match_count > 1) {
3028                                 ap->ability_match = 1;
3029                                 ap->ability_match_cfg = rx_cfg_reg;
3030                         }
3031                 }
3032                 if (rx_cfg_reg & ANEG_CFG_ACK)
3033                         ap->ack_match = 1;
3034                 else
3035                         ap->ack_match = 0;
3036
3037                 ap->idle_match = 0;
3038         } else {
3039                 ap->idle_match = 1;
3040                 ap->ability_match_cfg = 0;
3041                 ap->ability_match_count = 0;
3042                 ap->ability_match = 0;
3043                 ap->ack_match = 0;
3044
3045                 rx_cfg_reg = 0;
3046         }
3047
3048         ap->rxconfig = rx_cfg_reg;
3049         ret = ANEG_OK;
3050
3051         switch(ap->state) {
3052         case ANEG_STATE_UNKNOWN:
3053                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3054                         ap->state = ANEG_STATE_AN_ENABLE;
3055
3056                 /* fallthru */
3057         case ANEG_STATE_AN_ENABLE:
3058                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3059                 if (ap->flags & MR_AN_ENABLE) {
3060                         ap->link_time = 0;
3061                         ap->cur_time = 0;
3062                         ap->ability_match_cfg = 0;
3063                         ap->ability_match_count = 0;
3064                         ap->ability_match = 0;
3065                         ap->idle_match = 0;
3066                         ap->ack_match = 0;
3067
3068                         ap->state = ANEG_STATE_RESTART_INIT;
3069                 } else {
3070                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
3071                 }
3072                 break;
3073
3074         case ANEG_STATE_RESTART_INIT:
3075                 ap->link_time = ap->cur_time;
3076                 ap->flags &= ~(MR_NP_LOADED);
3077                 ap->txconfig = 0;
3078                 tw32(MAC_TX_AUTO_NEG, 0);
3079                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3080                 tw32_f(MAC_MODE, tp->mac_mode);
3081                 udelay(40);
3082
3083                 ret = ANEG_TIMER_ENAB;
3084                 ap->state = ANEG_STATE_RESTART;
3085
3086                 /* fallthru */
3087         case ANEG_STATE_RESTART:
3088                 delta = ap->cur_time - ap->link_time;
3089                 if (delta > ANEG_STATE_SETTLE_TIME) {
3090                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3091                 } else {
3092                         ret = ANEG_TIMER_ENAB;
3093                 }
3094                 break;
3095
3096         case ANEG_STATE_DISABLE_LINK_OK:
3097                 ret = ANEG_DONE;
3098                 break;
3099
3100         case ANEG_STATE_ABILITY_DETECT_INIT:
3101                 ap->flags &= ~(MR_TOGGLE_TX);
3102                 ap->txconfig = ANEG_CFG_FD;
3103                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3104                 if (flowctrl & ADVERTISE_1000XPAUSE)
3105                         ap->txconfig |= ANEG_CFG_PS1;
3106                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3107                         ap->txconfig |= ANEG_CFG_PS2;
3108                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3109                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3110                 tw32_f(MAC_MODE, tp->mac_mode);
3111                 udelay(40);
3112
3113                 ap->state = ANEG_STATE_ABILITY_DETECT;
3114                 break;
3115
3116         case ANEG_STATE_ABILITY_DETECT:
3117                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
3118                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
3119                 }
3120                 break;
3121
3122         case ANEG_STATE_ACK_DETECT_INIT:
3123                 ap->txconfig |= ANEG_CFG_ACK;
3124                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3125                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3126                 tw32_f(MAC_MODE, tp->mac_mode);
3127                 udelay(40);
3128
3129                 ap->state = ANEG_STATE_ACK_DETECT;
3130
3131                 /* fallthru */
3132         case ANEG_STATE_ACK_DETECT:
3133                 if (ap->ack_match != 0) {
3134                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3135                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3136                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3137                         } else {
3138                                 ap->state = ANEG_STATE_AN_ENABLE;
3139                         }
3140                 } else if (ap->ability_match != 0 &&
3141                            ap->rxconfig == 0) {
3142                         ap->state = ANEG_STATE_AN_ENABLE;
3143                 }
3144                 break;
3145
3146         case ANEG_STATE_COMPLETE_ACK_INIT:
3147                 if (ap->rxconfig & ANEG_CFG_INVAL) {
3148                         ret = ANEG_FAILED;
3149                         break;
3150                 }
3151                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3152                                MR_LP_ADV_HALF_DUPLEX |
3153                                MR_LP_ADV_SYM_PAUSE |
3154                                MR_LP_ADV_ASYM_PAUSE |
3155                                MR_LP_ADV_REMOTE_FAULT1 |
3156                                MR_LP_ADV_REMOTE_FAULT2 |
3157                                MR_LP_ADV_NEXT_PAGE |
3158                                MR_TOGGLE_RX |
3159                                MR_NP_RX);
3160                 if (ap->rxconfig & ANEG_CFG_FD)
3161                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3162                 if (ap->rxconfig & ANEG_CFG_HD)
3163                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3164                 if (ap->rxconfig & ANEG_CFG_PS1)
3165                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
3166                 if (ap->rxconfig & ANEG_CFG_PS2)
3167                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3168                 if (ap->rxconfig & ANEG_CFG_RF1)
3169                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3170                 if (ap->rxconfig & ANEG_CFG_RF2)
3171                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3172                 if (ap->rxconfig & ANEG_CFG_NP)
3173                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
3174
3175                 ap->link_time = ap->cur_time;
3176
3177                 ap->flags ^= (MR_TOGGLE_TX);
3178                 if (ap->rxconfig & 0x0008)
3179                         ap->flags |= MR_TOGGLE_RX;
3180                 if (ap->rxconfig & ANEG_CFG_NP)
3181                         ap->flags |= MR_NP_RX;
3182                 ap->flags |= MR_PAGE_RX;
3183
3184                 ap->state = ANEG_STATE_COMPLETE_ACK;
3185                 ret = ANEG_TIMER_ENAB;
3186                 break;
3187
3188         case ANEG_STATE_COMPLETE_ACK:
3189                 if (ap->ability_match != 0 &&
3190                     ap->rxconfig == 0) {
3191                         ap->state = ANEG_STATE_AN_ENABLE;
3192                         break;
3193                 }
3194                 delta = ap->cur_time - ap->link_time;
3195                 if (delta > ANEG_STATE_SETTLE_TIME) {
3196                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3197                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3198                         } else {
3199                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3200                                     !(ap->flags & MR_NP_RX)) {
3201                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3202                                 } else {
3203                                         ret = ANEG_FAILED;
3204                                 }
3205                         }
3206                 }
3207                 break;
3208
3209         case ANEG_STATE_IDLE_DETECT_INIT:
3210                 ap->link_time = ap->cur_time;
3211                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3212                 tw32_f(MAC_MODE, tp->mac_mode);
3213                 udelay(40);
3214
3215                 ap->state = ANEG_STATE_IDLE_DETECT;
3216                 ret = ANEG_TIMER_ENAB;
3217                 break;
3218
3219         case ANEG_STATE_IDLE_DETECT:
3220                 if (ap->ability_match != 0 &&
3221                     ap->rxconfig == 0) {
3222                         ap->state = ANEG_STATE_AN_ENABLE;
3223                         break;
3224                 }
3225                 delta = ap->cur_time - ap->link_time;
3226                 if (delta > ANEG_STATE_SETTLE_TIME) {
3227                         /* XXX another gem from the Broadcom driver :( */
3228                         ap->state = ANEG_STATE_LINK_OK;
3229                 }
3230                 break;
3231
3232         case ANEG_STATE_LINK_OK:
3233                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3234                 ret = ANEG_DONE;
3235                 break;
3236
3237         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3238                 /* ??? unimplemented */
3239                 break;
3240
3241         case ANEG_STATE_NEXT_PAGE_WAIT:
3242                 /* ??? unimplemented */
3243                 break;
3244
3245         default:
3246                 ret = ANEG_FAILED;
3247                 break;
3248         }
3249
3250         return ret;
3251 }
3252
3253 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3254 {
3255         int res = 0;
3256         struct tg3_fiber_aneginfo aninfo;
3257         int status = ANEG_FAILED;
3258         unsigned int tick;
3259         u32 tmp;
3260
3261         tw32_f(MAC_TX_AUTO_NEG, 0);
3262
3263         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3264         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3265         udelay(40);
3266
3267         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3268         udelay(40);
3269
3270         memset(&aninfo, 0, sizeof(aninfo));
3271         aninfo.flags |= MR_AN_ENABLE;
3272         aninfo.state = ANEG_STATE_UNKNOWN;
3273         aninfo.cur_time = 0;
3274         tick = 0;
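        /* Roughly 195 ms worth of 1 usec ticks (195000 * udelay(1) below);
         * a lower bound only, since the state machine work adds to each pass.
         */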
3275         while (++tick < 195000) {
3276                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3277                 if (status == ANEG_DONE || status == ANEG_FAILED)
3278                         break;
3279
3280                 udelay(1);
3281         }
3282
3283         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3284         tw32_f(MAC_MODE, tp->mac_mode);
3285         udelay(40);
3286
3287         *txflags = aninfo.txconfig;
3288         *rxflags = aninfo.flags;
3289
3290         if (status == ANEG_DONE &&
3291             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3292                              MR_LP_ADV_FULL_DUPLEX)))
3293                 res = 1;
3294
3295         return res;
3296 }
3297
3298 static void tg3_init_bcm8002(struct tg3 *tp)
3299 {
3300         u32 mac_status = tr32(MAC_STATUS);
3301         int i;
3302
3303         /* Reset on first-time init or when we have a link. */
3304         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3305             !(mac_status & MAC_STATUS_PCS_SYNCED))
3306                 return;
3307
3308         /* Set PLL lock range. */
3309         tg3_writephy(tp, 0x16, 0x8007);
3310
3311         /* SW reset */
3312         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3313
3314         /* Wait for reset to complete. */
3315         /* XXX schedule_timeout() ... */
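        /* 500 iterations of udelay(10): roughly a 5 ms busy-wait. */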
3316         for (i = 0; i < 500; i++)
3317                 udelay(10);
3318
3319         /* Config mode; select PMA/Ch 1 regs. */
3320         tg3_writephy(tp, 0x10, 0x8411);
3321
3322         /* Enable auto-lock and comdet, select txclk for tx. */
3323         tg3_writephy(tp, 0x11, 0x0a10);
3324
3325         tg3_writephy(tp, 0x18, 0x00a0);
3326         tg3_writephy(tp, 0x16, 0x41ff);
3327
3328         /* Assert and deassert POR. */
3329         tg3_writephy(tp, 0x13, 0x0400);
3330         udelay(40);
3331         tg3_writephy(tp, 0x13, 0x0000);
3332
3333         tg3_writephy(tp, 0x11, 0x0a50);
3334         udelay(40);
3335         tg3_writephy(tp, 0x11, 0x0a10);
3336
3337         /* Wait for signal to stabilize */
3338         /* XXX schedule_timeout() ... */
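        /* 15000 iterations of udelay(10): about 150 ms. */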
3339         for (i = 0; i < 15000; i++)
3340                 udelay(10);
3341
3342         /* Deselect the channel register so we can read the PHYID
3343          * later.
3344          */
3345         tg3_writephy(tp, 0x10, 0x8011);
3346 }
3347
3348 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3349 {
3350         u16 flowctrl;
3351         u32 sg_dig_ctrl, sg_dig_status;
3352         u32 serdes_cfg, expected_sg_dig_ctrl;
3353         int workaround, port_a;
3354         int current_link_up;
3355
3356         serdes_cfg = 0;
3357         expected_sg_dig_ctrl = 0;
3358         workaround = 0;
3359         port_a = 1;
3360         current_link_up = 0;
3361
3362         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3363             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3364                 workaround = 1;
3365                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3366                         port_a = 0;
3367
3368                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3369                 /* preserve bits 20-23 for voltage regulator */
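                /* i.e. 0x00f06fff == 0x00000fff (bits 0-11) |
                 * 0x00006000 (bits 13-14) | 0x00f00000 (bits 20-23).
                 */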
3370                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3371         }
3372
3373         sg_dig_ctrl = tr32(SG_DIG_CTRL);
3374
3375         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3376                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3377                         if (workaround) {
3378                                 u32 val = serdes_cfg;
3379
3380                                 if (port_a)
3381                                         val |= 0xc010000;
3382                                 else
3383                                         val |= 0x4010000;
3384                                 tw32_f(MAC_SERDES_CFG, val);
3385                         }
3386
3387                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3388                 }
3389                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3390                         tg3_setup_flow_control(tp, 0, 0);
3391                         current_link_up = 1;
3392                 }
3393                 goto out;
3394         }
3395
3396         /* Want auto-negotiation.  */
3397         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3398
3399         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3400         if (flowctrl & ADVERTISE_1000XPAUSE)
3401                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3402         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3403                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3404
3405         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3406                 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3407                     tp->serdes_counter &&
3408                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
3409                                     MAC_STATUS_RCVD_CFG)) ==
3410                      MAC_STATUS_PCS_SYNCED)) {
3411                         tp->serdes_counter--;
3412                         current_link_up = 1;
3413                         goto out;
3414                 }
3415 restart_autoneg:
3416                 if (workaround)
3417                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3418                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3419                 udelay(5);
3420                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3421
3422                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3423                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3424         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3425                                  MAC_STATUS_SIGNAL_DET)) {
3426                 sg_dig_status = tr32(SG_DIG_STATUS);
3427                 mac_status = tr32(MAC_STATUS);
3428
3429                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3430                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
3431                         u32 local_adv = 0, remote_adv = 0;
3432
3433                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3434                                 local_adv |= ADVERTISE_1000XPAUSE;
3435                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3436                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
3437
3438                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3439                                 remote_adv |= LPA_1000XPAUSE;
3440                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3441                                 remote_adv |= LPA_1000XPAUSE_ASYM;
3442
3443                         tg3_setup_flow_control(tp, local_adv, remote_adv);
3444                         current_link_up = 1;
3445                         tp->serdes_counter = 0;
3446                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3447                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3448                         if (tp->serdes_counter)
3449                                 tp->serdes_counter--;
3450                         else {
3451                                 if (workaround) {
3452                                         u32 val = serdes_cfg;
3453
3454                                         if (port_a)
3455                                                 val |= 0xc010000;
3456                                         else
3457                                                 val |= 0x4010000;
3458
3459                                         tw32_f(MAC_SERDES_CFG, val);
3460                                 }
3461
3462                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3463                                 udelay(40);
3464
3465                                 /* Link parallel detection - link is up
3466                                  * only if we have PCS_SYNC and are not
3467                                  * receiving config code words. */
3468                                 mac_status = tr32(MAC_STATUS);
3469                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3470                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
3471                                         tg3_setup_flow_control(tp, 0, 0);
3472                                         current_link_up = 1;
3473                                         tp->tg3_flags2 |=
3474                                                 TG3_FLG2_PARALLEL_DETECT;
3475                                         tp->serdes_counter =
3476                                                 SERDES_PARALLEL_DET_TIMEOUT;
3477                                 } else
3478                                         goto restart_autoneg;
3479                         }
3480                 }
3481         } else {
3482                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3483                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3484         }
3485
3486 out:
3487         return current_link_up;
3488 }
3489
3490 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3491 {
3492         int current_link_up = 0;
3493
3494         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3495                 goto out;
3496
3497         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3498                 u32 txflags, rxflags;
3499                 int i;
3500
3501                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3502                         u32 local_adv = 0, remote_adv = 0;
3503
3504                         if (txflags & ANEG_CFG_PS1)
3505                                 local_adv |= ADVERTISE_1000XPAUSE;
3506                         if (txflags & ANEG_CFG_PS2)
3507                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
3508
3509                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
3510                                 remote_adv |= LPA_1000XPAUSE;
3511                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3512                                 remote_adv |= LPA_1000XPAUSE_ASYM;
3513
3514                         tg3_setup_flow_control(tp, local_adv, remote_adv);
3515
3516                         current_link_up = 1;
3517                 }
3518                 for (i = 0; i < 30; i++) {
3519                         udelay(20);
3520                         tw32_f(MAC_STATUS,
3521                                (MAC_STATUS_SYNC_CHANGED |
3522                                 MAC_STATUS_CFG_CHANGED));
3523                         udelay(40);
3524                         if ((tr32(MAC_STATUS) &
3525                              (MAC_STATUS_SYNC_CHANGED |
3526                               MAC_STATUS_CFG_CHANGED)) == 0)
3527                                 break;
3528                 }
3529
3530                 mac_status = tr32(MAC_STATUS);
3531                 if (current_link_up == 0 &&
3532                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
3533                     !(mac_status & MAC_STATUS_RCVD_CFG))
3534                         current_link_up = 1;
3535         } else {
3536                 tg3_setup_flow_control(tp, 0, 0);
3537
3538                 /* Forcing 1000FD link up. */
3539                 current_link_up = 1;
3540
3541                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3542                 udelay(40);
3543
3544                 tw32_f(MAC_MODE, tp->mac_mode);
3545                 udelay(40);
3546         }
3547
3548 out:
3549         return current_link_up;
3550 }
3551
3552 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3553 {
3554         u32 orig_pause_cfg;
3555         u16 orig_active_speed;
3556         u8 orig_active_duplex;
3557         u32 mac_status;
3558         int current_link_up;
3559         int i;
3560
3561         orig_pause_cfg = tp->link_config.active_flowctrl;
3562         orig_active_speed = tp->link_config.active_speed;
3563         orig_active_duplex = tp->link_config.active_duplex;
3564
3565         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3566             netif_carrier_ok(tp->dev) &&
3567             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3568                 mac_status = tr32(MAC_STATUS);
3569                 mac_status &= (MAC_STATUS_PCS_SYNCED |
3570                                MAC_STATUS_SIGNAL_DET |
3571                                MAC_STATUS_CFG_CHANGED |
3572                                MAC_STATUS_RCVD_CFG);
3573                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3574                                    MAC_STATUS_SIGNAL_DET)) {
3575                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3576                                             MAC_STATUS_CFG_CHANGED));
3577                         return 0;
3578                 }
3579         }
3580
3581         tw32_f(MAC_TX_AUTO_NEG, 0);
3582
3583         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3584         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3585         tw32_f(MAC_MODE, tp->mac_mode);
3586         udelay(40);
3587
3588         if (tp->phy_id == PHY_ID_BCM8002)
3589                 tg3_init_bcm8002(tp);
3590
3591         /* Enable the link change event even when polling the serdes. */
3592         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3593         udelay(40);
3594
3595         current_link_up = 0;
3596         mac_status = tr32(MAC_STATUS);
3597
3598         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3599                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3600         else
3601                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3602
3603         tp->hw_status->status =
3604                 (SD_STATUS_UPDATED |
3605                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
3606
3607         for (i = 0; i < 100; i++) {
3608                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3609                                     MAC_STATUS_CFG_CHANGED));
3610                 udelay(5);
3611                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3612                                          MAC_STATUS_CFG_CHANGED |
3613                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
3614                         break;
3615         }
3616
3617         mac_status = tr32(MAC_STATUS);
3618         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3619                 current_link_up = 0;
3620                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3621                     tp->serdes_counter == 0) {
3622                         tw32_f(MAC_MODE, (tp->mac_mode |
3623                                           MAC_MODE_SEND_CONFIGS));
3624                         udelay(1);
3625                         tw32_f(MAC_MODE, tp->mac_mode);
3626                 }
3627         }
3628
3629         if (current_link_up == 1) {
3630                 tp->link_config.active_speed = SPEED_1000;
3631                 tp->link_config.active_duplex = DUPLEX_FULL;
3632                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3633                                     LED_CTRL_LNKLED_OVERRIDE |
3634                                     LED_CTRL_1000MBPS_ON));
3635         } else {
3636                 tp->link_config.active_speed = SPEED_INVALID;
3637                 tp->link_config.active_duplex = DUPLEX_INVALID;
3638                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3639                                     LED_CTRL_LNKLED_OVERRIDE |
3640                                     LED_CTRL_TRAFFIC_OVERRIDE));
3641         }
3642
3643         if (current_link_up != netif_carrier_ok(tp->dev)) {
3644                 if (current_link_up)
3645                         netif_carrier_on(tp->dev);
3646                 else
3647                         netif_carrier_off(tp->dev);
3648                 tg3_link_report(tp);
3649         } else {
3650                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
3651                 if (orig_pause_cfg != now_pause_cfg ||
3652                     orig_active_speed != tp->link_config.active_speed ||
3653                     orig_active_duplex != tp->link_config.active_duplex)
3654                         tg3_link_report(tp);
3655         }
3656
3657         return 0;
3658 }
3659
3660 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
3661 {
3662         int current_link_up, err = 0;
3663         u32 bmsr, bmcr;
3664         u16 current_speed;
3665         u8 current_duplex;
3666         u32 local_adv, remote_adv;
3667
3668         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3669         tw32_f(MAC_MODE, tp->mac_mode);
3670         udelay(40);
3671
3672         tw32(MAC_EVENT, 0);
3673
3674         tw32_f(MAC_STATUS,
3675              (MAC_STATUS_SYNC_CHANGED |
3676               MAC_STATUS_CFG_CHANGED |
3677               MAC_STATUS_MI_COMPLETION |
3678               MAC_STATUS_LNKSTATE_CHANGED));
3679         udelay(40);
3680
3681         if (force_reset)
3682                 tg3_phy_reset(tp);
3683
3684         current_link_up = 0;
3685         current_speed = SPEED_INVALID;
3686         current_duplex = DUPLEX_INVALID;
3687
3688         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3689         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3690         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
3691                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3692                         bmsr |= BMSR_LSTATUS;
3693                 else
3694                         bmsr &= ~BMSR_LSTATUS;
3695         }
3696
3697         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
3698
3699         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
3700             (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3701                 /* do nothing, just check for link up at the end */
3702         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3703                 u32 adv, new_adv;
3704
3705                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3706                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
3707                                   ADVERTISE_1000XPAUSE |
3708                                   ADVERTISE_1000XPSE_ASYM |
3709                                   ADVERTISE_SLCT);
3710
3711                 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3712
3713                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
3714                         new_adv |= ADVERTISE_1000XHALF;
3715                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
3716                         new_adv |= ADVERTISE_1000XFULL;
3717
3718                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
3719                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
3720                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
3721                         tg3_writephy(tp, MII_BMCR, bmcr);
3722
3723                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3724                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
3725                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3726
3727                         return err;
3728                 }
3729         } else {
3730                 u32 new_bmcr;
3731
3732                 bmcr &= ~BMCR_SPEED1000;
3733                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
3734
3735                 if (tp->link_config.duplex == DUPLEX_FULL)
3736                         new_bmcr |= BMCR_FULLDPLX;
3737
3738                 if (new_bmcr != bmcr) {
3739                         /* BMCR_SPEED1000 is a reserved bit that needs
3740                          * to be set on write.
3741                          */
3742                         new_bmcr |= BMCR_SPEED1000;
3743
3744                         /* Force a linkdown */
3745                         if (netif_carrier_ok(tp->dev)) {
3746                                 u32 adv;
3747
3748                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3749                                 adv &= ~(ADVERTISE_1000XFULL |
3750                                          ADVERTISE_1000XHALF |
3751                                          ADVERTISE_SLCT);
3752                                 tg3_writephy(tp, MII_ADVERTISE, adv);
3753                                 tg3_writephy(tp, MII_BMCR, bmcr |
3754                                                            BMCR_ANRESTART |
3755                                                            BMCR_ANENABLE);
3756                                 udelay(10);
3757                                 netif_carrier_off(tp->dev);
3758                         }
3759                         tg3_writephy(tp, MII_BMCR, new_bmcr);
3760                         bmcr = new_bmcr;
3761                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3762                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3763                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3764                             ASIC_REV_5714) {
3765                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3766                                         bmsr |= BMSR_LSTATUS;
3767                                 else
3768                                         bmsr &= ~BMSR_LSTATUS;
3769                         }
3770                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3771                 }
3772         }
3773
3774         if (bmsr & BMSR_LSTATUS) {
3775                 current_speed = SPEED_1000;
3776                 current_link_up = 1;
3777                 if (bmcr & BMCR_FULLDPLX)
3778                         current_duplex = DUPLEX_FULL;
3779                 else
3780                         current_duplex = DUPLEX_HALF;
3781
3782                 local_adv = 0;
3783                 remote_adv = 0;
3784
3785                 if (bmcr & BMCR_ANENABLE) {
3786                         u32 common;
3787
3788                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3789                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3790                         common = local_adv & remote_adv;
3791                         if (common & (ADVERTISE_1000XHALF |
3792                                       ADVERTISE_1000XFULL)) {
3793                                 if (common & ADVERTISE_1000XFULL)
3794                                         current_duplex = DUPLEX_FULL;
3795                                 else
3796                                         current_duplex = DUPLEX_HALF;
3797                         }
3798                         else
3799                                 current_link_up = 0;
3800                 }
3801         }
3802
3803         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
3804                 tg3_setup_flow_control(tp, local_adv, remote_adv);
3805
3806         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3807         if (tp->link_config.active_duplex == DUPLEX_HALF)
3808                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3809
3810         tw32_f(MAC_MODE, tp->mac_mode);
3811         udelay(40);
3812
3813         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3814
3815         tp->link_config.active_speed = current_speed;
3816         tp->link_config.active_duplex = current_duplex;
3817
3818         if (current_link_up != netif_carrier_ok(tp->dev)) {
3819                 if (current_link_up)
3820                         netif_carrier_on(tp->dev);
3821                 else {
3822                         netif_carrier_off(tp->dev);
3823                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3824                 }
3825                 tg3_link_report(tp);
3826         }
3827         return err;
3828 }
3829
3830 static void tg3_serdes_parallel_detect(struct tg3 *tp)
3831 {
3832         if (tp->serdes_counter) {
3833                 /* Give autoneg time to complete. */
3834                 tp->serdes_counter--;
3835                 return;
3836         }
3837         if (!netif_carrier_ok(tp->dev) &&
3838             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3839                 u32 bmcr;
3840
3841                 tg3_readphy(tp, MII_BMCR, &bmcr);
3842                 if (bmcr & BMCR_ANENABLE) {
3843                         u32 phy1, phy2;
3844
3845                         /* Select shadow register 0x1f */
3846                         tg3_writephy(tp, 0x1c, 0x7c00);
3847                         tg3_readphy(tp, 0x1c, &phy1);
3848
3849                         /* Select expansion interrupt status register */
3850                         tg3_writephy(tp, 0x17, 0x0f01);
3851                         tg3_readphy(tp, 0x15, &phy2);
3852                         tg3_readphy(tp, 0x15, &phy2);
3853
3854                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
3855                                 /* We have signal detect and are not receiving
3856                                  * config code words, so the link is up by
3857                                  * parallel detection.
3858                                  */
3859
3860                                 bmcr &= ~BMCR_ANENABLE;
3861                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3862                                 tg3_writephy(tp, MII_BMCR, bmcr);
3863                                 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
3864                         }
3865                 }
3866         }
3867         else if (netif_carrier_ok(tp->dev) &&
3868                  (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3869                  (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3870                 u32 phy2;
3871
3872                 /* Select expansion interrupt status register */
3873                 tg3_writephy(tp, 0x17, 0x0f01);
3874                 tg3_readphy(tp, 0x15, &phy2);
3875                 if (phy2 & 0x20) {
3876                         u32 bmcr;
3877
3878                         /* Config code words received, turn on autoneg. */
3879                         tg3_readphy(tp, MII_BMCR, &bmcr);
3880                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3881
3882                         tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3883
3884                 }
3885         }
3886 }
3887
3888 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3889 {
3890         int err;
3891
3892         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3893                 err = tg3_setup_fiber_phy(tp, force_reset);
3894         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3895                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
3896         } else {
3897                 err = tg3_setup_copper_phy(tp, force_reset);
3898         }
3899
3900         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
3901                 u32 val, scale;
3902
3903                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
3904                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
3905                         scale = 65;
3906                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
3907                         scale = 6;
3908                 else
3909                         scale = 12;
3910
3911                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
3912                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
3913                 tw32(GRC_MISC_CFG, val);
3914         }
3915
3916         if (tp->link_config.active_speed == SPEED_1000 &&
3917             tp->link_config.active_duplex == DUPLEX_HALF)
3918                 tw32(MAC_TX_LENGTHS,
3919                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3920                       (6 << TX_LENGTHS_IPG_SHIFT) |
3921                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3922         else
3923                 tw32(MAC_TX_LENGTHS,
3924                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3925                       (6 << TX_LENGTHS_IPG_SHIFT) |
3926                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3927
3928         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3929                 if (netif_carrier_ok(tp->dev)) {
3930                         tw32(HOSTCC_STAT_COAL_TICKS,
3931                              tp->coal.stats_block_coalesce_usecs);
3932                 } else {
3933                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
3934                 }
3935         }
3936
3937         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3938                 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3939                 if (!netif_carrier_ok(tp->dev))
3940                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3941                               tp->pwrmgmt_thresh;
3942                 else
3943                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3944                 tw32(PCIE_PWR_MGMT_THRESH, val);
3945         }
3946
3947         return err;
3948 }
3949
3950 /* This is called whenever we suspect that the system chipset is re-
3951  * ordering the sequence of MMIO to the tx send mailbox. The symptom
3952  * is bogus tx completions. We try to recover by setting the
3953  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3954  * in the workqueue.
3955  */
3956 static void tg3_tx_recover(struct tg3 *tp)
3957 {
3958         BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
3959                tp->write32_tx_mbox == tg3_write_indirect_mbox);
3960
3961         printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
3962                "mapped I/O cycles to the network device, attempting to "
3963                "recover. Please report the problem to the driver maintainer "
3964                "and include system chipset information.\n", tp->dev->name);
3965
3966         spin_lock(&tp->lock);
3967         tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
3968         spin_unlock(&tp->lock);
3969 }
3970
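/* The number of free tx descriptors is tx_pending minus the number of
 * descriptors currently in flight; the mask handles producer/consumer
 * wrap.  A worked example with illustrative values (assuming a
 * TG3_TX_RING_SIZE of 512): tx_prod == 3 and tx_cons == 508 gives
 * (3 - 508) & 511 == 7 entries in flight, so tx_pending - 7 are free.
 */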
3971 static inline u32 tg3_tx_avail(struct tg3 *tp)
3972 {
3973         smp_mb();
3974         return (tp->tx_pending -
3975                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3976 }
3977
3978 /* Tigon3 never reports partial packet sends.  So we do not
3979  * need special logic to handle SKBs that have not had all
3980  * of their frags sent yet, like SunGEM does.
3981  */
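/* hw_idx below is the chip's tx consumer index from the status block;
 * sw_idx walks forward to it, unmapping and freeing one skb per packet.
 * The entries for a packet's fragments carry no skb pointer of their
 * own, which is what the tx_bug sanity check below relies on.
 */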
3982 static void tg3_tx(struct tg3 *tp)
3983 {
3984         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
3985         u32 sw_idx = tp->tx_cons;
3986
3987         while (sw_idx != hw_idx) {
3988                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3989                 struct sk_buff *skb = ri->skb;
3990                 int i, tx_bug = 0;
3991
3992                 if (unlikely(skb == NULL)) {
3993                         tg3_tx_recover(tp);
3994                         return;
3995                 }
3996
3997                 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
3998
3999                 ri->skb = NULL;
4000
4001                 sw_idx = NEXT_TX(sw_idx);
4002
4003                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4004                         ri = &tp->tx_buffers[sw_idx];
4005                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4006                                 tx_bug = 1;
4007                         sw_idx = NEXT_TX(sw_idx);
4008                 }
4009
4010                 dev_kfree_skb(skb);
4011
4012                 if (unlikely(tx_bug)) {
4013                         tg3_tx_recover(tp);
4014                         return;
4015                 }
4016         }
4017
4018         tp->tx_cons = sw_idx;
4019
4020         /* Need to make the tx_cons update visible to tg3_start_xmit()
4021          * before checking for netif_queue_stopped().  Without the
4022          * memory barrier, there is a small possibility that tg3_start_xmit()
4023          * will miss it and cause the queue to be stopped forever.
4024          */
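        /* An illustrative bad interleaving without the barrier (a sketch,
         * not an observed trace): tg3_tx() updates tx_cons and sees the
         * queue still running, while tg3_start_xmit() computes
         * tg3_tx_avail() from the stale tx_cons, finds too few free slots
         * and stops the queue -- after which nothing ever wakes it.
         */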
4025         smp_mb();
4026
4027         if (unlikely(netif_queue_stopped(tp->dev) &&
4028                      (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
4029                 netif_tx_lock(tp->dev);
4030                 if (netif_queue_stopped(tp->dev) &&
4031                     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
4032                         netif_wake_queue(tp->dev);
4033                 netif_tx_unlock(tp->dev);
4034         }
4035 }
4036
4037 /* Returns size of skb allocated or < 0 on error.
4038  *
4039  * We only need to fill in the address because the other members
4040  * of the RX descriptor are invariant, see tg3_init_rings.
4041  *
4042  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
4043  * posting buffers we only dirty the first cache line of the RX
4044  * descriptor (containing the address).  Whereas for the RX status
4045  * buffers the cpu only reads the last cacheline of the RX descriptor
4046  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4047  */
4048 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
4049                             int src_idx, u32 dest_idx_unmasked)
4050 {
4051         struct tg3_rx_buffer_desc *desc;
4052         struct ring_info *map, *src_map;
4053         struct sk_buff *skb;
4054         dma_addr_t mapping;
4055         int skb_size, dest_idx;
4056
4057         src_map = NULL;
4058         switch (opaque_key) {
4059         case RXD_OPAQUE_RING_STD:
4060                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4061                 desc = &tp->rx_std[dest_idx];
4062                 map = &tp->rx_std_buffers[dest_idx];
4063                 if (src_idx >= 0)
4064                         src_map = &tp->rx_std_buffers[src_idx];
4065                 skb_size = tp->rx_pkt_buf_sz;
4066                 break;
4067
4068         case RXD_OPAQUE_RING_JUMBO:
4069                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4070                 desc = &tp->rx_jumbo[dest_idx];
4071                 map = &tp->rx_jumbo_buffers[dest_idx];
4072                 if (src_idx >= 0)
4073                         src_map = &tp->rx_jumbo_buffers[src_idx];
4074                 skb_size = RX_JUMBO_PKT_BUF_SZ;
4075                 break;
4076
4077         default:
4078                 return -EINVAL;
4079         }
4080
4081         /* Do not overwrite any of the map or rp information
4082          * until we are sure we can commit to a new buffer.
4083          *
4084          * Callers depend upon this behavior and assume that
4085          * we leave everything unchanged if we fail.
4086          */
4087         skb = netdev_alloc_skb(tp->dev, skb_size);
4088         if (skb == NULL)
4089                 return -ENOMEM;
4090
4091         skb_reserve(skb, tp->rx_offset);
4092
4093         mapping = pci_map_single(tp->pdev, skb->data,
4094                                  skb_size - tp->rx_offset,
4095                                  PCI_DMA_FROMDEVICE);
4096
4097         map->skb = skb;
4098         pci_unmap_addr_set(map, mapping, mapping);
4099
4100         if (src_map != NULL)
4101                 src_map->skb = NULL;
4102
4103         desc->addr_hi = ((u64)mapping >> 32);
4104         desc->addr_lo = ((u64)mapping & 0xffffffff);
4105
4106         return skb_size;
4107 }
4108
4109 /* We only need to copy over the address because the other
4110  * members of the RX descriptor are invariant.  See notes above
4111  * tg3_alloc_rx_skb for full details.
4112  */
4113 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
4114                            int src_idx, u32 dest_idx_unmasked)
4115 {
4116         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4117         struct ring_info *src_map, *dest_map;
4118         int dest_idx;
4119
4120         switch (opaque_key) {
4121         case RXD_OPAQUE_RING_STD:
4122                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4123                 dest_desc = &tp->rx_std[dest_idx];
4124                 dest_map = &tp->rx_std_buffers[dest_idx];
4125                 src_desc = &tp->rx_std[src_idx];
4126                 src_map = &tp->rx_std_buffers[src_idx];
4127                 break;
4128
4129         case RXD_OPAQUE_RING_JUMBO:
4130                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4131                 dest_desc = &tp->rx_jumbo[dest_idx];
4132                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
4133                 src_desc = &tp->rx_jumbo[src_idx];
4134                 src_map = &tp->rx_jumbo_buffers[src_idx];
4135                 break;
4136
4137         default:
4138                 return;
4139         }
4140
4141         dest_map->skb = src_map->skb;
4142         pci_unmap_addr_set(dest_map, mapping,
4143                            pci_unmap_addr(src_map, mapping));
4144         dest_desc->addr_hi = src_desc->addr_hi;
4145         dest_desc->addr_lo = src_desc->addr_lo;
4146
4147         src_map->skb = NULL;
4148 }
4149
4150 #if TG3_VLAN_TAG_USED
4151 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
4152 {
4153         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
4154 }
4155 #endif
4156
4157 /* The RX ring scheme is composed of multiple rings which post fresh
4158  * buffers to the chip, and one special ring the chip uses to report
4159  * status back to the host.
4160  *
4161  * The special ring reports the status of received packets to the
4162  * host.  The chip does not write into the original descriptor the
4163  * RX buffer was obtained from.  The chip simply takes the original
4164  * descriptor as provided by the host, updates the status and length
4165  * field, then writes this into the next status ring entry.
4166  *
4167  * Each ring the host uses to post buffers to the chip is described
4168  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
4169  * it is first placed into the on-chip RAM.  When the packet's length
4170  * is known, the chip walks down the TG3_BDINFO entries to select the ring.
4171  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4172  * which is within the range of the new packet's length is chosen.
4173  *
4174  * The "separate ring for rx status" scheme may sound queer, but it makes
4175  * sense from a cache coherency perspective.  If only the host writes
4176  * to the buffer post rings, and only the chip writes to the rx status
4177  * rings, then cache lines never move beyond shared-modified state.
4178  * If both the host and chip were to write into the same ring, cache line
4179  * eviction could occur since both entities want it in an exclusive state.
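 *
 * In outline, tg3_rx() below consumes the status ring: the chip advances
 * a producer index in the status block (hw_idx) while the driver advances
 * its own consumer index (sw_idx), recycling or re-allocating the posted
 * buffer for each entry and finally writing the updated indices back to
 * the chip's mailboxes.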
4180  */
4181 static int tg3_rx(struct tg3 *tp, int budget)
4182 {
4183         u32 work_mask, rx_std_posted = 0;
4184         u32 sw_idx = tp->rx_rcb_ptr;
4185         u16 hw_idx;
4186         int received;
4187
4188         hw_idx = tp->hw_status->idx[0].rx_producer;
4189         /*
4190          * We need to order the read of hw_idx and the read of
4191          * the opaque cookie.
4192          */
4193         rmb();
4194         work_mask = 0;
4195         received = 0;
4196         while (sw_idx != hw_idx && budget > 0) {
4197                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
4198                 unsigned int len;
4199                 struct sk_buff *skb;
4200                 dma_addr_t dma_addr;
4201                 u32 opaque_key, desc_idx, *post_ptr;
4202
4203                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4204                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4205                 if (opaque_key == RXD_OPAQUE_RING_STD) {
4206                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
4207                                                   mapping);
4208                         skb = tp->rx_std_buffers[desc_idx].skb;
4209                         post_ptr = &tp->rx_std_ptr;
4210                         rx_std_posted++;
4211                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4212                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
4213                                                   mapping);
4214                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
4215                         post_ptr = &tp->rx_jumbo_ptr;
4216                 }
4217                 else {
4218                         goto next_pkt_nopost;
4219                 }
4220
4221                 work_mask |= opaque_key;
4222
4223                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4224                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4225                 drop_it:
4226                         tg3_recycle_rx(tp, opaque_key,
4227                                        desc_idx, *post_ptr);
4228                 drop_it_no_recycle:
4229                         /* Other statistics are kept track of by the card. */
4230                         tp->net_stats.rx_dropped++;
4231                         goto next_pkt;
4232                 }
4233
4234                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
4235
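                /* Copybreak: for large packets we hand the original skb up
                 * the stack and post a freshly allocated replacement buffer;
                 * for small packets it is cheaper to copy the data into a new
                 * small skb and recycle the original buffer back to the ring.
                 */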
4236                 if (len > RX_COPY_THRESHOLD
4237                         && tp->rx_offset == 2
4238                         /* rx_offset != 2 iff this is a 5701 card running
4239                          * in PCI-X mode [see tg3_get_invariants()] */
4240                 ) {
4241                         int skb_size;
4242
4243                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
4244                                                     desc_idx, *post_ptr);
4245                         if (skb_size < 0)
4246                                 goto drop_it;
4247
4248                         pci_unmap_single(tp->pdev, dma_addr,
4249                                          skb_size - tp->rx_offset,
4250                                          PCI_DMA_FROMDEVICE);
4251
4252                         skb_put(skb, len);
4253                 } else {
4254                         struct sk_buff *copy_skb;
4255
4256                         tg3_recycle_rx(tp, opaque_key,
4257                                        desc_idx, *post_ptr);
4258
4259                         copy_skb = netdev_alloc_skb(tp->dev, len + 2);
4260                         if (copy_skb == NULL)
4261                                 goto drop_it_no_recycle;
4262
4263                         skb_reserve(copy_skb, 2);
4264                         skb_put(copy_skb, len);
4265                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4266                         skb_copy_from_linear_data(skb, copy_skb->data, len);
4267                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4268
4269                         /* We'll reuse the original ring buffer. */
4270                         skb = copy_skb;
4271                 }
4272
4273                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4274                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4275                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4276                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
4277                         skb->ip_summed = CHECKSUM_UNNECESSARY;
4278                 else
4279                         skb->ip_summed = CHECKSUM_NONE;
4280
4281                 skb->protocol = eth_type_trans(skb, tp->dev);
4282 #if TG3_VLAN_TAG_USED
4283                 if (tp->vlgrp != NULL &&
4284                     desc->type_flags & RXD_FLAG_VLAN) {
4285                         tg3_vlan_rx(tp, skb,
4286                                     desc->err_vlan & RXD_VLAN_MASK);
4287                 } else
4288 #endif
4289                         netif_receive_skb(skb);
4290
4291                 tp->dev->last_rx = jiffies;
4292                 received++;
4293                 budget--;
4294
4295 next_pkt:
4296                 (*post_ptr)++;
4297
4298                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4299                         u32 idx = *post_ptr % TG3_RX_RING_SIZE;
4300
4301                         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
4302                                      TG3_64BIT_REG_LOW, idx);
4303                         work_mask &= ~RXD_OPAQUE_RING_STD;
4304                         rx_std_posted = 0;
4305                 }
4306 next_pkt_nopost:
4307                 sw_idx++;
4308                 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
4309
4310                 /* Refresh hw_idx to see if there is new work */
4311                 if (sw_idx == hw_idx) {
4312                         hw_idx = tp->hw_status->idx[0].rx_producer;
4313                         rmb();
4314                 }
4315         }
4316
4317         /* ACK the status ring. */
4318         tp->rx_rcb_ptr = sw_idx;
4319         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
4320
4321         /* Refill RX ring(s). */
4322         if (work_mask & RXD_OPAQUE_RING_STD) {
4323                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
4324                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
4325                              sw_idx);
4326         }
4327         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4328                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
4329                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
4330                              sw_idx);
4331         }
4332         mmiowb();
4333
4334         return received;
4335 }
4336
4337 static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
4338 {
4339         struct tg3_hw_status *sblk = tp->hw_status;
4340
4341         /* handle link change and other phy events */
4342         if (!(tp->tg3_flags &
4343               (TG3_FLAG_USE_LINKCHG_REG |
4344                TG3_FLAG_POLL_SERDES))) {
4345                 if (sblk->status & SD_STATUS_LINK_CHG) {
4346                         sblk->status = SD_STATUS_UPDATED |
4347                                 (sblk->status & ~SD_STATUS_LINK_CHG);
4348                         spin_lock(&tp->lock);
4349                         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4350                                 tw32_f(MAC_STATUS,
4351                                      (MAC_STATUS_SYNC_CHANGED |
4352                                       MAC_STATUS_CFG_CHANGED |
4353                                       MAC_STATUS_MI_COMPLETION |
4354                                       MAC_STATUS_LNKSTATE_CHANGED));
4355                                 udelay(40);
4356                         } else
4357                                 tg3_setup_phy(tp, 0);
4358                         spin_unlock(&tp->lock);
4359                 }
4360         }
4361
4362         /* run TX completion thread */
4363         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
4364                 tg3_tx(tp);
4365                 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4366                         return work_done;
4367         }
4368
4369         /* run RX thread, within the bounds set by NAPI.
4370          * All RX "locking" is done by ensuring outside
4371          * code synchronizes with tg3->napi.poll()
4372          */
4373         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
4374                 work_done += tg3_rx(tp, budget - work_done);
4375
4376         return work_done;
4377 }
4378
4379 static int tg3_poll(struct napi_struct *napi, int budget)
4380 {
4381         struct tg3 *tp = container_of(napi, struct tg3, napi);
4382         int work_done = 0;
4383         struct tg3_hw_status *sblk = tp->hw_status;
4384
4385         while (1) {
4386                 work_done = tg3_poll_work(tp, work_done, budget);
4387
4388                 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4389                         goto tx_recovery;
4390
4391                 if (unlikely(work_done >= budget))
4392                         break;
4393
4394                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
4395                         /* tp->last_tag is used in tg3_restart_ints() below
4396                          * to tell the hw how much work has been processed,
4397                          * so we must read it before checking for more work.
4398                          */
4399                         tp->last_tag = sblk->status_tag;
4400                         rmb();
4401                 } else
4402                         sblk->status &= ~SD_STATUS_UPDATED;
4403
4404                 if (likely(!tg3_has_work(tp))) {
4405                         netif_rx_complete(tp->dev, napi);
4406                         tg3_restart_ints(tp);
4407                         break;
4408                 }
4409         }
4410
4411         return work_done;
4412
4413 tx_recovery:
4414         /* work_done is guaranteed to be less than budget. */
4415         netif_rx_complete(tp->dev, napi);
4416         schedule_work(&tp->reset_task);
4417         return work_done;
4418 }
4419
4420 static void tg3_irq_quiesce(struct tg3 *tp)
4421 {
4422         BUG_ON(tp->irq_sync);
4423
4424         tp->irq_sync = 1;
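        /* Order the irq_sync store before the synchronize_irq() wait below:
         * interrupt handlers that run after this point should observe it
         * via tg3_irq_sync() and skip scheduling NAPI.
         */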
4425         smp_mb();
4426
4427         synchronize_irq(tp->pdev->irq);
4428 }
4429
4430 static inline int tg3_irq_sync(struct tg3 *tp)
4431 {
4432         return tp->irq_sync;
4433 }
4434
4435 /* Fully shut down all tg3 driver activity elsewhere in the system.
4436  * If irq_sync is non-zero, then the IRQ handler is synchronized
4437  * with as well.  Most of the time, this is not necessary except when
4438  * shutting down the device.
4439  */
4440 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
4441 {
4442         spin_lock_bh(&tp->lock);
4443         if (irq_sync)
4444                 tg3_irq_quiesce(tp);
4445 }
4446
4447 static inline void tg3_full_unlock(struct tg3 *tp)
4448 {
4449         spin_unlock_bh(&tp->lock);
4450 }
4451
4452 /* One-shot MSI handler - the chip automatically disables the interrupt
4453  * after sending the MSI, so the driver doesn't have to.
4454  */
4455 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
4456 {
4457         struct net_device *dev = dev_id;
4458         struct tg3 *tp = netdev_priv(dev);
4459
4460         prefetch(tp->hw_status);
4461         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4462
4463         if (likely(!tg3_irq_sync(tp)))
4464                 netif_rx_schedule(dev, &tp->napi);
4465
4466         return IRQ_HANDLED;
4467 }
4468
4469 /* MSI ISR - No need to check for interrupt sharing and no need to
4470  * flush status block and interrupt mailbox. PCI ordering rules
4471  * guarantee that MSI will arrive after the status block.
4472  */
4473 static irqreturn_t tg3_msi(int irq, void *dev_id)
4474 {
4475         struct net_device *dev = dev_id;
4476         struct tg3 *tp = netdev_priv(dev);
4477
4478         prefetch(tp->hw_status);
4479         prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4480         /*
4481          * Writing any value to intr-mbox-0 clears PCI INTA# and
4482          * chip-internal interrupt pending events.
4483          * Writing non-zero to intr-mbox-0 additionally tells the
4484          * NIC to stop sending us irqs, engaging "in-intr-handler"
4485          * event coalescing.
4486          */
4487         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4488         if (likely(!tg3_irq_sync(tp)))
4489                 netif_rx_schedule(dev, &tp->napi);
4490
4491         return IRQ_RETVAL(1);
4492 }
4493
4494 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
4495 {
4496         struct net_device *dev = dev_id;
4497         struct tg3 *tp = netdev_priv(dev);
4498         struct tg3_hw_status *sblk = tp->hw_status;
4499         unsigned int handled = 1;
4500
4501         /* In INTx mode, it is possible for the interrupt to arrive at
4502          * the CPU before the status block write that preceded it.
4503          * Reading the PCI State register will confirm whether the
4504          * interrupt is ours and will flush the status block.
4505          */
4506         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
4507                 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4508                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4509                         handled = 0;
4510                         goto out;
4511                 }
4512         }
4513
4514         /*
4515          * Writing any value to intr-mbox-0 clears PCI INTA# and
4516          * chip-internal interrupt pending events.
4517          * Writing non-zero to intr-mbox-0 additionally tells the
4518          * NIC to stop sending us irqs, engaging "in-intr-handler"
4519          * event coalescing.
4520          *
4521          * Flush the mailbox to de-assert the IRQ immediately to prevent
4522          * spurious interrupts.  The flush impacts performance but
4523          * excessive spurious interrupts can be worse in some cases.
4524          */
4525         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4526         if (tg3_irq_sync(tp))
4527                 goto out;
4528         sblk->status &= ~SD_STATUS_UPDATED;
4529         if (likely(tg3_has_work(tp))) {
4530                 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4531                 netif_rx_schedule(dev, &tp->napi);
4532         } else {
4533                 /* No work, shared interrupt perhaps?  re-enable
4534                  * interrupts, and flush that PCI write
4535                  */
4536                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
4537                                0x00000000);
4538         }
4539 out:
4540         return IRQ_RETVAL(handled);
4541 }
4542
4543 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
4544 {
4545         struct net_device *dev = dev_id;
4546         struct tg3 *tp = netdev_priv(dev);
4547         struct tg3_hw_status *sblk = tp->hw_status;
4548         unsigned int handled = 1;
4549
4550         /* In INTx mode, it is possible for the interrupt to arrive at
4551          * the CPU before the status block write that preceded it.
4552          * Reading the PCI State register will confirm whether the
4553          * interrupt is ours and will flush the status block.
4554          */
4555         if (unlikely(sblk->status_tag == tp->last_tag)) {
4556                 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4557                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4558                         handled = 0;
4559                         goto out;
4560                 }
4561         }
4562
4563         /*
4564          * Writing any value to intr-mbox-0 clears PCI INTA# and
4565          * chip-internal interrupt pending events.
4566          * Writing non-zero to intr-mbox-0 additionally tells the
4567          * NIC to stop sending us irqs, engaging "in-intr-handler"
4568          * event coalescing.
4569          *
4570          * Flush the mailbox to de-assert the IRQ immediately to prevent
4571          * spurious interrupts.  The flush impacts performance but
4572          * excessive spurious interrupts can be worse in some cases.
4573          */
4574         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4575         if (tg3_irq_sync(tp))
4576                 goto out;
4577         if (netif_rx_schedule_prep(dev, &tp->napi)) {
4578                 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4579                 /* Update last_tag to mark that this status has been
4580                  * seen. Because interrupt may be shared, we may be
4581                  * racing with tg3_poll(), so only update last_tag
4582                  * if tg3_poll() is not scheduled.
4583                  */
4584                 tp->last_tag = sblk->status_tag;
4585                 __netif_rx_schedule(dev, &tp->napi);
4586         }
4587 out:
4588         return IRQ_RETVAL(handled);
4589 }
4590
4591 /* ISR for interrupt test */
4592 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
4593 {
4594         struct net_device *dev = dev_id;
4595         struct tg3 *tp = netdev_priv(dev);
4596         struct tg3_hw_status *sblk = tp->hw_status;
4597
4598         if ((sblk->status & SD_STATUS_UPDATED) ||
4599             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4600                 tg3_disable_ints(tp);
4601                 return IRQ_RETVAL(1);
4602         }
4603         return IRQ_RETVAL(0);
4604 }
4605
4606 static int tg3_init_hw(struct tg3 *, int);
4607 static int tg3_halt(struct tg3 *, int, int);
4608
4609 /* Restart hardware after configuration changes, self-test, etc.
4610  * Invoked with tp->lock held.
4611  */
4612 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
4613         __releases(tp->lock)
4614         __acquires(tp->lock)
4615 {
4616         int err;
4617
4618         err = tg3_init_hw(tp, reset_phy);
4619         if (err) {
4620                 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
4621                        "aborting.\n", tp->dev->name);
4622                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4623                 tg3_full_unlock(tp);
4624                 del_timer_sync(&tp->timer);
4625                 tp->irq_sync = 0;
4626                 napi_enable(&tp->napi);
4627                 dev_close(tp->dev);
4628                 tg3_full_lock(tp, 0);
4629         }
4630         return err;
4631 }
4632
4633 #ifdef CONFIG_NET_POLL_CONTROLLER
4634 static void tg3_poll_controller(struct net_device *dev)
4635 {
4636         struct tg3 *tp = netdev_priv(dev);
4637
4638         tg3_interrupt(tp->pdev->irq, dev);
4639 }
4640 #endif
4641
4642 static void tg3_reset_task(struct work_struct *work)
4643 {
4644         struct tg3 *tp = container_of(work, struct tg3, reset_task);
4645         int err;
4646         unsigned int restart_timer;
4647
4648         tg3_full_lock(tp, 0);
4649
4650         if (!netif_running(tp->dev)) {
4651                 tg3_full_unlock(tp);
4652                 return;
4653         }
4654
4655         tg3_full_unlock(tp);
4656
4657         tg3_phy_stop(tp);
4658
4659         tg3_netif_stop(tp);
4660
4661         tg3_full_lock(tp, 1);
4662
4663         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
4664         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
4665
4666         if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
4667                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
4668                 tp->write32_rx_mbox = tg3_write_flush_reg32;
4669                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
4670                 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
4671         }
4672
4673         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
4674         err = tg3_init_hw(tp, 1);
4675         if (err)
4676                 goto out;
4677
4678         tg3_netif_start(tp);
4679
4680         if (restart_timer)
4681                 mod_timer(&tp->timer, jiffies + 1);
4682
4683 out:
4684         tg3_full_unlock(tp);
4685
4686         if (!err)
4687                 tg3_phy_start(tp);
4688 }
4689
4690 static void tg3_dump_short_state(struct tg3 *tp)
4691 {
4692         printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
4693                tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
4694         printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
4695                tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
4696 }
4697
4698 static void tg3_tx_timeout(struct net_device *dev)
4699 {
4700         struct tg3 *tp = netdev_priv(dev);
4701
4702         if (netif_msg_tx_err(tp)) {
4703                 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
4704                        dev->name);
4705                 tg3_dump_short_state(tp);
4706         }
4707
4708         schedule_work(&tp->reset_task);
4709 }
4710
4711 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
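/* Worked example: with base = 0xfffff000 and len = 0x2000, the 32-bit sum
 * base + len + 8 wraps around to 0x1008, which is less than base, so the
 * buffer straddles a 4GB boundary and the test returns true.  The
 * base > 0xffffdcc0 check restricts the test to mappings that start within
 * 0x2340 (9024) bytes of a boundary, which is presumably sized to cover the
 * largest buffer the driver maps.
 */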
4712 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4713 {
4714         u32 base = (u32) mapping & 0xffffffff;
4715
4716         return ((base > 0xffffdcc0) &&
4717                 (base + len + 8 < base));
4718 }
4719
4720 /* Test for DMA addresses > 40-bit */
4721 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4722                                           int len)
4723 {
4724 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
4725         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
4726                 return (((u64) mapping + len) > DMA_40BIT_MASK);
4727         return 0;
4728 #else
4729         return 0;
4730 #endif
4731 }
4732
4733 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4734
4735 /* Workaround 4GB and 40-bit hardware DMA bugs. */
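/* The strategy: copy the offending skb into a freshly allocated, linear
 * skb, DMA-map the copy, and (provided the new mapping itself does not
 * cross a 4GB boundary) substitute it into the TX ring at *start.  The sw
 * ring entries previously claimed by the original skb are then cleaned up,
 * and the original skb is unmapped and freed.
 */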
4736 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
4737                                        u32 last_plus_one, u32 *start,
4738                                        u32 base_flags, u32 mss)
4739 {
4740         struct sk_buff *new_skb;
4741         dma_addr_t new_addr = 0;
4742         u32 entry = *start;
4743         int i, ret = 0;
4744
4745         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
4746                 new_skb = skb_copy(skb, GFP_ATOMIC);
4747         else {
4748                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
4749
4750                 new_skb = skb_copy_expand(skb,
4751                                           skb_headroom(skb) + more_headroom,
4752                                           skb_tailroom(skb), GFP_ATOMIC);
4753         }
4754
4755         if (!new_skb) {
4756                 ret = -1;
4757         } else {
4758                 /* New SKB is guaranteed to be linear. */
4759                 entry = *start;
4760                 ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
4761                 new_addr = skb_shinfo(new_skb)->dma_maps[0];
4762
4763                 /* Make sure new skb does not cross any 4G boundaries.
4764                  * Drop the packet if it does.
4765                  */
4766                 if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
4767                         if (!ret)
4768                                 skb_dma_unmap(&tp->pdev->dev, new_skb,
4769                                               DMA_TO_DEVICE);
4770                         ret = -1;
4771                         dev_kfree_skb(new_skb);
4772                         new_skb = NULL;
4773                 } else {
4774                         tg3_set_txd(tp, entry, new_addr, new_skb->len,
4775                                     base_flags, 1 | (mss << 1));
4776                         *start = NEXT_TX(entry);
4777                 }
4778         }
4779
4780         /* Now clean up the sw ring entries. */
4781         i = 0;
4782         while (entry != last_plus_one) {
4783                 if (i == 0) {
4784                         tp->tx_buffers[entry].skb = new_skb;
4785                 } else {
4786                         tp->tx_buffers[entry].skb = NULL;
4787                 }
4788                 entry = NEXT_TX(entry);
4789                 i++;
4790         }
4791
4792         skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
4793         dev_kfree_skb(skb);
4794
4795         return ret;
4796 }
4797
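/* Fill one TX descriptor.  Callers pack the MSS and the "last fragment"
 * flag into a single argument: mss_and_is_end = (mss << 1) | is_end.
 * When TXD_FLAG_VLAN is set, the VLAN tag arrives in the upper 16 bits of
 * 'flags' and is moved into the descriptor's vlan_tag word here, alongside
 * the MSS shifted by TXD_MSS_SHIFT.
 */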
4798 static void tg3_set_txd(struct tg3 *tp, int entry,
4799                         dma_addr_t mapping, int len, u32 flags,
4800                         u32 mss_and_is_end)
4801 {
4802         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4803         int is_end = (mss_and_is_end & 0x1);
4804         u32 mss = (mss_and_is_end >> 1);
4805         u32 vlan_tag = 0;
4806
4807         if (is_end)
4808                 flags |= TXD_FLAG_END;
4809         if (flags & TXD_FLAG_VLAN) {
4810                 vlan_tag = flags >> 16;
4811                 flags &= 0xffff;
4812         }
4813         vlan_tag |= (mss << TXD_MSS_SHIFT);
4814
4815         txd->addr_hi = ((u64) mapping >> 32);
4816         txd->addr_lo = ((u64) mapping & 0xffffffff);
4817         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4818         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4819 }
4820
4821 /* hard_start_xmit for devices that don't have any bugs and
4822  * support TG3_FLG2_HW_TSO_2 only.
4823  */
4824 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4825 {
4826         struct tg3 *tp = netdev_priv(dev);
4827         u32 len, entry, base_flags, mss;
4828         struct skb_shared_info *sp;
4829         dma_addr_t mapping;
4830
4831         len = skb_headlen(skb);
4832
4833         /* We are running in BH disabled context with netif_tx_lock
4834          * and TX reclaim runs via tp->napi.poll inside of a software
4835          * interrupt.  Furthermore, IRQ processing runs lockless so we have
4836          * no IRQ context deadlocks to worry about either.  Rejoice!
4837          */
4838         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4839                 if (!netif_queue_stopped(dev)) {
4840                         netif_stop_queue(dev);
4841
4842                         /* This is a hard error, log it. */
4843                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4844                                "queue awake!\n", dev->name);
4845                 }
4846                 return NETDEV_TX_BUSY;
4847         }
4848
4849         entry = tp->tx_prod;
4850         base_flags = 0;
4851         mss = 0;
4852         if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4853                 int tcp_opt_len, ip_tcp_len;
4854
4855                 if (skb_header_cloned(skb) &&
4856                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4857                         dev_kfree_skb(skb);
4858                         goto out_unlock;
4859                 }
4860
4861                 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
4862                         mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
4863                 else {
4864                         struct iphdr *iph = ip_hdr(skb);
4865
4866                         tcp_opt_len = tcp_optlen(skb);
4867                         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4868
4869                         iph->check = 0;
4870                         iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4871                         mss |= (ip_tcp_len + tcp_opt_len) << 9;
4872                 }
4873
4874                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4875                                TXD_FLAG_CPU_POST_DMA);
4876
4877                 tcp_hdr(skb)->check = 0;
4878
4879         }
4880         else if (skb->ip_summed == CHECKSUM_PARTIAL)
4881                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4882 #if TG3_VLAN_TAG_USED
4883         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4884                 base_flags |= (TXD_FLAG_VLAN |
4885                                (vlan_tx_tag_get(skb) << 16));
4886 #endif
4887
4888         if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
4889                 dev_kfree_skb(skb);
4890                 goto out_unlock;
4891         }
4892
4893         sp = skb_shinfo(skb);
4894
4895         mapping = sp->dma_maps[0];
4896
4897         tp->tx_buffers[entry].skb = skb;
4898
4899         tg3_set_txd(tp, entry, mapping, len, base_flags,
4900                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4901
4902         entry = NEXT_TX(entry);
4903
4904         /* Now loop through additional data fragments, and queue them. */
4905         if (skb_shinfo(skb)->nr_frags > 0) {
4906                 unsigned int i, last;
4907
4908                 last = skb_shinfo(skb)->nr_frags - 1;
4909                 for (i = 0; i <= last; i++) {
4910                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4911
4912                         len = frag->size;
4913                         mapping = sp->dma_maps[i + 1];
4914                         tp->tx_buffers[entry].skb = NULL;
4915
4916                         tg3_set_txd(tp, entry, mapping, len,
4917                                     base_flags, (i == last) | (mss << 1));
4918
4919                         entry = NEXT_TX(entry);
4920                 }
4921         }
4922
4923         /* Packets are ready, update Tx producer idx local and on card. */
4924         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4925
4926         tp->tx_prod = entry;
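        /* If the ring is now nearly full, stop the queue, then re-check
         * availability: tg3_tx() may have freed entries in the meantime,
         * and its smp_mb() + wake path only runs when it sees the queue
         * stopped, so this second check avoids leaving the queue stopped
         * while plenty of descriptors are actually free.
         */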
4927         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4928                 netif_stop_queue(dev);
4929                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4930                         netif_wake_queue(tp->dev);
4931         }
4932
4933 out_unlock:
4934         mmiowb();
4935
4936         dev->trans_start = jiffies;
4937
4938         return NETDEV_TX_OK;
4939 }
4940
4941 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4942
4943 /* Use GSO to work around a rare TSO bug that may be triggered when the
4944  * TSO header is greater than 80 bytes.
4945  */
4946 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4947 {
4948         struct sk_buff *segs, *nskb;
4949
4950         /* Estimate the number of fragments in the worst case */
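        /* (Three descriptors per GSO segment appears to be a conservative
         * upper bound rather than an exact count; if even that much space
         * is not free, back off and let the stack retry later.)
         */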
4951         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
4952                 netif_stop_queue(tp->dev);
4953                 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4954                         return NETDEV_TX_BUSY;
4955
4956                 netif_wake_queue(tp->dev);
4957         }
4958
4959         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
4960         if (IS_ERR(segs))
4961                 goto tg3_tso_bug_end;
4962
4963         do {
4964                 nskb = segs;
4965                 segs = segs->next;
4966                 nskb->next = NULL;
4967                 tg3_start_xmit_dma_bug(nskb, tp->dev);
4968         } while (segs);
4969
4970 tg3_tso_bug_end:
4971         dev_kfree_skb(skb);
4972
4973         return NETDEV_TX_OK;
4974 }
4975
4976 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4977  * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4978  */
4979 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4980 {
4981         struct tg3 *tp = netdev_priv(dev);
4982         u32 len, entry, base_flags, mss;
4983         struct skb_shared_info *sp;
4984         int would_hit_hwbug;
4985         dma_addr_t mapping;
4986
4987         len = skb_headlen(skb);
4988
4989         /* We are running in BH disabled context with netif_tx_lock
4990          * and TX reclaim runs via tp->napi.poll inside of a software
4991          * interrupt.  Furthermore, IRQ processing runs lockless so we have
4992          * no IRQ context deadlocks to worry about either.  Rejoice!
4993          */
4994         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4995                 if (!netif_queue_stopped(dev)) {
4996                         netif_stop_queue(dev);
4997
4998                         /* This is a hard error, log it. */
4999                         printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
5000                                "queue awake!\n", dev->name);
5001                 }
5002                 return NETDEV_TX_BUSY;
5003         }
5004
5005         entry = tp->tx_prod;
5006         base_flags = 0;
5007         if (skb->ip_summed == CHECKSUM_PARTIAL)
5008                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5009         mss = 0;
5010         if ((mss = skb_shinfo(skb)->gso_size) != 0) {
5011                 struct iphdr *iph;
5012                 int tcp_opt_len, ip_tcp_len, hdr_len;
5013
5014                 if (skb_header_cloned(skb) &&
5015                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5016                         dev_kfree_skb(skb);
5017                         goto out_unlock;
5018                 }
5019
5020                 tcp_opt_len = tcp_optlen(skb);
5021                 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5022
5023                 hdr_len = ip_tcp_len + tcp_opt_len;
5024                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5025                              (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
5026                         return (tg3_tso_bug(tp, skb));
5027
5028                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5029                                TXD_FLAG_CPU_POST_DMA);
5030
5031                 iph = ip_hdr(skb);
5032                 iph->check = 0;
5033                 iph->tot_len = htons(mss + hdr_len);
5034                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
5035                         tcp_hdr(skb)->check = 0;
5036                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5037                 } else
5038                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5039                                                                  iph->daddr, 0,
5040                                                                  IPPROTO_TCP,
5041                                                                  0);
5042
5043                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
5044                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
5045                         if (tcp_opt_len || iph->ihl > 5) {
5046                                 int tsflags;
5047
5048                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5049                                 mss |= (tsflags << 11);
5050                         }
5051                 } else {
5052                         if (tcp_opt_len || iph->ihl > 5) {
5053                                 int tsflags;
5054
5055                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5056                                 base_flags |= tsflags << 12;
5057                         }
5058                 }
5059         }
5060 #if TG3_VLAN_TAG_USED
5061         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5062                 base_flags |= (TXD_FLAG_VLAN |
5063                                (vlan_tx_tag_get(skb) << 16));
5064 #endif
5065
5066         if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
5067                 dev_kfree_skb(skb);
5068                 goto out_unlock;
5069         }
5070
5071         sp = skb_shinfo(skb);
5072
5073         mapping = sp->dma_maps[0];
5074
5075         tp->tx_buffers[entry].skb = skb;
5076
5077         would_hit_hwbug = 0;
5078
5079         if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
5080                 would_hit_hwbug = 1;
5081         else if (tg3_4g_overflow_test(mapping, len))
5082                 would_hit_hwbug = 1;
5083
5084         tg3_set_txd(tp, entry, mapping, len, base_flags,
5085                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5086
5087         entry = NEXT_TX(entry);
5088
5089         /* Now loop through additional data fragments, and queue them. */
5090         if (skb_shinfo(skb)->nr_frags > 0) {
5091                 unsigned int i, last;
5092
5093                 last = skb_shinfo(skb)->nr_frags - 1;
5094                 for (i = 0; i <= last; i++) {
5095                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5096
5097                         len = frag->size;
5098                         mapping = sp->dma_maps[i + 1];
5099
5100                         tp->tx_buffers[entry].skb = NULL;
5101
5102                         if (tg3_4g_overflow_test(mapping, len))
5103                                 would_hit_hwbug = 1;
5104
5105                         if (tg3_40bit_overflow_test(tp, mapping, len))
5106                                 would_hit_hwbug = 1;
5107
5108                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5109                                 tg3_set_txd(tp, entry, mapping, len,
5110                                             base_flags, (i == last)|(mss << 1));
5111                         else
5112                                 tg3_set_txd(tp, entry, mapping, len,
5113                                             base_flags, (i == last));
5114
5115                         entry = NEXT_TX(entry);
5116                 }
5117         }
5118
5119         if (would_hit_hwbug) {
5120                 u32 last_plus_one = entry;
5121                 u32 start;
5122
5123                 start = entry - 1 - skb_shinfo(skb)->nr_frags;
5124                 start &= (TG3_TX_RING_SIZE - 1);
5125
5126                 /* If the workaround fails due to memory/mapping
5127                  * failure, silently drop this packet.
5128                  */
5129                 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
5130                                                 &start, base_flags, mss))
5131                         goto out_unlock;
5132
5133                 entry = start;
5134         }
5135
5136         /* Packets are ready, update Tx producer idx local and on card. */
5137         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
5138
5139         tp->tx_prod = entry;
5140         if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
5141                 netif_stop_queue(dev);
5142                 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
5143                         netif_wake_queue(tp->dev);
5144         }
5145
5146 out_unlock:
5147         mmiowb();
5148
5149         dev->trans_start = jiffies;
5150
5151         return NETDEV_TX_OK;
5152 }
5153
5154 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5155                                int new_mtu)
5156 {
5157         dev->mtu = new_mtu;
5158
5159         if (new_mtu > ETH_DATA_LEN) {
5160                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5161                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5162                         ethtool_op_set_tso(dev, 0);
5163                 }
5164                 else
5165                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5166         } else {
5167                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5168                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
5169                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
5170         }
5171 }
5172
5173 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
5174 {
5175         struct tg3 *tp = netdev_priv(dev);
5176         int err;
5177
5178         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
5179                 return -EINVAL;
5180
5181         if (!netif_running(dev)) {
5182                 /* We'll just catch it later when the
5183                  * device is brought up.
5184                  */
5185                 tg3_set_mtu(dev, tp, new_mtu);
5186                 return 0;
5187         }
5188
5189         tg3_phy_stop(tp);
5190
5191         tg3_netif_stop(tp);
5192
5193         tg3_full_lock(tp, 1);
5194
5195         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5196
5197         tg3_set_mtu(dev, tp, new_mtu);
5198
5199         err = tg3_restart_hw(tp, 0);
5200
5201         if (!err)
5202                 tg3_netif_start(tp);
5203
5204         tg3_full_unlock(tp);
5205
5206         if (!err)
5207                 tg3_phy_start(tp);
5208
5209         return err;
5210 }
5211
5212 /* Free up pending packets in all rx/tx rings.
5213  *
5214  * The chip has been shut down and the driver detached from
5215  * the networking, so no interrupts or new tx packets will
5216  * the networking stack, so no interrupts or new tx packets will
5217  * in an interrupt context and thus may sleep.
5218  */
5219 static void tg3_free_rings(struct tg3 *tp)
5220 {
5221         struct ring_info *rxp;
5222         int i;
5223
5224         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5225                 rxp = &tp->rx_std_buffers[i];
5226
5227                 if (rxp->skb == NULL)
5228                         continue;
5229                 pci_unmap_single(tp->pdev,
5230                                  pci_unmap_addr(rxp, mapping),
5231                                  tp->rx_pkt_buf_sz - tp->rx_offset,
5232                                  PCI_DMA_FROMDEVICE);
5233                 dev_kfree_skb_any(rxp->skb);
5234                 rxp->skb = NULL;
5235         }
5236
5237         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5238                 rxp = &tp->rx_jumbo_buffers[i];
5239
5240                 if (rxp->skb == NULL)
5241                         continue;
5242                 pci_unmap_single(tp->pdev,
5243                                  pci_unmap_addr(rxp, mapping),
5244                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
5245                                  PCI_DMA_FROMDEVICE);
5246                 dev_kfree_skb_any(rxp->skb);
5247                 rxp->skb = NULL;
5248         }
5249
5250         for (i = 0; i < TG3_TX_RING_SIZE; ) {
5251                 struct tx_ring_info *txp;
5252                 struct sk_buff *skb;
5253
5254                 txp = &tp->tx_buffers[i];
5255                 skb = txp->skb;
5256
5257                 if (skb == NULL) {
5258                         i++;
5259                         continue;
5260                 }
5261
5262                 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5263
5264                 txp->skb = NULL;
5265
5266                 i += skb_shinfo(skb)->nr_frags + 1;
5267
5268                 dev_kfree_skb_any(skb);
5269         }
5270 }
5271
5272 /* Initialize tx/rx rings for packet processing.
5273  *
5274  * The chip has been shut down and the driver detached from
5275  * the networking, so no interrupts or new tx packets will
5276  * the networking stack, so no interrupts or new tx packets will
5277  * we may not sleep.
5278  */
5279 static int tg3_init_rings(struct tg3 *tp)
5280 {
5281         u32 i;
5282
5283         /* Free up all the SKBs. */
5284         tg3_free_rings(tp);
5285
5286         /* Zero out all descriptors. */
5287         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
5288         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
5289         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
5290         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
5291
5292         tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
5293         if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
5294             (tp->dev->mtu > ETH_DATA_LEN))
5295                 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
5296
5297         /* Initialize invariants of the rings; we only set this
5298          * stuff once.  This works because the card does not
5299          * write into the rx buffer posting rings.
5300          */
5301         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5302                 struct tg3_rx_buffer_desc *rxd;
5303
5304                 rxd = &tp->rx_std[i];
5305                 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
5306                         << RXD_LEN_SHIFT;
5307                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
5308                 rxd->opaque = (RXD_OPAQUE_RING_STD |
5309                                (i << RXD_OPAQUE_INDEX_SHIFT));
5310         }
5311
5312         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5313                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5314                         struct tg3_rx_buffer_desc *rxd;
5315
5316                         rxd = &tp->rx_jumbo[i];
5317                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
5318                                 << RXD_LEN_SHIFT;
5319                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
5320                                 RXD_FLAG_JUMBO;
5321                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
5322                                (i << RXD_OPAQUE_INDEX_SHIFT));
5323                 }
5324         }
5325
5326         /* Now allocate fresh SKBs for each rx ring. */
5327         for (i = 0; i < tp->rx_pending; i++) {
5328                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
5329                         printk(KERN_WARNING PFX
5330                                "%s: Using a smaller RX standard ring, "
5331                                "only %d out of %d buffers were allocated "
5332                                "successfully.\n",
5333                                tp->dev->name, i, tp->rx_pending);
5334                         if (i == 0)
5335                                 return -ENOMEM;
5336                         tp->rx_pending = i;
5337                         break;
5338                 }
5339         }
5340
5341         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5342                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
5343                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
5344                                              -1, i) < 0) {
5345                                 printk(KERN_WARNING PFX
5346                                        "%s: Using a smaller RX jumbo ring, "
5347                                        "only %d out of %d buffers were "
5348                                        "allocated successfully.\n",
5349                                        tp->dev->name, i, tp->rx_jumbo_pending);
5350                                 if (i == 0) {
5351                                         tg3_free_rings(tp);
5352                                         return -ENOMEM;
5353                                 }
5354                                 tp->rx_jumbo_pending = i;
5355                                 break;
5356                         }
5357                 }
5358         }
5359         return 0;
5360 }
5361
5362 /*
5363  * Must not be invoked with interrupt sources disabled and
5364  * the hardware shut down.
5365  */
5366 static void tg3_free_consistent(struct tg3 *tp)
5367 {
5368         kfree(tp->rx_std_buffers);
5369         tp->rx_std_buffers = NULL;
5370         if (tp->rx_std) {
5371                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
5372                                     tp->rx_std, tp->rx_std_mapping);
5373                 tp->rx_std = NULL;
5374         }
5375         if (tp->rx_jumbo) {
5376                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5377                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
5378                 tp->rx_jumbo = NULL;
5379         }
5380         if (tp->rx_rcb) {
5381                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5382                                     tp->rx_rcb, tp->rx_rcb_mapping);
5383                 tp->rx_rcb = NULL;
5384         }
5385         if (tp->tx_ring) {
5386                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
5387                         tp->tx_ring, tp->tx_desc_mapping);
5388                 tp->tx_ring = NULL;
5389         }
5390         if (tp->hw_status) {
5391                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
5392                                     tp->hw_status, tp->status_mapping);
5393                 tp->hw_status = NULL;
5394         }
5395         if (tp->hw_stats) {
5396                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
5397                                     tp->hw_stats, tp->stats_mapping);
5398                 tp->hw_stats = NULL;
5399         }
5400 }
5401
5402 /*
5403  * Must not be invoked with interrupt sources disabled and
5404  * the hardware shut down.  Can sleep.
5405  */
5406 static int tg3_alloc_consistent(struct tg3 *tp)
5407 {
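        /* One contiguous allocation backs all three software ring-info
         * arrays, laid out back to back: TG3_RX_RING_SIZE standard entries,
         * then TG3_RX_JUMBO_RING_SIZE jumbo entries, then TG3_TX_RING_SIZE
         * tx_ring_info entries.  rx_jumbo_buffers and tx_buffers below are
         * simply pointers into this block.
         */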
5408         tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
5409                                       (TG3_RX_RING_SIZE +
5410                                        TG3_RX_JUMBO_RING_SIZE)) +
5411                                      (sizeof(struct tx_ring_info) *
5412                                       TG3_TX_RING_SIZE),
5413                                      GFP_KERNEL);
5414         if (!tp->rx_std_buffers)
5415                 return -ENOMEM;
5416
5417         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
5418         tp->tx_buffers = (struct tx_ring_info *)
5419                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
5420
5421         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
5422                                           &tp->rx_std_mapping);
5423         if (!tp->rx_std)
5424                 goto err_out;
5425
5426         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5427                                             &tp->rx_jumbo_mapping);
5428
5429         if (!tp->rx_jumbo)
5430                 goto err_out;
5431
5432         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5433                                           &tp->rx_rcb_mapping);
5434         if (!tp->rx_rcb)
5435                 goto err_out;
5436
5437         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
5438                                            &tp->tx_desc_mapping);
5439         if (!tp->tx_ring)
5440                 goto err_out;
5441
5442         tp->hw_status = pci_alloc_consistent(tp->pdev,
5443                                              TG3_HW_STATUS_SIZE,
5444                                              &tp->status_mapping);
5445         if (!tp->hw_status)
5446                 goto err_out;
5447
5448         tp->hw_stats = pci_alloc_consistent(tp->pdev,
5449                                             sizeof(struct tg3_hw_stats),
5450                                             &tp->stats_mapping);
5451         if (!tp->hw_stats)
5452                 goto err_out;
5453
5454         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5455         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5456
5457         return 0;
5458
5459 err_out:
5460         tg3_free_consistent(tp);
5461         return -ENOMEM;
5462 }
5463
5464 #define MAX_WAIT_CNT 1000
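/* With the 100 usec poll interval used below, MAX_WAIT_CNT bounds each
 * stop/disable wait at roughly 100 ms.
 */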
5465
5466 /* To stop a block, clear the enable bit and poll till it
5467  * clears.  tp->lock is held.
5468  */
5469 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
5470 {
5471         unsigned int i;
5472         u32 val;
5473
5474         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5475                 switch (ofs) {
5476                 case RCVLSC_MODE:
5477                 case DMAC_MODE:
5478                 case MBFREE_MODE:
5479                 case BUFMGR_MODE:
5480                 case MEMARB_MODE:
5481                         /* We can't enable/disable these bits of the
5482                          * 5705/5750, just say success.
5483                          */
5484                         return 0;
5485
5486                 default:
5487                         break;
5488                 }
5489         }
5490
5491         val = tr32(ofs);
5492         val &= ~enable_bit;
5493         tw32_f(ofs, val);
5494
5495         for (i = 0; i < MAX_WAIT_CNT; i++) {
5496                 udelay(100);
5497                 val = tr32(ofs);
5498                 if ((val & enable_bit) == 0)
5499                         break;
5500         }
5501
5502         if (i == MAX_WAIT_CNT && !silent) {
5503                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
5504                        "ofs=%lx enable_bit=%x\n",
5505                        ofs, enable_bit);
5506                 return -ENODEV;
5507         }
5508
5509         return 0;
5510 }
5511
5512 /* tp->lock is held. */
5513 static int tg3_abort_hw(struct tg3 *tp, int silent)
5514 {
5515         int i, err;
5516
5517         tg3_disable_ints(tp);
5518
5519         tp->rx_mode &= ~RX_MODE_ENABLE;
5520         tw32_f(MAC_RX_MODE, tp->rx_mode);
5521         udelay(10);
5522
5523         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
5524         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
5525         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
5526         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
5527         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
5528         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
5529
5530         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
5531         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
5532         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
5533         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
5534         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
5535         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
5536         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
5537
5538         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
5539         tw32_f(MAC_MODE, tp->mac_mode);
5540         udelay(40);
5541
5542         tp->tx_mode &= ~TX_MODE_ENABLE;
5543         tw32_f(MAC_TX_MODE, tp->tx_mode);
5544
5545         for (i = 0; i < MAX_WAIT_CNT; i++) {
5546                 udelay(100);
5547                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
5548                         break;
5549         }
5550         if (i >= MAX_WAIT_CNT) {
5551                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
5552                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
5553                        tp->dev->name, tr32(MAC_TX_MODE));
5554                 err |= -ENODEV;
5555         }
5556
5557         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
5558         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
5559         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
5560
5561         tw32(FTQ_RESET, 0xffffffff);
5562         tw32(FTQ_RESET, 0x00000000);
5563
5564         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
5565         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
5566
5567         if (tp->hw_status)
5568                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5569         if (tp->hw_stats)
5570                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5571
5572         return err;
5573 }
5574
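/* Acquire the NVRAM software arbitration semaphore (SWARB REQ1/GNT1).
 * The lock is reference counted, so nested calls only touch the
 * hardware on the first acquisition.
 */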
5575 /* tp->lock is held. */
5576 static int tg3_nvram_lock(struct tg3 *tp)
5577 {
5578         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5579                 int i;
5580
5581                 if (tp->nvram_lock_cnt == 0) {
5582                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5583                         for (i = 0; i < 8000; i++) {
5584                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
5585                                         break;
5586                                 udelay(20);
5587                         }
5588                         if (i == 8000) {
5589                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5590                                 return -ENODEV;
5591                         }
5592                 }
5593                 tp->nvram_lock_cnt++;
5594         }
5595         return 0;
5596 }
5597
5598 /* tp->lock is held. */
5599 static void tg3_nvram_unlock(struct tg3 *tp)
5600 {
5601         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5602                 if (tp->nvram_lock_cnt > 0)
5603                         tp->nvram_lock_cnt--;
5604                 if (tp->nvram_lock_cnt == 0)
5605                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5606         }
5607 }
5608
5609 /* tp->lock is held. */
5610 static void tg3_enable_nvram_access(struct tg3 *tp)
5611 {
5612         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5613             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5614                 u32 nvaccess = tr32(NVRAM_ACCESS);
5615
5616                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5617         }
5618 }
5619
5620 /* tp->lock is held. */
5621 static void tg3_disable_nvram_access(struct tg3 *tp)
5622 {
5623         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5624             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5625                 u32 nvaccess = tr32(NVRAM_ACCESS);
5626
5627                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
5628         }
5629 }
5630
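/* Post an event to the APE management firmware.  Returns silently if
 * the APE segment signature or firmware-ready status is missing, and
 * waits up to 1 ms for any previously posted event to drain before
 * marking the new event pending and signalling it via TG3_APE_EVENT.
 */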
5631 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
5632 {
5633         int i;
5634         u32 apedata;
5635
5636         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
5637         if (apedata != APE_SEG_SIG_MAGIC)
5638                 return;
5639
5640         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
5641         if (!(apedata & APE_FW_STATUS_READY))
5642                 return;
5643
5644         /* Wait for up to 1 millisecond for APE to service previous event. */
5645         for (i = 0; i < 10; i++) {
5646                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
5647                         return;
5648
5649                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
5650
5651                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5652                         tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
5653                                         event | APE_EVENT_STATUS_EVENT_PENDING);
5654
5655                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
5656
5657                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5658                         break;
5659
5660                 udelay(100);
5661         }
5662
5663         if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5664                 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
5665 }
5666
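/* Translate a driver reset kind (init/shutdown/suspend) into an APE
 * state-change event.  For RESET_KIND_INIT this also (re)initializes
 * the host segment: signature, length, init count, driver ID and
 * behavior flags.
 */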
5667 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5668 {
5669         u32 event;
5670         u32 apedata;
5671
5672         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5673                 return;
5674
5675         switch (kind) {
5676                 case RESET_KIND_INIT:
5677                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5678                                         APE_HOST_SEG_SIG_MAGIC);
5679                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5680                                         APE_HOST_SEG_LEN_MAGIC);
5681                         apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5682                         tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5683                         tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5684                                         APE_HOST_DRIVER_ID_MAGIC);
5685                         tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5686                                         APE_HOST_BEHAV_NO_PHYLOCK);
5687
5688                         event = APE_EVENT_STATUS_STATE_START;
5689                         break;
5690                 case RESET_KIND_SHUTDOWN:
5691                         /* With the interface we are currently using,
5692                          * APE does not track driver state.  Wiping
5693                          * out the HOST SEGMENT SIGNATURE forces
5694                          * the APE to assume OS absent status.
5695                          */
5696                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
5697
5698                         event = APE_EVENT_STATUS_STATE_UNLOAD;
5699                         break;
5700                 case RESET_KIND_SUSPEND:
5701                         event = APE_EVENT_STATUS_STATE_SUSPEND;
5702                         break;
5703                 default:
5704                         return;
5705         }
5706
5707         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5708
5709         tg3_ape_send_event(tp, event);
5710 }
5711
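/* Tell the firmware a reset is imminent: write the magic value to the
 * firmware mailbox and, when the new ASF handshake is in use, record
 * the reset kind in the driver-state mailbox.  INIT and SUSPEND resets
 * are also forwarded to the APE.
 */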
5712 /* tp->lock is held. */
5713 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5714 {
5715         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5716                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
5717
5718         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5719                 switch (kind) {
5720                 case RESET_KIND_INIT:
5721                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5722                                       DRV_STATE_START);
5723                         break;
5724
5725                 case RESET_KIND_SHUTDOWN:
5726                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5727                                       DRV_STATE_UNLOAD);
5728                         break;
5729
5730                 case RESET_KIND_SUSPEND:
5731                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5732                                       DRV_STATE_SUSPEND);
5733                         break;
5734
5735                 default:
5736                         break;
5737                 }
5738         }
5739
5740         if (kind == RESET_KIND_INIT ||
5741             kind == RESET_KIND_SUSPEND)
5742                 tg3_ape_driver_state_change(tp, kind);
5743 }
5744
5745 /* tp->lock is held. */
5746 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5747 {
5748         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5749                 switch (kind) {
5750                 case RESET_KIND_INIT:
5751                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5752                                       DRV_STATE_START_DONE);
5753                         break;
5754
5755                 case RESET_KIND_SHUTDOWN:
5756                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5757                                       DRV_STATE_UNLOAD_DONE);
5758                         break;
5759
5760                 default:
5761                         break;
5762                 }
5763         }
5764
5765         if (kind == RESET_KIND_SHUTDOWN)
5766                 tg3_ape_driver_state_change(tp, kind);
5767 }
5768
5769 /* tp->lock is held. */
5770 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5771 {
5772         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5773                 switch (kind) {
5774                 case RESET_KIND_INIT:
5775                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5776                                       DRV_STATE_START);
5777                         break;
5778
5779                 case RESET_KIND_SHUTDOWN:
5780                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5781                                       DRV_STATE_UNLOAD);
5782                         break;
5783
5784                 case RESET_KIND_SUSPEND:
5785                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5786                                       DRV_STATE_SUSPEND);
5787                         break;
5788
5789                 default:
5790                         break;
5791                 }
5792         }
5793 }
5794
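/* Wait for the bootcode to finish initializing.  5906 parts are polled
 * via VCPU_STATUS for up to 20 ms; everything else polls the firmware
 * mailbox for the complemented magic value for up to one second.
 * Missing firmware (e.g. some Sun onboard parts) is reported once but
 * is not treated as an error.
 */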
5795 static int tg3_poll_fw(struct tg3 *tp)
5796 {
5797         int i;
5798         u32 val;
5799
5800         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5801                 /* Wait up to 20ms for init done. */
5802                 for (i = 0; i < 200; i++) {
5803                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5804                                 return 0;
5805                         udelay(100);
5806                 }
5807                 return -ENODEV;
5808         }
5809
5810         /* Wait for firmware initialization to complete. */
5811         for (i = 0; i < 100000; i++) {
5812                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5813                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5814                         break;
5815                 udelay(10);
5816         }
5817
5818         /* Chip might not be fitted with firmware.  Some Sun onboard
5819          * parts are configured like that.  So don't signal the timeout
5820          * of the above loop as an error, but do report the lack of
5821          * running firmware once.
5822          */
5823         if (i >= 100000 &&
5824             !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5825                 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5826
5827                 printk(KERN_INFO PFX "%s: No firmware running.\n",
5828                        tp->dev->name);
5829         }
5830
5831         return 0;
5832 }
5833
5834 /* Save PCI command register before chip reset */
5835 static void tg3_save_pci_state(struct tg3 *tp)
5836 {
5837         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
5838 }
5839
5840 /* Restore PCI state after chip reset */
5841 static void tg3_restore_pci_state(struct tg3 *tp)
5842 {
5843         u32 val;
5844
5845         /* Re-enable indirect register accesses. */
5846         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
5847                                tp->misc_host_ctrl);
5848
5849         /* Set MAX PCI retry to zero. */
5850         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
5851         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5852             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
5853                 val |= PCISTATE_RETRY_SAME_DMA;
5854         /* Allow reads and writes to the APE register and memory space. */
5855         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
5856                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
5857                        PCISTATE_ALLOW_APE_SHMEM_WR;
5858         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
5859
5860         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
5861
5862         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
5863                 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5864                         pcie_set_readrq(tp->pdev, 4096);
5865                 else {
5866                         pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
5867                                               tp->pci_cacheline_sz);
5868                         pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
5869                                               tp->pci_lat_timer);
5870                 }
5871         }
5872
5873         /* Make sure PCI-X relaxed ordering bit is clear. */
5874         if (tp->pcix_cap) {
5875                 u16 pcix_cmd;
5876
5877                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5878                                      &pcix_cmd);
5879                 pcix_cmd &= ~PCI_X_CMD_ERO;
5880                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5881                                       pcix_cmd);
5882         }
5883
5884         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5885
5886                 /* Chip reset on 5780 will reset MSI enable bit,
5887                  * so we need to restore it.
5888                  */
5889                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5890                         u16 ctrl;
5891
5892                         pci_read_config_word(tp->pdev,
5893                                              tp->msi_cap + PCI_MSI_FLAGS,
5894                                              &ctrl);
5895                         pci_write_config_word(tp->pdev,
5896                                               tp->msi_cap + PCI_MSI_FLAGS,
5897                                               ctrl | PCI_MSI_FLAGS_ENABLE);
5898                         val = tr32(MSGINT_MODE);
5899                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
5900                 }
5901         }
5902 }
5903
5904 static void tg3_stop_fw(struct tg3 *);
5905
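/* Perform a full GRC core-clock reset of the chip.  Relevant PCI
 * config state is saved beforehand and restored afterwards, MMIO
 * read-back flushes are avoided while the chip is inaccessible, and on
 * completion the MAC mode is reprogrammed, the firmware is polled and
 * the ASF enable state is reprobed.
 */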
5906 /* tp->lock is held. */
5907 static int tg3_chip_reset(struct tg3 *tp)
5908 {
5909         u32 val;
5910         void (*write_op)(struct tg3 *, u32, u32);
5911         int err;
5912
5913         tg3_nvram_lock(tp);
5914
5915         tg3_mdio_stop(tp);
5916
5917         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
5918
5919         /* No matching tg3_nvram_unlock() after this because
5920          * chip reset below will undo the nvram lock.
5921          */
5922         tp->nvram_lock_cnt = 0;
5923
5924         /* GRC_MISC_CFG core clock reset will clear the memory
5925          * enable bit in PCI register 4 and the MSI enable bit
5926          * on some chips, so we save relevant registers here.
5927          */
5928         tg3_save_pci_state(tp);
5929
5930         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
5931             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
5932             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
5933             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
5934             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
5935             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
5936                 tw32(GRC_FASTBOOT_PC, 0);
5937
5938         /*
5939          * We must avoid the readl() that normally takes place.
5940          * It can lock up machines, cause machine checks, and do
5941          * other fun things.  So, temporarily disable the 5701
5942          * hardware workaround while we do the reset.
5943          */
5944         write_op = tp->write32;
5945         if (write_op == tg3_write_flush_reg32)
5946                 tp->write32 = tg3_write32;
5947
5948         /* Prevent the irq handler from reading or writing PCI registers
5949          * during chip reset when the memory enable bit in the PCI command
5950          * register may be cleared.  The chip does not generate interrupts
5951          * at this time, but the irq handler may still be called due to irq
5952          * sharing or irqpoll.
5953          */
5954         tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
5955         if (tp->hw_status) {
5956                 tp->hw_status->status = 0;
5957                 tp->hw_status->status_tag = 0;
5958         }
5959         tp->last_tag = 0;
5960         smp_mb();
5961         synchronize_irq(tp->pdev->irq);
5962
5963         /* do the reset */
5964         val = GRC_MISC_CFG_CORECLK_RESET;
5965
5966         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5967                 if (tr32(0x7e2c) == 0x60) {
5968                         tw32(0x7e2c, 0x20);
5969                 }
5970                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5971                         tw32(GRC_MISC_CFG, (1 << 29));
5972                         val |= (1 << 29);
5973                 }
5974         }
5975
5976         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5977                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
5978                 tw32(GRC_VCPU_EXT_CTRL,
5979                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
5980         }
5981
5982         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5983                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
5984         tw32(GRC_MISC_CFG, val);
5985
5986         /* restore 5701 hardware bug workaround write method */
5987         tp->write32 = write_op;
5988
5989         /* Unfortunately, we have to delay before the PCI read back.
5990          * Some 575X chips will not even respond to a PCI cfg access
5991          * when the reset command is given to the chip.
5992          *
5993          * How do these hardware designers expect things to work
5994          * properly if the PCI write is posted for a long period
5995          * of time?  It is always necessary to have some method by
5996          * which a register read back can occur to push the write
5997          * out which does the reset.
5998          *
5999          * For most tg3 variants the trick below was working.
6000          * Ho hum...
6001          */
6002         udelay(120);
6003
6004         /* Flush PCI posted writes.  The normal MMIO registers
6005          * are inaccessible at this time so this is the only
6006          * way to do this reliably (actually, this is no longer
6007          * the case, see above).  I tried to use indirect
6008          * register read/write but this upset some 5701 variants.
6009          */
6010         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
6011
6012         udelay(120);
6013
6014         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
6015                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
6016                         int i;
6017                         u32 cfg_val;
6018
6019                         /* Wait for link training to complete.  */
6020                         for (i = 0; i < 5000; i++)
6021                                 udelay(100);
6022
6023                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
6024                         pci_write_config_dword(tp->pdev, 0xc4,
6025                                                cfg_val | (1 << 15));
6026                 }
6027                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
6028                         /* Set PCIE max payload size and clear error status.  */
6029                         pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
6030         }
6031
6032         tg3_restore_pci_state(tp);
6033
6034         tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
6035
6036         val = 0;
6037         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
6038                 val = tr32(MEMARB_MODE);
6039         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
6040
6041         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
6042                 tg3_stop_fw(tp);
6043                 tw32(0x5000, 0x400);
6044         }
6045
6046         tw32(GRC_MODE, tp->grc_mode);
6047
6048         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
6049                 val = tr32(0xc4);
6050
6051                 tw32(0xc4, val | (1 << 15));
6052         }
6053
6054         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
6055             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6056                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
6057                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
6058                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
6059                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6060         }
6061
6062         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6063                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
6064                 tw32_f(MAC_MODE, tp->mac_mode);
6065         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6066                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
6067                 tw32_f(MAC_MODE, tp->mac_mode);
6068         } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6069                 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
6070                 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
6071                         tp->mac_mode |= MAC_MODE_TDE_ENABLE;
6072                 tw32_f(MAC_MODE, tp->mac_mode);
6073         } else
6074                 tw32_f(MAC_MODE, 0);
6075         udelay(40);
6076
6077         tg3_mdio_start(tp);
6078
6079         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
6080
6081         err = tg3_poll_fw(tp);
6082         if (err)
6083                 return err;
6084
6085         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
6086             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
6087                 val = tr32(0x7c00);
6088
6089                 tw32(0x7c00, val | (1 << 25));
6090         }
6091
6092         /* Reprobe ASF enable state.  */
6093         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
6094         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
6095         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
6096         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
6097                 u32 nic_cfg;
6098
6099                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
6100                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
6101                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
6102                         tp->last_event_jiffies = jiffies;
6103                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
6104                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
6105                 }
6106         }
6107
6108         return 0;
6109 }
6110
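/* Ask the ASF firmware to pause.  Posts FWCMD_NICDRV_PAUSE_FW through
 * the firmware command mailbox and waits for the RX CPU to ACK it.
 * Skipped when the APE manages the firmware instead.
 */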
6111 /* tp->lock is held. */
6112 static void tg3_stop_fw(struct tg3 *tp)
6113 {
6114         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
6115            !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
6116                 /* Wait for RX cpu to ACK the previous event. */
6117                 tg3_wait_for_event_ack(tp);
6118
6119                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
6120
6121                 tg3_generate_fw_event(tp);
6122
6123                 /* Wait for RX cpu to ACK this event. */
6124                 tg3_wait_for_event_ack(tp);
6125         }
6126 }
6127
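/* Stop the firmware, write the pre-reset signatures, quiesce the
 * hardware, reset the chip, and then write the legacy and post-reset
 * signatures.
 */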
6128 /* tp->lock is held. */
6129 static int tg3_halt(struct tg3 *tp, int kind, int silent)
6130 {
6131         int err;
6132
6133         tg3_stop_fw(tp);
6134
6135         tg3_write_sig_pre_reset(tp, kind);
6136
6137         tg3_abort_hw(tp, silent);
6138         err = tg3_chip_reset(tp);
6139
6140         tg3_write_sig_legacy(tp, kind);
6141         tg3_write_sig_post_reset(tp, kind);
6142
6143         if (err)
6144                 return err;
6145
6146         return 0;
6147 }
6148
6149 #define TG3_FW_RELEASE_MAJOR    0x0
6150 #define TG3_FW_RELEASE_MINOR    0x0
6151 #define TG3_FW_RELEASE_FIX      0x0
6152 #define TG3_FW_START_ADDR       0x08000000
6153 #define TG3_FW_TEXT_ADDR        0x08000000
6154 #define TG3_FW_TEXT_LEN         0x9c0
6155 #define TG3_FW_RODATA_ADDR      0x080009c0
6156 #define TG3_FW_RODATA_LEN       0x60
6157 #define TG3_FW_DATA_ADDR        0x08000a40
6158 #define TG3_FW_DATA_LEN         0x20
6159 #define TG3_FW_SBSS_ADDR        0x08000a60
6160 #define TG3_FW_SBSS_LEN         0xc
6161 #define TG3_FW_BSS_ADDR         0x08000a70
6162 #define TG3_FW_BSS_LEN          0x10
6163
6164 static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
6165         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
6166         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
6167         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
6168         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
6169         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
6170         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
6171         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
6172         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
6173         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
6174         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
6175         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
6176         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
6177         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
6178         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
6179         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
6180         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6181         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
6182         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
6183         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
6184         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6185         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
6186         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
6187         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6188         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6189         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6190         0, 0, 0, 0, 0, 0,
6191         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
6192         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6193         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6194         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6195         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
6196         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
6197         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
6198         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
6199         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6200         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6201         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
6202         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6203         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6204         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6205         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
6206         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
6207         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
6208         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
6209         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
6210         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
6211         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
6212         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
6213         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
6214         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
6215         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
6216         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
6217         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
6218         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
6219         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
6220         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
6221         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
6222         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
6223         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
6224         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
6225         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
6226         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
6227         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
6228         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
6229         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
6230         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
6231         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
6232         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
6233         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
6234         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
6235         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
6236         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
6237         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
6238         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
6239         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
6240         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
6241         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
6242         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
6243         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
6244         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
6245         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
6246         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
6247         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
6248         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
6249         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
6250         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
6251         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
6252         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
6253         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
6254         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
6255         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
6256 };
6257
6258 static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
6259         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
6260         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
6261         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6262         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
6263         0x00000000
6264 };
6265
6266 #if 0 /* All zeros, don't eat up space with it. */
6267 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
6268         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6269         0x00000000, 0x00000000, 0x00000000, 0x00000000
6270 };
6271 #endif
6272
6273 #define RX_CPU_SCRATCH_BASE     0x30000
6274 #define RX_CPU_SCRATCH_SIZE     0x04000
6275 #define TX_CPU_SCRATCH_BASE     0x34000
6276 #define TX_CPU_SCRATCH_SIZE     0x04000
6277
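/* Halt the embedded RX or TX MIPS CPU by asserting CPU_MODE_HALT and
 * polling for it to stick.  5906 parts instead halt their VCPU via
 * GRC_VCPU_EXT_CTRL.  The firmware's NVRAM arbitration request is
 * cleared on success.
 */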
6278 /* tp->lock is held. */
6279 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
6280 {
6281         int i;
6282
6283         BUG_ON(offset == TX_CPU_BASE &&
6284             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
6285
6286         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6287                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
6288
6289                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
6290                 return 0;
6291         }
6292         if (offset == RX_CPU_BASE) {
6293                 for (i = 0; i < 10000; i++) {
6294                         tw32(offset + CPU_STATE, 0xffffffff);
6295                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
6296                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6297                                 break;
6298                 }
6299
6300                 tw32(offset + CPU_STATE, 0xffffffff);
6301                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
6302                 udelay(10);
6303         } else {
6304                 for (i = 0; i < 10000; i++) {
6305                         tw32(offset + CPU_STATE, 0xffffffff);
6306                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
6307                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6308                                 break;
6309                 }
6310         }
6311
6312         if (i >= 10000) {
6313                 printk(KERN_ERR PFX "tg3_halt_cpu timed out for %s "
6314                        "(%s CPU)\n",
6315                        tp->dev->name,
6316                        (offset == RX_CPU_BASE ? "RX" : "TX"));
6317                 return -ENODEV;
6318         }
6319
6320         /* Clear firmware's nvram arbitration. */
6321         if (tp->tg3_flags & TG3_FLAG_NVRAM)
6322                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
6323         return 0;
6324 }
6325
6326 struct fw_info {
6327         unsigned int text_base;
6328         unsigned int text_len;
6329         const u32 *text_data;
6330         unsigned int rodata_base;
6331         unsigned int rodata_len;
6332         const u32 *rodata_data;
6333         unsigned int data_base;
6334         unsigned int data_len;
6335         const u32 *data_data;
6336 };
6337
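/* Copy a firmware image (the text, rodata and data segments described
 * by fw_info) into a CPU's scratch memory.  The NVRAM lock is taken
 * around the CPU halt in case the bootcode is still running, and the
 * scratch area is zeroed before the image is written.
 */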
6338 /* tp->lock is held. */
6339 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
6340                                  int cpu_scratch_size, struct fw_info *info)
6341 {
6342         int err, lock_err, i;
6343         void (*write_op)(struct tg3 *, u32, u32);
6344
6345         if (cpu_base == TX_CPU_BASE &&
6346             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6347                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
6348                        "TX cpu firmware on %s, a 5705-class chip.\n",
6349                        tp->dev->name);
6350                 return -EINVAL;
6351         }
6352
6353         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6354                 write_op = tg3_write_mem;
6355         else
6356                 write_op = tg3_write_indirect_reg32;
6357
6358         /* It is possible that bootcode is still loading at this point.
6359          * Get the nvram lock first before halting the cpu.
6360          * Get the nvram lock before halting the cpu.
6361         lock_err = tg3_nvram_lock(tp);
6362         err = tg3_halt_cpu(tp, cpu_base);
6363         if (!lock_err)
6364                 tg3_nvram_unlock(tp);
6365         if (err)
6366                 goto out;
6367
6368         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
6369                 write_op(tp, cpu_scratch_base + i, 0);
6370         tw32(cpu_base + CPU_STATE, 0xffffffff);
6371         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
6372         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
6373                 write_op(tp, (cpu_scratch_base +
6374                               (info->text_base & 0xffff) +
6375                               (i * sizeof(u32))),
6376                          (info->text_data ?
6377                           info->text_data[i] : 0));
6378         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
6379                 write_op(tp, (cpu_scratch_base +
6380                               (info->rodata_base & 0xffff) +
6381                               (i * sizeof(u32))),
6382                          (info->rodata_data ?
6383                           info->rodata_data[i] : 0));
6384         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
6385                 write_op(tp, (cpu_scratch_base +
6386                               (info->data_base & 0xffff) +
6387                               (i * sizeof(u32))),
6388                          (info->data_data ?
6389                           info->data_data[i] : 0));
6390
6391         err = 0;
6392
6393 out:
6394         return err;
6395 }
6396
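/* Load the 5701 A0 firmware fix into both CPU scratch areas, then
 * start only the RX CPU and verify that its PC sticks at
 * TG3_FW_TEXT_ADDR.
 */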
6397 /* tp->lock is held. */
6398 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
6399 {
6400         struct fw_info info;
6401         int err, i;
6402
6403         info.text_base = TG3_FW_TEXT_ADDR;
6404         info.text_len = TG3_FW_TEXT_LEN;
6405         info.text_data = &tg3FwText[0];
6406         info.rodata_base = TG3_FW_RODATA_ADDR;
6407         info.rodata_len = TG3_FW_RODATA_LEN;
6408         info.rodata_data = &tg3FwRodata[0];
6409         info.data_base = TG3_FW_DATA_ADDR;
6410         info.data_len = TG3_FW_DATA_LEN;
6411         info.data_data = NULL;
6412
6413         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
6414                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
6415                                     &info);
6416         if (err)
6417                 return err;
6418
6419         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
6420                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
6421                                     &info);
6422         if (err)
6423                 return err;
6424
6425         /* Now startup only the RX cpu. */
6426         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6427         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
6428
6429         for (i = 0; i < 5; i++) {
6430                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
6431                         break;
6432                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6433                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
6434                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
6435                 udelay(1000);
6436         }
6437         if (i >= 5) {
6438                 printk(KERN_ERR PFX "tg3_load_firmware failed for %s "
6439                        "to set RX CPU PC: got %08x, expected %08x\n",
6440                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
6441                        TG3_FW_TEXT_ADDR);
6442                 return -ENODEV;
6443         }
6444         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6445         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
6446
6447         return 0;
6448 }
6449
6450
6451 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
6452 #define TG3_TSO_FW_RELEASE_MINOR        0x6
6453 #define TG3_TSO_FW_RELEASE_FIX          0x0
6454 #define TG3_TSO_FW_START_ADDR           0x08000000
6455 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
6456 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
6457 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
6458 #define TG3_TSO_FW_RODATA_LEN           0x60
6459 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
6460 #define TG3_TSO_FW_DATA_LEN             0x30
6461 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
6462 #define TG3_TSO_FW_SBSS_LEN             0x2c
6463 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
6464 #define TG3_TSO_FW_BSS_LEN              0x894
6465
6466 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
6467         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
6468         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
6469         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6470         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
6471         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
6472         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
6473         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
6474         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
6475         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
6476         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
6477         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
6478         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
6479         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
6480         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
6481         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
6482         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
6483         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
6484         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
6485         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6486         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
6487         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
6488         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
6489         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
6490         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
6491         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
6492         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
6493         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
6494         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
6495         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
6496         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6497         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
6498         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
6499         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
6500         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
6501         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
6502         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
6503         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
6504         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
6505         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6506         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
6507         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
6508         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
6509         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
6510         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
6511         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
6512         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
6513         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
6514         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6515         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
6516         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6517         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
6518         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
6519         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
6520         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
6521         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
6522         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
6523         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
6524         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
6525         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
6526         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
6527         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
6528         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
6529         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
6530         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
6531         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
6532         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
6533         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
6534         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
6535         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
6536         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
6537         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
6538         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
6539         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
6540         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
6541         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
6542         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
6543         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
6544         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
6545         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
6546         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
6547         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
6548         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
6549         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
6550         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
6551         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
6552         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
6553         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
6554         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
6555         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
6556         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
6557         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
6558         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
6559         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
6560         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
6561         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
6562         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
6563         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
6564         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
6565         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
6566         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
6567         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
6568         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
6569         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
6570         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
6571         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
6572         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
6573         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
6574         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
6575         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
6576         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
6577         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
6578         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
6579         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
6580         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
6581         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
6582         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
6583         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
6584         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
6585         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
6586         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
6587         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
6588         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
6589         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
6590         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
6591         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
6592         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
6593         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
6594         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
6595         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
6596         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
6597         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
6598         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
6599         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
6600         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
6601         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
6602         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
6603         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
6604         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
6605         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6606         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
6607         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
6608         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
6609         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
6610         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
6611         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
6612         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
6613         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
6614         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
6615         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
6616         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
6617         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
6618         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
6619         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
6620         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
6621         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
6622         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
6623         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
6624         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
6625         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
6626         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
6627         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
6628         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
6629         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
6630         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
6631         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
6632         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
6633         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
6634         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
6635         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
6636         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
6637         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
6638         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
6639         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
6640         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
6641         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
6642         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
6643         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
6644         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
6645         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
6646         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
6647         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
6648         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
6649         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
6650         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
6651         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
6652         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
6653         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
6654         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
6655         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
6656         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
6657         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
6658         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
6659         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
6660         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
6661         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
6662         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
6663         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
6664         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
6665         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
6666         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
6667         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
6668         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
6669         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
6670         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
6671         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
6672         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
6673         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
6674         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
6675         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
6676         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
6677         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
6678         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
6679         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
6680         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
6681         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
6682         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
6683         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
6684         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
6685         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
6686         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
6687         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6688         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
6689         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
6690         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
6691         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
6692         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
6693         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
6694         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
6695         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
6696         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
6697         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
6698         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
6699         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
6700         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
6701         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
6702         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
6703         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
6704         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6705         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
6706         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
6707         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
6708         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
6709         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
6710         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
6711         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
6712         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
6713         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
6714         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
6715         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
6716         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
6717         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
6718         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
6719         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
6720         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
6721         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
6722         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
6723         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
6724         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
6725         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
6726         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
6727         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
6728         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
6729         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
6730         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
6731         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6732         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
6733         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
6734         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
6735         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
6736         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
6737         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
6738         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
6739         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
6740         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
6741         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
6742         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
6743         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
6744         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
6745         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
6746         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
6747         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
6748         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
6749         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
6750         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
6751 };
6752
6753 static const u32 tg3TsoFwRodata[] = {
6754         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6755         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
6756         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
6757         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
6758         0x00000000,
6759 };
6760
6761 static const u32 tg3TsoFwData[] = {
6762         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
6763         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6764         0x00000000,
6765 };
6766
6767 /* 5705 needs a special version of the TSO firmware.  */
6768 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
6769 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
6770 #define TG3_TSO5_FW_RELEASE_FIX         0x0
6771 #define TG3_TSO5_FW_START_ADDR          0x00010000
6772 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
6773 #define TG3_TSO5_FW_TEXT_LEN            0xe90
6774 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
6775 #define TG3_TSO5_FW_RODATA_LEN          0x50
6776 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
6777 #define TG3_TSO5_FW_DATA_LEN            0x20
6778 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
6779 #define TG3_TSO5_FW_SBSS_LEN            0x28
6780 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
6781 #define TG3_TSO5_FW_BSS_LEN             0x88
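/* The five firmware sections above sit back to back (with small alignment
 * gaps) in NIC SRAM starting at 0x00010000: text, rodata, data, sbss, bss.
 * tg3_load_tso_firmware() below sizes the 5705 scratch area as the sum of
 * these lengths.
 */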
6782
6783 static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
6784         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
6785         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
6786         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6787         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
6788         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
6789         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
6790         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6791         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
6792         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
6793         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
6794         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
6795         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
6796         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
6797         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
6798         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
6799         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
6800         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
6801         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
6802         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
6803         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
6804         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
6805         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
6806         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
6807         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
6808         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
6809         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
6810         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
6811         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
6812         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
6813         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
6814         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6815         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
6816         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
6817         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
6818         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
6819         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
6820         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
6821         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
6822         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
6823         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
6824         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
6825         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
6826         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
6827         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
6828         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
6829         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
6830         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
6831         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
6832         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
6833         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
6834         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
6835         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
6836         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
6837         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
6838         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
6839         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
6840         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
6841         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
6842         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
6843         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
6844         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
6845         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
6846         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
6847         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
6848         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
6849         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
6850         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6851         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
6852         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
6853         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
6854         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
6855         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
6856         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
6857         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
6858         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
6859         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
6860         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
6861         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
6862         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
6863         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
6864         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
6865         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
6866         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
6867         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
6868         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
6869         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
6870         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
6871         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
6872         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
6873         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
6874         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
6875         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
6876         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
6877         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
6878         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
6879         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
6880         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
6881         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
6882         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
6883         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
6884         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
6885         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
6886         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
6887         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
6888         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
6889         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
6890         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6891         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6892         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
6893         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
6894         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
6895         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
6896         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
6897         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
6898         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
6899         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
6900         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
6901         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6902         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6903         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
6904         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
6905         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
6906         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
6907         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6908         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
6909         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
6910         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
6911         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
6912         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
6913         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
6914         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
6915         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
6916         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
6917         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
6918         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
6919         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
6920         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
6921         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
6922         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
6923         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
6924         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
6925         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
6926         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
6927         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
6928         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
6929         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
6930         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
6931         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6932         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
6933         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
6934         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
6935         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6936         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
6937         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
6938         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6939         0x00000000, 0x00000000, 0x00000000,
6940 };
6941
6942 static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
6943         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6944         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
6945         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6946         0x00000000, 0x00000000, 0x00000000,
6947 };
6948
6949 static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
6950         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
6951         0x00000000, 0x00000000, 0x00000000,
6952 };
6953
6954 /* tp->lock is held. */
6955 static int tg3_load_tso_firmware(struct tg3 *tp)
6956 {
6957         struct fw_info info;
6958         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6959         int err, i;
6960
6961         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6962                 return 0;
6963
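        /* The 5705 runs the TSO firmware on its RX CPU and borrows the mbuf
         * pool SRAM as scratch space; the other TSO-capable chips load it
         * into the TX CPU's dedicated scratch area instead.
         */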
6964         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6965                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6966                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6967                 info.text_data = &tg3Tso5FwText[0];
6968                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6969                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6970                 info.rodata_data = &tg3Tso5FwRodata[0];
6971                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6972                 info.data_len = TG3_TSO5_FW_DATA_LEN;
6973                 info.data_data = &tg3Tso5FwData[0];
6974                 cpu_base = RX_CPU_BASE;
6975                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6976                 cpu_scratch_size = (info.text_len +
6977                                     info.rodata_len +
6978                                     info.data_len +
6979                                     TG3_TSO5_FW_SBSS_LEN +
6980                                     TG3_TSO5_FW_BSS_LEN);
6981         } else {
6982                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6983                 info.text_len = TG3_TSO_FW_TEXT_LEN;
6984                 info.text_data = &tg3TsoFwText[0];
6985                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6986                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6987                 info.rodata_data = &tg3TsoFwRodata[0];
6988                 info.data_base = TG3_TSO_FW_DATA_ADDR;
6989                 info.data_len = TG3_TSO_FW_DATA_LEN;
6990                 info.data_data = &tg3TsoFwData[0];
6991                 cpu_base = TX_CPU_BASE;
6992                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6993                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6994         }
6995
6996         err = tg3_load_firmware_cpu(tp, cpu_base,
6997                                     cpu_scratch_base, cpu_scratch_size,
6998                                     &info);
6999         if (err)
7000                 return err;
7001
7002         /* Now startup the cpu. */
7003         tw32(cpu_base + CPU_STATE, 0xffffffff);
7004         tw32_f(cpu_base + CPU_PC,    info.text_base);
7005
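        /* In case the PC write does not take on the first attempt, poll up
         * to five times, halting the CPU, rewriting the PC, and waiting
         * 1 ms between tries.
         */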
7006         for (i = 0; i < 5; i++) {
7007                 if (tr32(cpu_base + CPU_PC) == info.text_base)
7008                         break;
7009                 tw32(cpu_base + CPU_STATE, 0xffffffff);
7010                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
7011                 tw32_f(cpu_base + CPU_PC,    info.text_base);
7012                 udelay(1000);
7013         }
7014         if (i >= 5) {
7015                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
7016                        "to set CPU PC, is %08x should be %08x\n",
7017                        tp->dev->name, tr32(cpu_base + CPU_PC),
7018                        info.text_base);
7019                 return -ENODEV;
7020         }
7021         tw32(cpu_base + CPU_STATE, 0xffffffff);
7022         tw32_f(cpu_base + CPU_MODE,  0x00000000);
7023         return 0;
7024 }
7025
7026
7027 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7028 {
7029         struct tg3 *tp = netdev_priv(dev);
7030         struct sockaddr *addr = p;
7031         int err = 0, skip_mac_1 = 0;
7032
7033         if (!is_valid_ether_addr(addr->sa_data))
7034                 return -EINVAL;
7035
7036         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7037
7038         if (!netif_running(dev))
7039                 return 0;
7040
7041         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7042                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7043
7044                 addr0_high = tr32(MAC_ADDR_0_HIGH);
7045                 addr0_low = tr32(MAC_ADDR_0_LOW);
7046                 addr1_high = tr32(MAC_ADDR_1_HIGH);
7047                 addr1_low = tr32(MAC_ADDR_1_LOW);
7048
7049                 /* Skip MAC addr 1 if ASF is using it. */
7050                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7051                     !(addr1_high == 0 && addr1_low == 0))
7052                         skip_mac_1 = 1;
7053         }
7054         spin_lock_bh(&tp->lock);
7055         __tg3_set_mac_addr(tp, skip_mac_1);
7056         spin_unlock_bh(&tp->lock);
7057
7058         return err;
7059 }
7060
7061 /* tp->lock is held. */
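/* Write one TG3_BDINFO block into NIC memory at bdinfo_addr: the host DMA
 * address split into high/low 32-bit words, the maxlen/flags word, and
 * (on pre-5705 chips only) the NIC SRAM address of the ring.
 */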
7062 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7063                            dma_addr_t mapping, u32 maxlen_flags,
7064                            u32 nic_addr)
7065 {
7066         tg3_write_mem(tp,
7067                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7068                       ((u64) mapping >> 32));
7069         tg3_write_mem(tp,
7070                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7071                       ((u64) mapping & 0xffffffff));
7072         tg3_write_mem(tp,
7073                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7074                        maxlen_flags);
7075
7076         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7077                 tg3_write_mem(tp,
7078                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7079                               nic_addr);
7080 }
7081
7082 static void __tg3_set_rx_mode(struct net_device *);
7083 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7084 {
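        /* Push the ethtool coalescing parameters into the host coalescing
         * engine; the 5705_PLUS checks skip registers that are not
         * programmed on 5705-class and newer chips.
         */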
7085         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7086         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7087         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7088         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7089         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7090                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7091                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7092         }
7093         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7094         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7095         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7096                 u32 val = ec->stats_block_coalesce_usecs;
7097
7098                 if (!netif_carrier_ok(tp->dev))
7099                         val = 0;
7100
7101                 tw32(HOSTCC_STAT_COAL_TICKS, val);
7102         }
7103 }
7104
7105 /* tp->lock is held. */
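/* Bring the chip from reset to a fully programmed state: stop any running
 * firmware, reset the core, rebuild the rings and buffer manager, program
 * the DMA and MAC engines, and reload the TSO firmware where needed.
 */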
7106 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7107 {
7108         u32 val, rdmac_mode;
7109         int i, err, limit;
7110
7111         tg3_disable_ints(tp);
7112
7113         tg3_stop_fw(tp);
7114
7115         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7116
7117         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
7118                 tg3_abort_hw(tp, 1);
7119         }
7120
7121         if (reset_phy &&
7122             !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
7123                 tg3_phy_reset(tp);
7124
7125         err = tg3_chip_reset(tp);
7126         if (err)
7127                 return err;
7128
7129         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7130
7131         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7132                 val = tr32(TG3_CPMU_CTRL);
7133                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7134                 tw32(TG3_CPMU_CTRL, val);
7135
7136                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7137                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7138                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7139                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7140
7141                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7142                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7143                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7144                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7145
7146                 val = tr32(TG3_CPMU_HST_ACC);
7147                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7148                 val |= CPMU_HST_ACC_MACCLK_6_25;
7149                 tw32(TG3_CPMU_HST_ACC, val);
7150         }
7151
7152         /* This works around an issue with Athlon chipsets on
7153          * B3 tigon3 silicon.  This bit has no effect on any
7154          * other revision.  But do not set this on PCI Express
7155          * chips and don't even touch the clocks if the CPMU is present.
7156          */
7157         if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7158                 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7159                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7160                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7161         }
7162
7163         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7164             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7165                 val = tr32(TG3PCI_PCISTATE);
7166                 val |= PCISTATE_RETRY_SAME_DMA;
7167                 tw32(TG3PCI_PCISTATE, val);
7168         }
7169
7170         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7171                 /* Allow reads and writes to the
7172                  * APE register and memory space.
7173                  */
7174                 val = tr32(TG3PCI_PCISTATE);
7175                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7176                        PCISTATE_ALLOW_APE_SHMEM_WR;
7177                 tw32(TG3PCI_PCISTATE, val);
7178         }
7179
7180         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7181                 /* Enable some hw fixes.  */
7182                 val = tr32(TG3PCI_MSI_DATA);
7183                 val |= (1 << 26) | (1 << 28) | (1 << 29);
7184                 tw32(TG3PCI_MSI_DATA, val);
7185         }
7186
7187         /* Descriptor ring init may make accesses to the
7188          * NIC SRAM area to setup the TX descriptors, so we
7189          * can only do this after the hardware has been
7190          * successfully reset.
7191          */
7192         err = tg3_init_rings(tp);
7193         if (err)
7194                 return err;
7195
7196         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7197             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
7198                 /* This value is determined during the probe time DMA
7199                  * engine test, tg3_test_dma.
7200                  */
7201                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7202         }
7203
7204         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7205                           GRC_MODE_4X_NIC_SEND_RINGS |
7206                           GRC_MODE_NO_TX_PHDR_CSUM |
7207                           GRC_MODE_NO_RX_PHDR_CSUM);
7208         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
7209
7210         /* Pseudo-header checksum is done by hardware logic and not
7211          * the offload processors, so make the chip do the pseudo-
7212          * header checksums on receive.  For transmit it is more
7213          * convenient to do the pseudo-header checksum in software
7214          * as Linux does that on transmit for us in all cases.
7215          */
7216         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
7217
7218         tw32(GRC_MODE,
7219              tp->grc_mode |
7220              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
7221
7222         /* Setup the timer prescalar register.  Clock is always 66MHz. */
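        /* A prescaler value of 65 divides the 66 MHz core clock by 66, which
         * presumably gives the coalescing timers a ~1 usec tick.
         */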
7223         val = tr32(GRC_MISC_CFG);
7224         val &= ~0xff;
7225         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7226         tw32(GRC_MISC_CFG, val);
7227
7228         /* Initialize MBUF/DESC pool. */
7229         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7230                 /* Do nothing.  */
7231         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7232                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7233                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7234                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7235                 else
7236                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7237                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7238                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7239         }
7240         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7241                 int fw_len;
7242
7243                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
7244                           TG3_TSO5_FW_RODATA_LEN +
7245                           TG3_TSO5_FW_DATA_LEN +
7246                           TG3_TSO5_FW_SBSS_LEN +
7247                           TG3_TSO5_FW_BSS_LEN);
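                /* Round fw_len up to the next 128-byte boundary (0xfb0 ->
                 * 0x1000 with the section sizes above), keeping the mbuf
                 * pool carved out right after the firmware image aligned.
                 */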
7248                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
7249                 tw32(BUFMGR_MB_POOL_ADDR,
7250                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7251                 tw32(BUFMGR_MB_POOL_SIZE,
7252                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
7253         }
7254
7255         if (tp->dev->mtu <= ETH_DATA_LEN) {
7256                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7257                      tp->bufmgr_config.mbuf_read_dma_low_water);
7258                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7259                      tp->bufmgr_config.mbuf_mac_rx_low_water);
7260                 tw32(BUFMGR_MB_HIGH_WATER,
7261                      tp->bufmgr_config.mbuf_high_water);
7262         } else {
7263                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7264                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7265                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7266                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7267                 tw32(BUFMGR_MB_HIGH_WATER,
7268                      tp->bufmgr_config.mbuf_high_water_jumbo);
7269         }
7270         tw32(BUFMGR_DMA_LOW_WATER,
7271              tp->bufmgr_config.dma_low_water);
7272         tw32(BUFMGR_DMA_HIGH_WATER,
7273              tp->bufmgr_config.dma_high_water);
7274
7275         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
7276         for (i = 0; i < 2000; i++) {
7277                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
7278                         break;
7279                 udelay(10);
7280         }
7281         if (i >= 2000) {
7282                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
7283                        tp->dev->name);
7284                 return -ENODEV;
7285         }
7286
7287         /* Setup replenish threshold. */
7288         val = tp->rx_pending / 8;
7289         if (val == 0)
7290                 val = 1;
7291         else if (val > tp->rx_std_max_post)
7292                 val = tp->rx_std_max_post;
7293         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7294                 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7295                         tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7296
7297                 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
7298                         val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
7299         }
7300
7301         tw32(RCVBDI_STD_THRESH, val);
7302
7303         /* Initialize TG3_BDINFO's at:
7304          *  RCVDBDI_STD_BD:     standard eth size rx ring
7305          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
7306          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
7307          *
7308          * like so:
7309          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
7310          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
7311          *                              ring attribute flags
7312          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
7313          *
7314          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7315          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
7316          *
7317          * The size of each ring is fixed in the firmware, but the location is
7318          * configurable.
7319          */
7320         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7321              ((u64) tp->rx_std_mapping >> 32));
7322         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7323              ((u64) tp->rx_std_mapping & 0xffffffff));
7324         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7325              NIC_SRAM_RX_BUFFER_DESC);
7326
7327         /* Don't even try to program the JUMBO/MINI buffer descriptor
7328          * configs on 5705.
7329          */
7330         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
7331                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7332                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
7333         } else {
7334                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7335                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7336
7337                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7338                      BDINFO_FLAGS_DISABLED);
7339
7340                 /* Setup replenish threshold. */
7341                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7342
7343                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
7344                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7345                              ((u64) tp->rx_jumbo_mapping >> 32));
7346                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7347                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
7348                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7349                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7350                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7351                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7352                 } else {
7353                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7354                              BDINFO_FLAGS_DISABLED);
7355                 }
7356
7357         }
7358
7359         /* There is only one send ring on 5705/5750, no need to explicitly
7360          * disable the others.
7361          */
7362         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7363                 /* Clear out send RCB ring in SRAM. */
7364                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
7365                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7366                                       BDINFO_FLAGS_DISABLED);
7367         }
7368
7369         tp->tx_prod = 0;
7370         tp->tx_cons = 0;
7371         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7372         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7373
7374         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
7375                        tp->tx_desc_mapping,
7376                        (TG3_TX_RING_SIZE <<
7377                         BDINFO_FLAGS_MAXLEN_SHIFT),
7378                        NIC_SRAM_TX_BUFFER_DESC);
7379
7380         /* There is only one receive return ring on 5705/5750, no need
7381          * to explicitly disable the others.
7382          */
7383         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7384                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
7385                      i += TG3_BDINFO_SIZE) {
7386                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7387                                       BDINFO_FLAGS_DISABLED);
7388                 }
7389         }
7390
7391         tp->rx_rcb_ptr = 0;
7392         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
7393
7394         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
7395                        tp->rx_rcb_mapping,
7396                        (TG3_RX_RCB_RING_SIZE(tp) <<
7397                         BDINFO_FLAGS_MAXLEN_SHIFT),
7398                        0);
7399
7400         tp->rx_std_ptr = tp->rx_pending;
7401         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
7402                      tp->rx_std_ptr);
7403
7404         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7405                                                 tp->rx_jumbo_pending : 0;
7406         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
7407                      tp->rx_jumbo_ptr);
7408
7409         /* Initialize MAC address and backoff seed. */
7410         __tg3_set_mac_addr(tp, 0);
7411
7412         /* MTU + ethernet header + FCS + optional VLAN tag */
7413         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
7414
7415         /* The slot time is changed by tg3_setup_phy if we
7416          * run at gigabit with half duplex.
7417          */
7418         tw32(MAC_TX_LENGTHS,
7419              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7420              (6 << TX_LENGTHS_IPG_SHIFT) |
7421              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7422
7423         /* Receive rules. */
7424         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7425         tw32(RCVLPC_CONFIG, 0x0181);
7426
7427         /* Calculate the RDMAC_MODE setting early; we need it to determine
7428          * the RCVLPC_STATS_ENABLE mask.
7429          */
7430         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7431                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7432                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7433                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7434                       RDMAC_MODE_LNGREAD_ENAB);
7435
7436         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7437             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
7438                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7439                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7440                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7441
7442         /* If statement applies to 5705 and 5750 PCI devices only */
7443         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7444              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7445             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
7446                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
7447                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7448                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7449                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7450                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7451                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7452                 }
7453         }
7454
7455         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7456                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7457
7458         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7459                 rdmac_mode |= (1 << 27);
7460
7461         /* Receive/send statistics. */
7462         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7463                 val = tr32(RCVLPC_STATS_ENABLE);
7464                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7465                 tw32(RCVLPC_STATS_ENABLE, val);
7466         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7467                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7468                 val = tr32(RCVLPC_STATS_ENABLE);
7469                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7470                 tw32(RCVLPC_STATS_ENABLE, val);
7471         } else {
7472                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7473         }
7474         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7475         tw32(SNDDATAI_STATSENAB, 0xffffff);
7476         tw32(SNDDATAI_STATSCTRL,
7477              (SNDDATAI_SCTRL_ENABLE |
7478               SNDDATAI_SCTRL_FASTUPD));
7479
7480         /* Setup host coalescing engine. */
7481         tw32(HOSTCC_MODE, 0);
7482         for (i = 0; i < 2000; i++) {
7483                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
7484                         break;
7485                 udelay(10);
7486         }
7487
7488         __tg3_set_coalesce(tp, &tp->coal);
7489
7490         /* set status block DMA address */
7491         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7492              ((u64) tp->status_mapping >> 32));
7493         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7494              ((u64) tp->status_mapping & 0xffffffff));
7495
7496         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7497                 /* Status/statistics block address.  See tg3_timer,
7498                  * the tg3_periodic_fetch_stats call there, and
7499                  * tg3_get_stats to see how this works for 5705/5750 chips.
7500                  */
7501                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7502                      ((u64) tp->stats_mapping >> 32));
7503                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7504                      ((u64) tp->stats_mapping & 0xffffffff));
7505                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7506                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7507         }
7508
7509         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7510
7511         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7512         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7513         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7514                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7515
7516         /* Clear statistics/status block in chip, and status block in ram. */
7517         for (i = NIC_SRAM_STATS_BLK;
7518              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7519              i += sizeof(u32)) {
7520                 tg3_write_mem(tp, i, 0);
7521                 udelay(40);
7522         }
7523         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
7524
7525         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7526                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7527                 /* reset to prevent losing 1st rx packet intermittently */
7528                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7529                 udelay(10);
7530         }
7531
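        /* Rebuild mac_mode from scratch; when the APE is enabled, preserve
         * only its TX/RX enable bits across the reset.
         */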
7532         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7533                 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
7534         else
7535                 tp->mac_mode = 0;
7536         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
7537                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
7538         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7539             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7540             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7541                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7542         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7543         udelay(40);
7544
7545         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
7546          * If TG3_FLG2_IS_NIC is zero, we should read the
7547          * register to preserve the GPIO settings for LOMs. The GPIOs,
7548          * whether used as inputs or outputs, are set by boot code after
7549          * reset.
7550          */
7551         if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
7552                 u32 gpio_mask;
7553
7554                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7555                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7556                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
7557
7558                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7559                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7560                                      GRC_LCLCTRL_GPIO_OUTPUT3;
7561
7562                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7563                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7564
7565                 tp->grc_local_ctrl &= ~gpio_mask;
7566                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7567
7568                 /* GPIO1 must be driven high for eeprom write protect */
7569                 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7570                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7571                                                GRC_LCLCTRL_GPIO_OUTPUT1);
7572         }
7573         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7574         udelay(100);
7575
7576         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
7577         tp->last_tag = 0;
7578
7579         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7580                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7581                 udelay(40);
7582         }
7583
7584         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7585                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7586                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7587                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7588                WDMAC_MODE_LNGREAD_ENAB);
7589
7590         /* If statement applies to 5705 and 5750 PCI devices only */
7591         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7592              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7593             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7594                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
7595                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7596                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7597                         /* nothing */
7598                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7599                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7600                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7601                         val |= WDMAC_MODE_RX_ACCEL;
7602                 }
7603         }
7604
7605         /* Enable host coalescing bug fix */
7606         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
7607             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
7608             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
7609             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) ||
7610             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785))
7611                 val |= WDMAC_MODE_STATUS_TAG_FIX;
7612
7613         tw32_f(WDMAC_MODE, val);
7614         udelay(40);
7615
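        /* On PCI-X, bump the maximum memory read byte count to 2K for the
         * 5703 and 5704; the 5704 also has its max split transactions field
         * cleared.
         */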
7616         if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7617                 u16 pcix_cmd;
7618
7619                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7620                                      &pcix_cmd);
7621                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
7622                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7623                         pcix_cmd |= PCI_X_CMD_READ_2K;
7624                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7625                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7626                         pcix_cmd |= PCI_X_CMD_READ_2K;
7627                 }
7628                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7629                                       pcix_cmd);
7630         }
7631
7632         tw32_f(RDMAC_MODE, rdmac_mode);
7633         udelay(40);
7634
7635         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7636         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7637                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
7638
7639         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7640                 tw32(SNDDATAC_MODE,
7641                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7642         else
7643                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7644
7645         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7646         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7647         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7648         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
7649         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7650                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
7651         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7652         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7653
7654         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7655                 err = tg3_load_5701_a0_firmware_fix(tp);
7656                 if (err)
7657                         return err;
7658         }
7659
7660         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7661                 err = tg3_load_tso_firmware(tp);
7662                 if (err)
7663                         return err;
7664         }
7665
7666         tp->tx_mode = TX_MODE_ENABLE;
7667         tw32_f(MAC_TX_MODE, tp->tx_mode);
7668         udelay(100);
7669
7670         tp->rx_mode = RX_MODE_ENABLE;
7671         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7672             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7673             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
7674             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
7675                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7676
7677         tw32_f(MAC_RX_MODE, tp->rx_mode);
7678         udelay(10);
7679
7680         tw32(MAC_LED_CTRL, tp->led_ctrl);
7681
7682         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
7683         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7684                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7685                 udelay(10);
7686         }
7687         tw32_f(MAC_RX_MODE, tp->rx_mode);
7688         udelay(10);
7689
7690         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7691                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7692                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7693                         /* Set drive transmission level to 1.2V  */
7694                         /* only if the signal pre-emphasis bit is not set  */
7695                         val = tr32(MAC_SERDES_CFG);
7696                         val &= 0xfffff000;
7697                         val |= 0x880;
7698                         tw32(MAC_SERDES_CFG, val);
7699                 }
7700                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7701                         tw32(MAC_SERDES_CFG, 0x616000);
7702         }
7703
7704         /* Prevent the chip from dropping frames when flow control
7705          * is enabled.
7706          */
7707         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7708
7709         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7710             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7711                 /* Use hardware link auto-negotiation */
7712                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7713         }
7714
7715         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7716             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7717                 u32 tmp;
7718
7719                 tmp = tr32(SERDES_RX_CTRL);
7720                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7721                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7722                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7723                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7724         }
7725
7726         if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7727                 if (tp->link_config.phy_is_low_power) {
7728                         tp->link_config.phy_is_low_power = 0;
7729                         tp->link_config.speed = tp->link_config.orig_speed;
7730                         tp->link_config.duplex = tp->link_config.orig_duplex;
7731                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
7732                 }
7733
7734                 err = tg3_setup_phy(tp, 0);
7735                 if (err)
7736                         return err;
7737
7738                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7739                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7740                         u32 tmp;
7741
7742                         /* Clear CRC stats. */
7743                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7744                                 tg3_writephy(tp, MII_TG3_TEST1,
7745                                              tmp | MII_TG3_TEST1_CRC_EN);
7746                                 tg3_readphy(tp, 0x14, &tmp);
7747                         }
7748                 }
7749         }
7750
7751         __tg3_set_rx_mode(tp->dev);
7752
7753         /* Initialize receive rules. */
7754         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
7755         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7756         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
7757         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7758
7759         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7760             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
7761                 limit = 8;
7762         else
7763                 limit = 16;
7764         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7765                 limit -= 4;
7766         switch (limit) {
7767         case 16:
7768                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
7769         case 15:
7770                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
7771         case 14:
7772                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
7773         case 13:
7774                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
7775         case 12:
7776                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
7777         case 11:
7778                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
7779         case 10:
7780                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
7781         case 9:
7782                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
7783         case 8:
7784                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
7785         case 7:
7786                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
7787         case 6:
7788                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
7789         case 5:
7790                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
7791         case 4:
7792                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
7793         case 3:
7794                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
7795         case 2:
7796         case 1:
7797
7798         default:
7799                 break;
7800         }
7801
7802         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7803                 /* Write our heartbeat update interval to APE. */
7804                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7805                                 APE_HOST_HEARTBEAT_INT_DISABLE);
7806
7807         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7808
7809         return 0;
7810 }
7811
7812 /* Called at device open time to get the chip ready for
7813  * packet processing.  Invoked with tp->lock held.
7814  */
7815 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7816 {
7817         tg3_switch_clocks(tp);
7818
7819         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7820
7821         return tg3_reset_hw(tp, reset_phy);
7822 }
7823
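/* Fold a 32-bit hardware counter into a 64-bit (high/low) software
 * counter.  The post-add comparison detects wraparound of the low word
 * and carries it into the high word.
 */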
7824 #define TG3_STAT_ADD32(PSTAT, REG) \
7825 do {    u32 __val = tr32(REG); \
7826         (PSTAT)->low += __val; \
7827         if ((PSTAT)->low < __val) \
7828                 (PSTAT)->high += 1; \
7829 } while (0)
7830
7831 static void tg3_periodic_fetch_stats(struct tg3 *tp)
7832 {
7833         struct tg3_hw_stats *sp = tp->hw_stats;
7834
7835         if (!netif_carrier_ok(tp->dev))
7836                 return;
7837
7838         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7839         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7840         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7841         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7842         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7843         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7844         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7845         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7846         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7847         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7848         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7849         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7850         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7851
7852         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7853         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7854         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7855         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7856         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7857         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7858         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7859         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7860         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7861         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7862         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7863         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7864         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7865         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
7866
7867         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7868         TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7869         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
7870 }
7871
7872 static void tg3_timer(unsigned long __opaque)
7873 {
7874         struct tg3 *tp = (struct tg3 *) __opaque;
7875
7876         if (tp->irq_sync)
7877                 goto restart_timer;
7878
7879         spin_lock(&tp->lock);
7880
7881         if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7882                 /* All of this garbage exists because, when using non-tagged
7883                  * IRQ status, the mailbox/status_block protocol the chip
7884                  * uses with the CPU is race prone.
7885                  */
7886                 if (tp->hw_status->status & SD_STATUS_UPDATED) {
7887                         tw32(GRC_LOCAL_CTRL,
7888                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
7889                 } else {
7890                         tw32(HOSTCC_MODE, tp->coalesce_mode |
7891                              (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
7892                 }
7893
7894                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7895                         tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
7896                         spin_unlock(&tp->lock);
7897                         schedule_work(&tp->reset_task);
7898                         return;
7899                 }
7900         }
7901
7902         /* This part only runs once per second. */
7903         if (!--tp->timer_counter) {
7904                 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7905                         tg3_periodic_fetch_stats(tp);
7906
7907                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
7908                         u32 mac_stat;
7909                         int phy_event;
7910
7911                         mac_stat = tr32(MAC_STATUS);
7912
7913                         phy_event = 0;
7914                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
7915                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
7916                                         phy_event = 1;
7917                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
7918                                 phy_event = 1;
7919
7920                         if (phy_event)
7921                                 tg3_setup_phy(tp, 0);
7922                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
7923                         u32 mac_stat = tr32(MAC_STATUS);
7924                         int need_setup = 0;
7925
7926                         if (netif_carrier_ok(tp->dev) &&
7927                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
7928                                 need_setup = 1;
7929                         }
7930                         if (!netif_carrier_ok(tp->dev) &&
7931                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
7932                                          MAC_STATUS_SIGNAL_DET))) {
7933                                 need_setup = 1;
7934                         }
7935                         if (need_setup) {
7936                                 if (!tp->serdes_counter) {
7937                                         tw32_f(MAC_MODE,
7938                                              (tp->mac_mode &
7939                                               ~MAC_MODE_PORT_MODE_MASK));
7940                                         udelay(40);
7941                                         tw32_f(MAC_MODE, tp->mac_mode);
7942                                         udelay(40);
7943                                 }
7944                                 tg3_setup_phy(tp, 0);
7945                         }
7946                 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
7947                         tg3_serdes_parallel_detect(tp);
7948
7949                 tp->timer_counter = tp->timer_multiplier;
7950         }
7951
7952         /* Heartbeat is only sent once every 2 seconds.
7953          *
7954          * The heartbeat is to tell the ASF firmware that the host
7955          * driver is still alive.  In the event that the OS crashes,
7956          * ASF needs to reset the hardware to free up the FIFO space
7957          * that may be filled with rx packets destined for the host.
7958          * If the FIFO is full, ASF will no longer function properly.
7959          *
7960          * Unintended resets have been reported on real-time kernels,
7961          * where the timer doesn't run on time.  Netpoll will also have
7962          * the same problem.
7963          *
7964          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
7965          * to check the ring condition when the heartbeat is expiring
7966          * before doing the reset.  This will prevent most unintended
7967          * resets.
7968          */
7969         if (!--tp->asf_counter) {
7970                 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
7971                     !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
7972                         tg3_wait_for_event_ack(tp);
7973
7974                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
7975                                       FWCMD_NICDRV_ALIVE3);
7976                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
7977                         /* 5 seconds timeout */
7978                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
7979
7980                         tg3_generate_fw_event(tp);
7981                 }
7982                 tp->asf_counter = tp->asf_multiplier;
7983         }
7984
7985         spin_unlock(&tp->lock);
7986
7987 restart_timer:
7988         tp->timer.expires = jiffies + tp->timer_offset;
7989         add_timer(&tp->timer);
7990 }
7991
7992 static int tg3_request_irq(struct tg3 *tp)
7993 {
7994         irq_handler_t fn;
7995         unsigned long flags;
7996         struct net_device *dev = tp->dev;
7997
7998         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7999                 fn = tg3_msi;
8000                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
8001                         fn = tg3_msi_1shot;
8002                 flags = IRQF_SAMPLE_RANDOM;
8003         } else {
8004                 fn = tg3_interrupt;
8005                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8006                         fn = tg3_interrupt_tagged;
8007                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
8008         }
8009         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
8010 }
8011
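/* Generate a test interrupt by forcing a host-coalescing "now" event with
 * a temporary ISR installed, then poll the interrupt mailbox and the
 * MISC_HOST_CTRL PCI-interrupt mask bit for up to ~50ms for evidence that
 * the interrupt was actually delivered.
 */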
8012 static int tg3_test_interrupt(struct tg3 *tp)
8013 {
8014         struct net_device *dev = tp->dev;
8015         int err, i, intr_ok = 0;
8016
8017         if (!netif_running(dev))
8018                 return -ENODEV;
8019
8020         tg3_disable_ints(tp);
8021
8022         free_irq(tp->pdev->irq, dev);
8023
8024         err = request_irq(tp->pdev->irq, tg3_test_isr,
8025                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
8026         if (err)
8027                 return err;
8028
8029         tp->hw_status->status &= ~SD_STATUS_UPDATED;
8030         tg3_enable_ints(tp);
8031
8032         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8033                HOSTCC_MODE_NOW);
8034
8035         for (i = 0; i < 5; i++) {
8036                 u32 int_mbox, misc_host_ctrl;
8037
8038                 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
8039                                         TG3_64BIT_REG_LOW);
8040                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
8041
8042                 if ((int_mbox != 0) ||
8043                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
8044                         intr_ok = 1;
8045                         break;
8046                 }
8047
8048                 msleep(10);
8049         }
8050
8051         tg3_disable_ints(tp);
8052
8053         free_irq(tp->pdev->irq, dev);
8054
8055         err = tg3_request_irq(tp);
8056
8057         if (err)
8058                 return err;
8059
8060         if (intr_ok)
8061                 return 0;
8062
8063         return -EIO;
8064 }
8065
8066 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
8067  * INTx mode is successfully restored.
8068  */
8069 static int tg3_test_msi(struct tg3 *tp)
8070 {
8071         struct net_device *dev = tp->dev;
8072         int err;
8073         u16 pci_cmd;
8074
8075         if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
8076                 return 0;
8077
8078         /* Turn off SERR reporting in case MSI terminates with Master
8079          * Abort.
8080          */
8081         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8082         pci_write_config_word(tp->pdev, PCI_COMMAND,
8083                               pci_cmd & ~PCI_COMMAND_SERR);
8084
8085         err = tg3_test_interrupt(tp);
8086
8087         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8088
8089         if (!err)
8090                 return 0;
8091
8092         /* other failures */
8093         if (err != -EIO)
8094                 return err;
8095
8096         /* MSI test failed, go back to INTx mode */
8097         printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
8098                "switching to INTx mode. Please report this failure to "
8099                "the PCI maintainer and include system chipset information.\n",
8100                        tp->dev->name);
8101
8102         free_irq(tp->pdev->irq, dev);
8103         pci_disable_msi(tp->pdev);
8104
8105         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8106
8107         err = tg3_request_irq(tp);
8108         if (err)
8109                 return err;
8110
8111         /* Need to reset the chip because the MSI cycle may have terminated
8112          * with Master Abort.
8113          */
8114         tg3_full_lock(tp, 1);
8115
8116         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8117         err = tg3_init_hw(tp, 1);
8118
8119         tg3_full_unlock(tp);
8120
8121         if (err)
8122                 free_irq(tp->pdev->irq, dev);
8123
8124         return err;
8125 }
8126
8127 static int tg3_open(struct net_device *dev)
8128 {
8129         struct tg3 *tp = netdev_priv(dev);
8130         int err;
8131
8132         netif_carrier_off(tp->dev);
8133
8134         err = tg3_set_power_state(tp, PCI_D0);
8135         if (err)
8136                 return err;
8137
8138         tg3_full_lock(tp, 0);
8139
8140         tg3_disable_ints(tp);
8141         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8142
8143         tg3_full_unlock(tp);
8144
8145         /* The placement of this call is tied
8146          * to the setup and use of Host TX descriptors.
8147          */
8148         err = tg3_alloc_consistent(tp);
8149         if (err)
8150                 return err;
8151
8152         if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
8153                 /* All MSI supporting chips should support tagged
8154                  * status.  Assert that this is the case.
8155                  */
8156                 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8157                         printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
8158                                "Not using MSI.\n", tp->dev->name);
8159                 } else if (pci_enable_msi(tp->pdev) == 0) {
8160                         u32 msi_mode;
8161
8162                         msi_mode = tr32(MSGINT_MODE);
8163                         tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
8164                         tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
8165                 }
8166         }
8167         err = tg3_request_irq(tp);
8168
8169         if (err) {
8170                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8171                         pci_disable_msi(tp->pdev);
8172                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8173                 }
8174                 tg3_free_consistent(tp);
8175                 return err;
8176         }
8177
8178         napi_enable(&tp->napi);
8179
8180         tg3_full_lock(tp, 0);
8181
8182         err = tg3_init_hw(tp, 1);
8183         if (err) {
8184                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8185                 tg3_free_rings(tp);
8186         } else {
8187                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8188                         tp->timer_offset = HZ;
8189                 else
8190                         tp->timer_offset = HZ / 10;
8191
8192                 BUG_ON(tp->timer_offset > HZ);
8193                 tp->timer_counter = tp->timer_multiplier =
8194                         (HZ / tp->timer_offset);
8195                 tp->asf_counter = tp->asf_multiplier =
8196                         ((HZ / tp->timer_offset) * 2);
8197
8198                 init_timer(&tp->timer);
8199                 tp->timer.expires = jiffies + tp->timer_offset;
8200                 tp->timer.data = (unsigned long) tp;
8201                 tp->timer.function = tg3_timer;
8202         }
8203
8204         tg3_full_unlock(tp);
8205
8206         if (err) {
8207                 napi_disable(&tp->napi);
8208                 free_irq(tp->pdev->irq, dev);
8209                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8210                         pci_disable_msi(tp->pdev);
8211                         tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8212                 }
8213                 tg3_free_consistent(tp);
8214                 return err;
8215         }
8216
8217         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8218                 err = tg3_test_msi(tp);
8219
8220                 if (err) {
8221                         tg3_full_lock(tp, 0);
8222
8223                         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8224                                 pci_disable_msi(tp->pdev);
8225                                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8226                         }
8227                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8228                         tg3_free_rings(tp);
8229                         tg3_free_consistent(tp);
8230
8231                         tg3_full_unlock(tp);
8232
8233                         napi_disable(&tp->napi);
8234
8235                         return err;
8236                 }
8237
8238                 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8239                         if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
8240                                 u32 val = tr32(PCIE_TRANSACTION_CFG);
8241
8242                                 tw32(PCIE_TRANSACTION_CFG,
8243                                      val | PCIE_TRANS_CFG_1SHOT_MSI);
8244                         }
8245                 }
8246         }
8247
8248         tg3_phy_start(tp);
8249
8250         tg3_full_lock(tp, 0);
8251
8252         add_timer(&tp->timer);
8253         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8254         tg3_enable_ints(tp);
8255
8256         tg3_full_unlock(tp);
8257
8258         netif_start_queue(dev);
8259
8260         return 0;
8261 }
8262
8263 #if 0
8264 /*static*/ void tg3_dump_state(struct tg3 *tp)
8265 {
8266         u32 val32, val32_2, val32_3, val32_4, val32_5;
8267         u16 val16;
8268         int i;
8269
8270         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
8271         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
8272         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
8273                val16, val32);
8274
8275         /* MAC block */
8276         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
8277                tr32(MAC_MODE), tr32(MAC_STATUS));
8278         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
8279                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
8280         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
8281                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
8282         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
8283                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
8284
8285         /* Send data initiator control block */
8286         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
8287                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
8288         printk("       SNDDATAI_STATSCTRL[%08x]\n",
8289                tr32(SNDDATAI_STATSCTRL));
8290
8291         /* Send data completion control block */
8292         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
8293
8294         /* Send BD ring selector block */
8295         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
8296                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
8297
8298         /* Send BD initiator control block */
8299         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
8300                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
8301
8302         /* Send BD completion control block */
8303         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
8304
8305         /* Receive list placement control block */
8306         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
8307                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
8308         printk("       RCVLPC_STATSCTRL[%08x]\n",
8309                tr32(RCVLPC_STATSCTRL));
8310
8311         /* Receive data and receive BD initiator control block */
8312         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
8313                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
8314
8315         /* Receive data completion control block */
8316         printk("DEBUG: RCVDCC_MODE[%08x]\n",
8317                tr32(RCVDCC_MODE));
8318
8319         /* Receive BD initiator control block */
8320         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
8321                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
8322
8323         /* Receive BD completion control block */
8324         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
8325                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
8326
8327         /* Receive list selector control block */
8328         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
8329                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
8330
8331         /* Mbuf cluster free block */
8332         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
8333                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
8334
8335         /* Host coalescing control block */
8336         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
8337                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
8338         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
8339                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8340                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8341         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
8342                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8343                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8344         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
8345                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
8346         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
8347                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
8348
8349         /* Memory arbiter control block */
8350         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
8351                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
8352
8353         /* Buffer manager control block */
8354         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
8355                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
8356         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
8357                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
8358         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
8359                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
8360                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
8361                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
8362
8363         /* Read DMA control block */
8364         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
8365                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
8366
8367         /* Write DMA control block */
8368         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
8369                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
8370
8371         /* DMA completion block */
8372         printk("DEBUG: DMAC_MODE[%08x]\n",
8373                tr32(DMAC_MODE));
8374
8375         /* GRC block */
8376         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
8377                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
8378         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
8379                tr32(GRC_LOCAL_CTRL));
8380
8381         /* TG3_BDINFOs */
8382         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
8383                tr32(RCVDBDI_JUMBO_BD + 0x0),
8384                tr32(RCVDBDI_JUMBO_BD + 0x4),
8385                tr32(RCVDBDI_JUMBO_BD + 0x8),
8386                tr32(RCVDBDI_JUMBO_BD + 0xc));
8387         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
8388                tr32(RCVDBDI_STD_BD + 0x0),
8389                tr32(RCVDBDI_STD_BD + 0x4),
8390                tr32(RCVDBDI_STD_BD + 0x8),
8391                tr32(RCVDBDI_STD_BD + 0xc));
8392         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
8393                tr32(RCVDBDI_MINI_BD + 0x0),
8394                tr32(RCVDBDI_MINI_BD + 0x4),
8395                tr32(RCVDBDI_MINI_BD + 0x8),
8396                tr32(RCVDBDI_MINI_BD + 0xc));
8397
8398         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
8399         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
8400         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
8401         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
8402         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
8403                val32, val32_2, val32_3, val32_4);
8404
8405         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
8406         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
8407         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
8408         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
8409         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
8410                val32, val32_2, val32_3, val32_4);
8411
8412         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
8413         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
8414         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
8415         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
8416         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
8417         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
8418                val32, val32_2, val32_3, val32_4, val32_5);
8419
8420         /* SW status block */
8421         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
8422                tp->hw_status->status,
8423                tp->hw_status->status_tag,
8424                tp->hw_status->rx_jumbo_consumer,
8425                tp->hw_status->rx_consumer,
8426                tp->hw_status->rx_mini_consumer,
8427                tp->hw_status->idx[0].rx_producer,
8428                tp->hw_status->idx[0].tx_consumer);
8429
8430         /* SW statistics block */
8431         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
8432                ((u32 *)tp->hw_stats)[0],
8433                ((u32 *)tp->hw_stats)[1],
8434                ((u32 *)tp->hw_stats)[2],
8435                ((u32 *)tp->hw_stats)[3]);
8436
8437         /* Mailboxes */
8438         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
8439                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
8440                tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
8441                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
8442                tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
8443
8444         /* NIC side send descriptors. */
8445         for (i = 0; i < 6; i++) {
8446                 unsigned long txd;
8447
8448                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
8449                         + (i * sizeof(struct tg3_tx_buffer_desc));
8450                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
8451                        i,
8452                        readl(txd + 0x0), readl(txd + 0x4),
8453                        readl(txd + 0x8), readl(txd + 0xc));
8454         }
8455
8456         /* NIC side RX descriptors. */
8457         for (i = 0; i < 6; i++) {
8458                 unsigned long rxd;
8459
8460                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
8461                         + (i * sizeof(struct tg3_rx_buffer_desc));
8462                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
8463                        i,
8464                        readl(rxd + 0x0), readl(rxd + 0x4),
8465                        readl(rxd + 0x8), readl(rxd + 0xc));
8466                 rxd += (4 * sizeof(u32));
8467                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
8468                        i,
8469                        readl(rxd + 0x0), readl(rxd + 0x4),
8470                        readl(rxd + 0x8), readl(rxd + 0xc));
8471         }
8472
8473         for (i = 0; i < 6; i++) {
8474                 unsigned long rxd;
8475
8476                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
8477                         + (i * sizeof(struct tg3_rx_buffer_desc));
8478                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
8479                        i,
8480                        readl(rxd + 0x0), readl(rxd + 0x4),
8481                        readl(rxd + 0x8), readl(rxd + 0xc));
8482                 rxd += (4 * sizeof(u32));
8483                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
8484                        i,
8485                        readl(rxd + 0x0), readl(rxd + 0x4),
8486                        readl(rxd + 0x8), readl(rxd + 0xc));
8487         }
8488 }
8489 #endif
8490
8491 static struct net_device_stats *tg3_get_stats(struct net_device *);
8492 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
8493
8494 static int tg3_close(struct net_device *dev)
8495 {
8496         struct tg3 *tp = netdev_priv(dev);
8497
8498         napi_disable(&tp->napi);
8499         cancel_work_sync(&tp->reset_task);
8500
8501         netif_stop_queue(dev);
8502
8503         del_timer_sync(&tp->timer);
8504
8505         tg3_full_lock(tp, 1);
8506 #if 0
8507         tg3_dump_state(tp);
8508 #endif
8509
8510         tg3_disable_ints(tp);
8511
8512         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8513         tg3_free_rings(tp);
8514         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8515
8516         tg3_full_unlock(tp);
8517
8518         free_irq(tp->pdev->irq, dev);
8519         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8520                 pci_disable_msi(tp->pdev);
8521                 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8522         }
8523
8524         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
8525                sizeof(tp->net_stats_prev));
8526         memcpy(&tp->estats_prev, tg3_get_estats(tp),
8527                sizeof(tp->estats_prev));
8528
8529         tg3_free_consistent(tp);
8530
8531         tg3_set_power_state(tp, PCI_D3hot);
8532
8533         netif_carrier_off(tp->dev);
8534
8535         return 0;
8536 }
8537
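/* On 32-bit hosts only the low word of a 64-bit hardware counter can be
 * reported, since the struct net_device_stats fields are unsigned long;
 * 64-bit hosts return the full value.
 */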
8538 static inline unsigned long get_stat64(tg3_stat64_t *val)
8539 {
8540         unsigned long ret;
8541
8542 #if (BITS_PER_LONG == 32)
8543         ret = val->low;
8544 #else
8545         ret = ((u64)val->high << 32) | ((u64)val->low);
8546 #endif
8547         return ret;
8548 }
8549
8550 static inline u64 get_estat64(tg3_stat64_t *val)
8551 {
8552        return ((u64)val->high << 32) | ((u64)val->low);
8553 }
8554
8555 static unsigned long calc_crc_errors(struct tg3 *tp)
8556 {
8557         struct tg3_hw_stats *hw_stats = tp->hw_stats;
8558
8559         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8560             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8561              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
8562                 u32 val;
8563
8564                 spin_lock_bh(&tp->lock);
8565                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
8566                         tg3_writephy(tp, MII_TG3_TEST1,
8567                                      val | MII_TG3_TEST1_CRC_EN);
8568                         tg3_readphy(tp, 0x14, &val);
8569                 } else
8570                         val = 0;
8571                 spin_unlock_bh(&tp->lock);
8572
8573                 tp->phy_crc_errors += val;
8574
8575                 return tp->phy_crc_errors;
8576         }
8577
8578         return get_stat64(&hw_stats->rx_fcs_errors);
8579 }
8580
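/* Report each ethtool statistic as the snapshot saved at the last close
 * (tp->estats_prev) plus the 64-bit counter currently accumulated in
 * tp->hw_stats, so totals stay monotonic across down/up cycles.
 */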
8581 #define ESTAT_ADD(member) \
8582         estats->member =        old_estats->member + \
8583                                 get_estat64(&hw_stats->member)
8584
8585 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
8586 {
8587         struct tg3_ethtool_stats *estats = &tp->estats;
8588         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
8589         struct tg3_hw_stats *hw_stats = tp->hw_stats;
8590
8591         if (!hw_stats)
8592                 return old_estats;
8593
8594         ESTAT_ADD(rx_octets);
8595         ESTAT_ADD(rx_fragments);
8596         ESTAT_ADD(rx_ucast_packets);
8597         ESTAT_ADD(rx_mcast_packets);
8598         ESTAT_ADD(rx_bcast_packets);
8599         ESTAT_ADD(rx_fcs_errors);
8600         ESTAT_ADD(rx_align_errors);
8601         ESTAT_ADD(rx_xon_pause_rcvd);
8602         ESTAT_ADD(rx_xoff_pause_rcvd);
8603         ESTAT_ADD(rx_mac_ctrl_rcvd);
8604         ESTAT_ADD(rx_xoff_entered);
8605         ESTAT_ADD(rx_frame_too_long_errors);
8606         ESTAT_ADD(rx_jabbers);
8607         ESTAT_ADD(rx_undersize_packets);
8608         ESTAT_ADD(rx_in_length_errors);
8609         ESTAT_ADD(rx_out_length_errors);
8610         ESTAT_ADD(rx_64_or_less_octet_packets);
8611         ESTAT_ADD(rx_65_to_127_octet_packets);
8612         ESTAT_ADD(rx_128_to_255_octet_packets);
8613         ESTAT_ADD(rx_256_to_511_octet_packets);
8614         ESTAT_ADD(rx_512_to_1023_octet_packets);
8615         ESTAT_ADD(rx_1024_to_1522_octet_packets);
8616         ESTAT_ADD(rx_1523_to_2047_octet_packets);
8617         ESTAT_ADD(rx_2048_to_4095_octet_packets);
8618         ESTAT_ADD(rx_4096_to_8191_octet_packets);
8619         ESTAT_ADD(rx_8192_to_9022_octet_packets);
8620
8621         ESTAT_ADD(tx_octets);
8622         ESTAT_ADD(tx_collisions);
8623         ESTAT_ADD(tx_xon_sent);
8624         ESTAT_ADD(tx_xoff_sent);
8625         ESTAT_ADD(tx_flow_control);
8626         ESTAT_ADD(tx_mac_errors);
8627         ESTAT_ADD(tx_single_collisions);
8628         ESTAT_ADD(tx_mult_collisions);
8629         ESTAT_ADD(tx_deferred);
8630         ESTAT_ADD(tx_excessive_collisions);
8631         ESTAT_ADD(tx_late_collisions);
8632         ESTAT_ADD(tx_collide_2times);
8633         ESTAT_ADD(tx_collide_3times);
8634         ESTAT_ADD(tx_collide_4times);
8635         ESTAT_ADD(tx_collide_5times);
8636         ESTAT_ADD(tx_collide_6times);
8637         ESTAT_ADD(tx_collide_7times);
8638         ESTAT_ADD(tx_collide_8times);
8639         ESTAT_ADD(tx_collide_9times);
8640         ESTAT_ADD(tx_collide_10times);
8641         ESTAT_ADD(tx_collide_11times);
8642         ESTAT_ADD(tx_collide_12times);
8643         ESTAT_ADD(tx_collide_13times);
8644         ESTAT_ADD(tx_collide_14times);
8645         ESTAT_ADD(tx_collide_15times);
8646         ESTAT_ADD(tx_ucast_packets);
8647         ESTAT_ADD(tx_mcast_packets);
8648         ESTAT_ADD(tx_bcast_packets);
8649         ESTAT_ADD(tx_carrier_sense_errors);
8650         ESTAT_ADD(tx_discards);
8651         ESTAT_ADD(tx_errors);
8652
8653         ESTAT_ADD(dma_writeq_full);
8654         ESTAT_ADD(dma_write_prioq_full);
8655         ESTAT_ADD(rxbds_empty);
8656         ESTAT_ADD(rx_discards);
8657         ESTAT_ADD(rx_errors);
8658         ESTAT_ADD(rx_threshold_hit);
8659
8660         ESTAT_ADD(dma_readq_full);
8661         ESTAT_ADD(dma_read_prioq_full);
8662         ESTAT_ADD(tx_comp_queue_full);
8663
8664         ESTAT_ADD(ring_set_send_prod_index);
8665         ESTAT_ADD(ring_status_update);
8666         ESTAT_ADD(nic_irqs);
8667         ESTAT_ADD(nic_avoided_irqs);
8668         ESTAT_ADD(nic_tx_threshold_hit);
8669
8670         return estats;
8671 }
8672
8673 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
8674 {
8675         struct tg3 *tp = netdev_priv(dev);
8676         struct net_device_stats *stats = &tp->net_stats;
8677         struct net_device_stats *old_stats = &tp->net_stats_prev;
8678         struct tg3_hw_stats *hw_stats = tp->hw_stats;
8679
8680         if (!hw_stats)
8681                 return old_stats;
8682
8683         stats->rx_packets = old_stats->rx_packets +
8684                 get_stat64(&hw_stats->rx_ucast_packets) +
8685                 get_stat64(&hw_stats->rx_mcast_packets) +
8686                 get_stat64(&hw_stats->rx_bcast_packets);
8687
8688         stats->tx_packets = old_stats->tx_packets +
8689                 get_stat64(&hw_stats->tx_ucast_packets) +
8690                 get_stat64(&hw_stats->tx_mcast_packets) +
8691                 get_stat64(&hw_stats->tx_bcast_packets);
8692
8693         stats->rx_bytes = old_stats->rx_bytes +
8694                 get_stat64(&hw_stats->rx_octets);
8695         stats->tx_bytes = old_stats->tx_bytes +
8696                 get_stat64(&hw_stats->tx_octets);
8697
8698         stats->rx_errors = old_stats->rx_errors +
8699                 get_stat64(&hw_stats->rx_errors);
8700         stats->tx_errors = old_stats->tx_errors +
8701                 get_stat64(&hw_stats->tx_errors) +
8702                 get_stat64(&hw_stats->tx_mac_errors) +
8703                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
8704                 get_stat64(&hw_stats->tx_discards);
8705
8706         stats->multicast = old_stats->multicast +
8707                 get_stat64(&hw_stats->rx_mcast_packets);
8708         stats->collisions = old_stats->collisions +
8709                 get_stat64(&hw_stats->tx_collisions);
8710
8711         stats->rx_length_errors = old_stats->rx_length_errors +
8712                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
8713                 get_stat64(&hw_stats->rx_undersize_packets);
8714
8715         stats->rx_over_errors = old_stats->rx_over_errors +
8716                 get_stat64(&hw_stats->rxbds_empty);
8717         stats->rx_frame_errors = old_stats->rx_frame_errors +
8718                 get_stat64(&hw_stats->rx_align_errors);
8719         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
8720                 get_stat64(&hw_stats->tx_discards);
8721         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
8722                 get_stat64(&hw_stats->tx_carrier_sense_errors);
8723
8724         stats->rx_crc_errors = old_stats->rx_crc_errors +
8725                 calc_crc_errors(tp);
8726
8727         stats->rx_missed_errors = old_stats->rx_missed_errors +
8728                 get_stat64(&hw_stats->rx_discards);
8729
8730         return stats;
8731 }
8732
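/* Bitwise CRC-32 (reflected polynomial 0xedb88320, the standard Ethernet
 * CRC) over len bytes of buf.  Used below to hash multicast addresses
 * into the MAC_HASH_REG_* filter registers.
 */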
8733 static inline u32 calc_crc(unsigned char *buf, int len)
8734 {
8735         u32 reg;
8736         u32 tmp;
8737         int j, k;
8738
8739         reg = 0xffffffff;
8740
8741         for (j = 0; j < len; j++) {
8742                 reg ^= buf[j];
8743
8744                 for (k = 0; k < 8; k++) {
8745                         tmp = reg & 0x01;
8746
8747                         reg >>= 1;
8748
8749                         if (tmp) {
8750                                 reg ^= 0xedb88320;
8751                         }
8752                 }
8753         }
8754
8755         return ~reg;
8756 }
8757
8758 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8759 {
8760         /* accept or reject all multicast frames */
8761         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8762         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8763         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8764         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8765 }
8766
8767 static void __tg3_set_rx_mode(struct net_device *dev)
8768 {
8769         struct tg3 *tp = netdev_priv(dev);
8770         u32 rx_mode;
8771
8772         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8773                                   RX_MODE_KEEP_VLAN_TAG);
8774
8775         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8776          * flag clear.
8777          */
8778 #if TG3_VLAN_TAG_USED
8779         if (!tp->vlgrp &&
8780             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8781                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8782 #else
8783         /* By definition, VLAN is always disabled in this
8784          * case.
8785          */
8786         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8787                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8788 #endif
8789
8790         if (dev->flags & IFF_PROMISC) {
8791                 /* Promiscuous mode. */
8792                 rx_mode |= RX_MODE_PROMISC;
8793         } else if (dev->flags & IFF_ALLMULTI) {
8794                 /* Accept all multicast. */
8795                 tg3_set_multi(tp, 1);
8796         } else if (dev->mc_count < 1) {
8797                 /* Reject all multicast. */
8798                 tg3_set_multi(tp, 0);
8799         } else {
8800                 /* Accept one or more multicast(s). */
8801                 struct dev_mc_list *mclist;
8802                 unsigned int i;
8803                 u32 mc_filter[4] = { 0, };
8804                 u32 regidx;
8805                 u32 bit;
8806                 u32 crc;
8807
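                /* Hash each address into one of 128 filter bits: the low
                 * 7 bits of the inverted CRC select the bit, with bits 6:5
                 * picking one of the four 32-bit hash registers and bits
                 * 4:0 the bit position within it.
                 */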
8808                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
8809                      i++, mclist = mclist->next) {
8810
8811                         crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
8812                         bit = ~crc & 0x7f;
8813                         regidx = (bit & 0x60) >> 5;
8814                         bit &= 0x1f;
8815                         mc_filter[regidx] |= (1 << bit);
8816                 }
8817
8818                 tw32(MAC_HASH_REG_0, mc_filter[0]);
8819                 tw32(MAC_HASH_REG_1, mc_filter[1]);
8820                 tw32(MAC_HASH_REG_2, mc_filter[2]);
8821                 tw32(MAC_HASH_REG_3, mc_filter[3]);
8822         }
8823
8824         if (rx_mode != tp->rx_mode) {
8825                 tp->rx_mode = rx_mode;
8826                 tw32_f(MAC_RX_MODE, rx_mode);
8827                 udelay(10);
8828         }
8829 }
8830
8831 static void tg3_set_rx_mode(struct net_device *dev)
8832 {
8833         struct tg3 *tp = netdev_priv(dev);
8834
8835         if (!netif_running(dev))
8836                 return;
8837
8838         tg3_full_lock(tp, 0);
8839         __tg3_set_rx_mode(dev);
8840         tg3_full_unlock(tp);
8841 }
8842
8843 #define TG3_REGDUMP_LEN         (32 * 1024)
8844
8845 static int tg3_get_regs_len(struct net_device *dev)
8846 {
8847         return TG3_REGDUMP_LEN;
8848 }
8849
8850 static void tg3_get_regs(struct net_device *dev,
8851                 struct ethtool_regs *regs, void *_p)
8852 {
8853         u32 *p = _p;
8854         struct tg3 *tp = netdev_priv(dev);
8855         u8 *orig_p = _p;
8856         int i;
8857
8858         regs->version = 0;
8859
8860         memset(p, 0, TG3_REGDUMP_LEN);
8861
8862         if (tp->link_config.phy_is_low_power)
8863                 return;
8864
8865         tg3_full_lock(tp, 0);
8866
8867 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
8868 #define GET_REG32_LOOP(base,len)                \
8869 do {    p = (u32 *)(orig_p + (base));           \
8870         for (i = 0; i < len; i += 4)            \
8871                 __GET_REG32((base) + i);        \
8872 } while (0)
8873 #define GET_REG32_1(reg)                        \
8874 do {    p = (u32 *)(orig_p + (reg));            \
8875         __GET_REG32((reg));                     \
8876 } while (0)
8877
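        /* The dump buffer mirrors the register address map: p is
         * repositioned to orig_p + base before each block, so every
         * register lands at its own offset and untouched ranges stay zero
         * from the memset above.
         */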
8878         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
8879         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
8880         GET_REG32_LOOP(MAC_MODE, 0x4f0);
8881         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
8882         GET_REG32_1(SNDDATAC_MODE);
8883         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
8884         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
8885         GET_REG32_1(SNDBDC_MODE);
8886         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
8887         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
8888         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
8889         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
8890         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
8891         GET_REG32_1(RCVDCC_MODE);
8892         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
8893         GET_REG32_LOOP(RCVCC_MODE, 0x14);
8894         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
8895         GET_REG32_1(MBFREE_MODE);
8896         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
8897         GET_REG32_LOOP(MEMARB_MODE, 0x10);
8898         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
8899         GET_REG32_LOOP(RDMAC_MODE, 0x08);
8900         GET_REG32_LOOP(WDMAC_MODE, 0x08);
8901         GET_REG32_1(RX_CPU_MODE);
8902         GET_REG32_1(RX_CPU_STATE);
8903         GET_REG32_1(RX_CPU_PGMCTR);
8904         GET_REG32_1(RX_CPU_HWBKPT);
8905         GET_REG32_1(TX_CPU_MODE);
8906         GET_REG32_1(TX_CPU_STATE);
8907         GET_REG32_1(TX_CPU_PGMCTR);
8908         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
8909         GET_REG32_LOOP(FTQ_RESET, 0x120);
8910         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
8911         GET_REG32_1(DMAC_MODE);
8912         GET_REG32_LOOP(GRC_MODE, 0x4c);
8913         if (tp->tg3_flags & TG3_FLAG_NVRAM)
8914                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
8915
8916 #undef __GET_REG32
8917 #undef GET_REG32_LOOP
8918 #undef GET_REG32_1
8919
8920         tg3_full_unlock(tp);
8921 }
8922
8923 static int tg3_get_eeprom_len(struct net_device *dev)
8924 {
8925         struct tg3 *tp = netdev_priv(dev);
8926
8927         return tp->nvram_size;
8928 }
8929
8930 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
8931 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
8932 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
8933
8934 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8935 {
8936         struct tg3 *tp = netdev_priv(dev);
8937         int ret;
8938         u8  *pd;
8939         u32 i, offset, len, b_offset, b_count;
8940         __le32 val;
8941
8942         if (tp->link_config.phy_is_low_power)
8943                 return -EAGAIN;
8944
8945         offset = eeprom->offset;
8946         len = eeprom->len;
8947         eeprom->len = 0;
8948
8949         eeprom->magic = TG3_EEPROM_MAGIC;
8950
8951         if (offset & 3) {
8952                 /* adjustments to start on required 4 byte boundary */
8953                 b_offset = offset & 3;
8954                 b_count = 4 - b_offset;
8955                 if (b_count > len) {
8956                         /* i.e. offset=1 len=2 */
8957                         b_count = len;
8958                 }
8959                 ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
8960                 if (ret)
8961                         return ret;
8962                 memcpy(data, ((char*)&val) + b_offset, b_count);
8963                 len -= b_count;
8964                 offset += b_count;
8965                 eeprom->len += b_count;
8966         }
8967
8968         /* read bytes up to the last 4 byte boundary */
8969         pd = &data[eeprom->len];
8970         for (i = 0; i < (len - (len & 3)); i += 4) {
8971                 ret = tg3_nvram_read_le(tp, offset + i, &val);
8972                 if (ret) {
8973                         eeprom->len += i;
8974                         return ret;
8975                 }
8976                 memcpy(pd + i, &val, 4);
8977         }
8978         eeprom->len += i;
8979
8980         if (len & 3) {
8981                 /* read last bytes not ending on 4 byte boundary */
8982                 pd = &data[eeprom->len];
8983                 b_count = len & 3;
8984                 b_offset = offset + len - b_count;
8985                 ret = tg3_nvram_read_le(tp, b_offset, &val);
8986                 if (ret)
8987                         return ret;
8988                 memcpy(pd, &val, b_count);
8989                 eeprom->len += b_count;
8990         }
8991         return 0;
8992 }
8993
8994 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
8995
8996 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8997 {
8998         struct tg3 *tp = netdev_priv(dev);
8999         int ret;
9000         u32 offset, len, b_offset, odd_len;
9001         u8 *buf;
9002         __le32 start, end;
9003
9004         if (tp->link_config.phy_is_low_power)
9005                 return -EAGAIN;
9006
9007         if (eeprom->magic != TG3_EEPROM_MAGIC)
9008                 return -EINVAL;
9009
9010         offset = eeprom->offset;
9011         len = eeprom->len;
9012
9013         if ((b_offset = (offset & 3))) {
9014                 /* adjustments to start on required 4 byte boundary */
9015                 ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
9016                 if (ret)
9017                         return ret;
9018                 len += b_offset;
9019                 offset &= ~3;
9020                 if (len < 4)
9021                         len = 4;
9022         }
9023
9024         odd_len = 0;
9025         if (len & 3) {
9026                 /* adjustments to end on required 4 byte boundary */
9027                 odd_len = 1;
9028                 len = (len + 3) & ~3;
9029                 ret = tg3_nvram_read_le(tp, offset+len-4, &end);
9030                 if (ret)
9031                         return ret;
9032         }
9033
9034         buf = data;
9035         if (b_offset || odd_len) {
9036                 buf = kmalloc(len, GFP_KERNEL);
9037                 if (!buf)
9038                         return -ENOMEM;
9039                 if (b_offset)
9040                         memcpy(buf, &start, 4);
9041                 if (odd_len)
9042                         memcpy(buf+len-4, &end, 4);
9043                 memcpy(buf + b_offset, data, eeprom->len);
9044         }
9045
9046         ret = tg3_nvram_write_block(tp, offset, len, buf);
9047
9048         if (buf != data)
9049                 kfree(buf);
9050
9051         return ret;
9052 }
9053
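/* Report link settings to ethtool.  When the PHY is managed by phylib
 * (TG3_FLG3_USE_PHYLIB) the query is forwarded to the attached phydev;
 * otherwise the supported mask is derived from the copper vs. serdes flags
 * and the speed/duplex/autoneg values come from the cached link_config.
 */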
9054 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9055 {
9056         struct tg3 *tp = netdev_priv(dev);
9057
9058         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9059                 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9060                         return -EAGAIN;
9061                 return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
9062         }
9063
9064         cmd->supported = (SUPPORTED_Autoneg);
9065
9066         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9067                 cmd->supported |= (SUPPORTED_1000baseT_Half |
9068                                    SUPPORTED_1000baseT_Full);
9069
9070         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
9071                 cmd->supported |= (SUPPORTED_100baseT_Half |
9072                                   SUPPORTED_100baseT_Full |
9073                                   SUPPORTED_10baseT_Half |
9074                                   SUPPORTED_10baseT_Full |
9075                                   SUPPORTED_TP);
9076                 cmd->port = PORT_TP;
9077         } else {
9078                 cmd->supported |= SUPPORTED_FIBRE;
9079                 cmd->port = PORT_FIBRE;
9080         }
9081
9082         cmd->advertising = tp->link_config.advertising;
9083         if (netif_running(dev)) {
9084                 cmd->speed = tp->link_config.active_speed;
9085                 cmd->duplex = tp->link_config.active_duplex;
9086         }
9087         cmd->phy_address = PHY_ADDR;
9088         cmd->transceiver = 0;
9089         cmd->autoneg = tp->link_config.autoneg;
9090         cmd->maxtxpkt = 0;
9091         cmd->maxrxpkt = 0;
9092         return 0;
9093 }
9094
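/* Apply new link settings.  Serdes devices may only advertise 1000Base and
 * cannot force any speed other than 1000, while copper devices cannot force
 * 1000.  An accepted configuration is latched into link_config and the PHY
 * is reprogrammed if the interface is up.
 */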
9095 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9096 {
9097         struct tg3 *tp = netdev_priv(dev);
9098
9099         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9100                 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9101                         return -EAGAIN;
9102                 return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
9103         }
9104
9105         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
9106                 /* These are the only valid advertisement bits allowed.  */
9107                 if (cmd->autoneg == AUTONEG_ENABLE &&
9108                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
9109                                           ADVERTISED_1000baseT_Full |
9110                                           ADVERTISED_Autoneg |
9111                                           ADVERTISED_FIBRE)))
9112                         return -EINVAL;
9113                 /* Fiber can only do SPEED_1000.  */
9114                 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
9115                          (cmd->speed != SPEED_1000))
9116                         return -EINVAL;
9117         /* Copper cannot force SPEED_1000.  */
9118         } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
9119                    (cmd->speed == SPEED_1000))
9120                 return -EINVAL;
9121         else if ((cmd->speed == SPEED_1000) &&
9122                  (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9123                 return -EINVAL;
9124
9125         tg3_full_lock(tp, 0);
9126
9127         tp->link_config.autoneg = cmd->autoneg;
9128         if (cmd->autoneg == AUTONEG_ENABLE) {
9129                 tp->link_config.advertising = (cmd->advertising |
9130                                               ADVERTISED_Autoneg);
9131                 tp->link_config.speed = SPEED_INVALID;
9132                 tp->link_config.duplex = DUPLEX_INVALID;
9133         } else {
9134                 tp->link_config.advertising = 0;
9135                 tp->link_config.speed = cmd->speed;
9136                 tp->link_config.duplex = cmd->duplex;
9137         }
9138
9139         tp->link_config.orig_speed = tp->link_config.speed;
9140         tp->link_config.orig_duplex = tp->link_config.duplex;
9141         tp->link_config.orig_autoneg = tp->link_config.autoneg;
9142
9143         if (netif_running(dev))
9144                 tg3_setup_phy(tp, 1);
9145
9146         tg3_full_unlock(tp);
9147
9148         return 0;
9149 }
9150
9151 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9152 {
9153         struct tg3 *tp = netdev_priv(dev);
9154
9155         strcpy(info->driver, DRV_MODULE_NAME);
9156         strcpy(info->version, DRV_MODULE_VERSION);
9157         strcpy(info->fw_version, tp->fw_ver);
9158         strcpy(info->bus_info, pci_name(tp->pdev));
9159 }
9160
9161 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9162 {
9163         struct tg3 *tp = netdev_priv(dev);
9164
9165         if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9166             device_can_wakeup(&tp->pdev->dev))
9167                 wol->supported = WAKE_MAGIC;
9168         else
9169                 wol->supported = 0;
9170         wol->wolopts = 0;
9171         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
9172             device_can_wakeup(&tp->pdev->dev))
9173                 wol->wolopts = WAKE_MAGIC;
9174         memset(&wol->sopass, 0, sizeof(wol->sopass));
9175 }
9176
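/* Enable or disable Wake-on-LAN.  Only WAKE_MAGIC is supported, and only on
 * devices that advertise TG3_FLAG_WOL_CAP and can wake the system.
 */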
9177 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9178 {
9179         struct tg3 *tp = netdev_priv(dev);
9180         struct device *dp = &tp->pdev->dev;
9181
9182         if (wol->wolopts & ~WAKE_MAGIC)
9183                 return -EINVAL;
9184         if ((wol->wolopts & WAKE_MAGIC) &&
9185             !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
9186                 return -EINVAL;
9187
9188         spin_lock_bh(&tp->lock);
9189         if (wol->wolopts & WAKE_MAGIC) {
9190                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
9191                 device_set_wakeup_enable(dp, true);
9192         } else {
9193                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9194                 device_set_wakeup_enable(dp, false);
9195         }
9196         spin_unlock_bh(&tp->lock);
9197
9198         return 0;
9199 }
9200
9201 static u32 tg3_get_msglevel(struct net_device *dev)
9202 {
9203         struct tg3 *tp = netdev_priv(dev);
9204         return tp->msg_enable;
9205 }
9206
9207 static void tg3_set_msglevel(struct net_device *dev, u32 value)
9208 {
9209         struct tg3 *tp = netdev_priv(dev);
9210         tp->msg_enable = value;
9211 }
9212
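/* ethtool ->set_tso handler.  TSO may only be enabled on TSO-capable chips;
 * parts with the second-generation hardware TSO engine (except the 5906)
 * also gain TSO6, and the 5761, non-AX 5784 and 5785 additionally enable
 * TSO with ECN.
 */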
9213 static int tg3_set_tso(struct net_device *dev, u32 value)
9214 {
9215         struct tg3 *tp = netdev_priv(dev);
9216
9217         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
9218                 if (value)
9219                         return -EINVAL;
9220                 return 0;
9221         }
9222         if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
9223             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
9224                 if (value) {
9225                         dev->features |= NETIF_F_TSO6;
9226                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9227                             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9228                              GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9229                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9230                                 dev->features |= NETIF_F_TSO_ECN;
9231                 } else
9232                         dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
9233         }
9234         return ethtool_op_set_tso(dev, value);
9235 }
9236
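/* Restart autonegotiation.  With phylib this is delegated to phy_start_aneg();
 * otherwise BMCR_ANRESTART is written directly, provided autoneg is enabled
 * or the link was established by parallel detection.
 */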
9237 static int tg3_nway_reset(struct net_device *dev)
9238 {
9239         struct tg3 *tp = netdev_priv(dev);
9240         int r;
9241
9242         if (!netif_running(dev))
9243                 return -EAGAIN;
9244
9245         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9246                 return -EINVAL;
9247
9248         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9249                 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9250                         return -EAGAIN;
9251                 r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]);
9252         } else {
9253                 u32 bmcr;
9254
9255                 spin_lock_bh(&tp->lock);
9256                 r = -EINVAL;
9257                 tg3_readphy(tp, MII_BMCR, &bmcr);
9258                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
9259                     ((bmcr & BMCR_ANENABLE) ||
9260                      (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
9261                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
9262                                                    BMCR_ANENABLE);
9263                         r = 0;
9264                 }
9265                 spin_unlock_bh(&tp->lock);
9266         }
9267
9268         return r;
9269 }
9270
9271 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9272 {
9273         struct tg3 *tp = netdev_priv(dev);
9274
9275         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9276         ering->rx_mini_max_pending = 0;
9277         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9278                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9279         else
9280                 ering->rx_jumbo_max_pending = 0;
9281
9282         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
9283
9284         ering->rx_pending = tp->rx_pending;
9285         ering->rx_mini_pending = 0;
9286         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9287                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9288         else
9289                 ering->rx_jumbo_pending = 0;
9290
9291         ering->tx_pending = tp->tx_pending;
9292 }
9293
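/* Resize the RX/TX rings.  Requests beyond the fixed hardware ring sizes are
 * rejected, as are TX rings too small to hold a maximally fragmented skb
 * (three times that on chips with the TSO bug).  If the interface is running,
 * the hardware is halted and restarted with the new sizes.
 */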
9294 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9295 {
9296         struct tg3 *tp = netdev_priv(dev);
9297         int irq_sync = 0, err = 0;
9298
9299         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
9300             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
9301             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
9302             (ering->tx_pending <= MAX_SKB_FRAGS) ||
9303             ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
9304              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
9305                 return -EINVAL;
9306
9307         if (netif_running(dev)) {
9308                 tg3_phy_stop(tp);
9309                 tg3_netif_stop(tp);
9310                 irq_sync = 1;
9311         }
9312
9313         tg3_full_lock(tp, irq_sync);
9314
9315         tp->rx_pending = ering->rx_pending;
9316
9317         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
9318             tp->rx_pending > 63)
9319                 tp->rx_pending = 63;
9320         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
9321         tp->tx_pending = ering->tx_pending;
9322
9323         if (netif_running(dev)) {
9324                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9325                 err = tg3_restart_hw(tp, 1);
9326                 if (!err)
9327                         tg3_netif_start(tp);
9328         }
9329
9330         tg3_full_unlock(tp);
9331
9332         if (irq_sync && !err)
9333                 tg3_phy_start(tp);
9334
9335         return err;
9336 }
9337
9338 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9339 {
9340         struct tg3 *tp = netdev_priv(dev);
9341
9342         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
9343
9344         if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
9345                 epause->rx_pause = 1;
9346         else
9347                 epause->rx_pause = 0;
9348
9349         if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
9350                 epause->tx_pause = 1;
9351         else
9352                 epause->tx_pause = 0;
9353 }
9354
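/* Configure flow control.  In phylib mode with autoneg enabled the rx/tx
 * pause request is translated into Pause/Asym_Pause advertisement bits and
 * autonegotiation is restarted if they changed; in all other cases the
 * flowctrl flags are updated and the link is reconfigured, or the hardware
 * restarted, if the interface is running.
 */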
9355 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9356 {
9357         struct tg3 *tp = netdev_priv(dev);
9358         int err = 0;
9359
9360         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9361                 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9362                         return -EAGAIN;
9363
9364                 if (epause->autoneg) {
9365                         u32 newadv;
9366                         struct phy_device *phydev;
9367
9368                         phydev = tp->mdio_bus->phy_map[PHY_ADDR];
9369
9370                         if (epause->rx_pause) {
9371                                 if (epause->tx_pause)
9372                                         newadv = ADVERTISED_Pause;
9373                                 else
9374                                         newadv = ADVERTISED_Pause |
9375                                                  ADVERTISED_Asym_Pause;
9376                         } else if (epause->tx_pause) {
9377                                 newadv = ADVERTISED_Asym_Pause;
9378                         } else
9379                                 newadv = 0;
9380
9381                         if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
9382                                 u32 oldadv = phydev->advertising &
9383                                              (ADVERTISED_Pause |
9384                                               ADVERTISED_Asym_Pause);
9385                                 if (oldadv != newadv) {
9386                                         phydev->advertising &=
9387                                                 ~(ADVERTISED_Pause |
9388                                                   ADVERTISED_Asym_Pause);
9389                                         phydev->advertising |= newadv;
9390                                         err = phy_start_aneg(phydev);
9391                                 }
9392                         } else {
9393                                 tp->link_config.advertising &=
9394                                                 ~(ADVERTISED_Pause |
9395                                                   ADVERTISED_Asym_Pause);
9396                                 tp->link_config.advertising |= newadv;
9397                         }
9398                 } else {
9399                         if (epause->rx_pause)
9400                                 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
9401                         else
9402                                 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
9403
9404                         if (epause->tx_pause)
9405                                 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
9406                         else
9407                                 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
9408
9409                         if (netif_running(dev))
9410                                 tg3_setup_flow_control(tp, 0, 0);
9411                 }
9412         } else {
9413                 int irq_sync = 0;
9414
9415                 if (netif_running(dev)) {
9416                         tg3_netif_stop(tp);
9417                         irq_sync = 1;
9418                 }
9419
9420                 tg3_full_lock(tp, irq_sync);
9421
9422                 if (epause->autoneg)
9423                         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9424                 else
9425                         tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
9426                 if (epause->rx_pause)
9427                         tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
9428                 else
9429                         tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
9430                 if (epause->tx_pause)
9431                         tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
9432                 else
9433                         tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
9434
9435                 if (netif_running(dev)) {
9436                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9437                         err = tg3_restart_hw(tp, 1);
9438                         if (!err)
9439                                 tg3_netif_start(tp);
9440                 }
9441
9442                 tg3_full_unlock(tp);
9443         }
9444
9445         return err;
9446 }
9447
9448 static u32 tg3_get_rx_csum(struct net_device *dev)
9449 {
9450         struct tg3 *tp = netdev_priv(dev);
9451         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
9452 }
9453
9454 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
9455 {
9456         struct tg3 *tp = netdev_priv(dev);
9457
9458         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9459                 if (data != 0)
9460                         return -EINVAL;
9461                 return 0;
9462         }
9463
9464         spin_lock_bh(&tp->lock);
9465         if (data)
9466                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9467         else
9468                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
9469         spin_unlock_bh(&tp->lock);
9470
9471         return 0;
9472 }
9473
9474 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
9475 {
9476         struct tg3 *tp = netdev_priv(dev);
9477
9478         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9479                 if (data != 0)
9480                         return -EINVAL;
9481                 return 0;
9482         }
9483
9484         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9485             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9486             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9487             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9488             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9489                 ethtool_op_set_tx_ipv6_csum(dev, data);
9490         else
9491                 ethtool_op_set_tx_csum(dev, data);
9492
9493         return 0;
9494 }
9495
9496 static int tg3_get_sset_count (struct net_device *dev, int sset)
9497 {
9498         switch (sset) {
9499         case ETH_SS_TEST:
9500                 return TG3_NUM_TEST;
9501         case ETH_SS_STATS:
9502                 return TG3_NUM_STATS;
9503         default:
9504                 return -EOPNOTSUPP;
9505         }
9506 }
9507
9508 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
9509 {
9510         switch (stringset) {
9511         case ETH_SS_STATS:
9512                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
9513                 break;
9514         case ETH_SS_TEST:
9515                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
9516                 break;
9517         default:
9518                 WARN_ON(1);     /* we need a WARN() */
9519                 break;
9520         }
9521 }
9522
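/* Identify the adapter by blinking its LEDs.  The LED control register is
 * toggled between all-LEDs-forced-on and all-LEDs-forced-off every 500 ms
 * for the requested number of seconds, then restored to its original value.
 */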
9523 static int tg3_phys_id(struct net_device *dev, u32 data)
9524 {
9525         struct tg3 *tp = netdev_priv(dev);
9526         int i;
9527
9528         if (!netif_running(tp->dev))
9529                 return -EAGAIN;
9530
9531         if (data == 0)
9532                 data = UINT_MAX / 2;
9533
9534         for (i = 0; i < (data * 2); i++) {
9535                 if ((i % 2) == 0)
9536                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9537                                            LED_CTRL_1000MBPS_ON |
9538                                            LED_CTRL_100MBPS_ON |
9539                                            LED_CTRL_10MBPS_ON |
9540                                            LED_CTRL_TRAFFIC_OVERRIDE |
9541                                            LED_CTRL_TRAFFIC_BLINK |
9542                                            LED_CTRL_TRAFFIC_LED);
9543
9544                 else
9545                         tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9546                                            LED_CTRL_TRAFFIC_OVERRIDE);
9547
9548                 if (msleep_interruptible(500))
9549                         break;
9550         }
9551         tw32(MAC_LED_CTRL, tp->led_ctrl);
9552         return 0;
9553 }
9554
9555 static void tg3_get_ethtool_stats (struct net_device *dev,
9556                                    struct ethtool_stats *estats, u64 *tmp_stats)
9557 {
9558         struct tg3 *tp = netdev_priv(dev);
9559         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
9560 }
9561
9562 #define NVRAM_TEST_SIZE 0x100
9563 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
9564 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
9565 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
9566 #define NVRAM_SELFBOOT_HW_SIZE 0x20
9567 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
9568
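/* Verify the NVRAM contents.  The image size is deduced from the magic
 * number; selfboot firmware images are checked with a simple byte checksum
 * (or per-byte parity bits for the hardware selfboot format), while legacy
 * images are checked against the CRCs stored at offsets 0x10 and 0xfc.
 */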
9569 static int tg3_test_nvram(struct tg3 *tp)
9570 {
9571         u32 csum, magic;
9572         __le32 *buf;
9573         int i, j, k, err = 0, size;
9574
9575         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9576                 return -EIO;
9577
9578         if (magic == TG3_EEPROM_MAGIC)
9579                 size = NVRAM_TEST_SIZE;
9580         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
9581                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
9582                     TG3_EEPROM_SB_FORMAT_1) {
9583                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
9584                         case TG3_EEPROM_SB_REVISION_0:
9585                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
9586                                 break;
9587                         case TG3_EEPROM_SB_REVISION_2:
9588                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
9589                                 break;
9590                         case TG3_EEPROM_SB_REVISION_3:
9591                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
9592                                 break;
9593                         default:
9594                                 return 0;
9595                         }
9596                 } else
9597                         return 0;
9598         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
9599                 size = NVRAM_SELFBOOT_HW_SIZE;
9600         else
9601                 return -EIO;
9602
9603         buf = kmalloc(size, GFP_KERNEL);
9604         if (buf == NULL)
9605                 return -ENOMEM;
9606
9607         err = -EIO;
9608         for (i = 0, j = 0; i < size; i += 4, j++) {
9609                 if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
9610                         break;
9611         }
9612         if (i < size)
9613                 goto out;
9614
9615         /* Selfboot format */
9616         magic = swab32(le32_to_cpu(buf[0]));
9617         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
9618             TG3_EEPROM_MAGIC_FW) {
9619                 u8 *buf8 = (u8 *) buf, csum8 = 0;
9620
9621                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
9622                     TG3_EEPROM_SB_REVISION_2) {
9623                         /* For rev 2, the csum doesn't include the MBA. */
9624                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
9625                                 csum8 += buf8[i];
9626                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
9627                                 csum8 += buf8[i];
9628                 } else {
9629                         for (i = 0; i < size; i++)
9630                                 csum8 += buf8[i];
9631                 }
9632
9633                 if (csum8 == 0) {
9634                         err = 0;
9635                         goto out;
9636                 }
9637
9638                 err = -EIO;
9639                 goto out;
9640         }
9641
9642         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
9643             TG3_EEPROM_MAGIC_HW) {
9644                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
9645                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
9646                 u8 *buf8 = (u8 *) buf;
9647
9648                 /* Separate the parity bits and the data bytes.  */
9649                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
9650                         if ((i == 0) || (i == 8)) {
9651                                 int l;
9652                                 u8 msk;
9653
9654                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
9655                                         parity[k++] = buf8[i] & msk;
9656                                 i++;
9657                         }
9658                         else if (i == 16) {
9659                                 int l;
9660                                 u8 msk;
9661
9662                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
9663                                         parity[k++] = buf8[i] & msk;
9664                                 i++;
9665
9666                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
9667                                         parity[k++] = buf8[i] & msk;
9668                                 i++;
9669                         }
9670                         data[j++] = buf8[i];
9671                 }
9672
9673                 err = -EIO;
9674                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
9675                         u8 hw8 = hweight8(data[i]);
9676
9677                         if ((hw8 & 0x1) && parity[i])
9678                                 goto out;
9679                         else if (!(hw8 & 0x1) && !parity[i])
9680                                 goto out;
9681                 }
9682                 err = 0;
9683                 goto out;
9684         }
9685
9686         /* Bootstrap checksum at offset 0x10 */
9687         csum = calc_crc((unsigned char *) buf, 0x10);
9688         if (csum != le32_to_cpu(buf[0x10/4]))
9689                 goto out;
9690
9691         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
9692         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
9693         if (csum != le32_to_cpu(buf[0xfc/4]))
9694                 goto out;
9695
9696         err = 0;
9697
9698 out:
9699         kfree(buf);
9700         return err;
9701 }
9702
9703 #define TG3_SERDES_TIMEOUT_SEC  2
9704 #define TG3_COPPER_TIMEOUT_SEC  6
9705
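/* Link self-test: wait up to TG3_SERDES_TIMEOUT_SEC (serdes) or
 * TG3_COPPER_TIMEOUT_SEC (copper) seconds for the carrier to come up.
 */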
9706 static int tg3_test_link(struct tg3 *tp)
9707 {
9708         int i, max;
9709
9710         if (!netif_running(tp->dev))
9711                 return -ENODEV;
9712
9713         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9714                 max = TG3_SERDES_TIMEOUT_SEC;
9715         else
9716                 max = TG3_COPPER_TIMEOUT_SEC;
9717
9718         for (i = 0; i < max; i++) {
9719                 if (netif_carrier_ok(tp->dev))
9720                         return 0;
9721
9722                 if (msleep_interruptible(1000))
9723                         break;
9724         }
9725
9726         return -EIO;
9727 }
9728
9729 /* Only test the commonly used registers */
9730 static int tg3_test_registers(struct tg3 *tp)
9731 {
9732         int i, is_5705, is_5750;
9733         u32 offset, read_mask, write_mask, val, save_val, read_val;
9734         static struct {
9735                 u16 offset;
9736                 u16 flags;
9737 #define TG3_FL_5705     0x1
9738 #define TG3_FL_NOT_5705 0x2
9739 #define TG3_FL_NOT_5788 0x4
9740 #define TG3_FL_NOT_5750 0x8
9741                 u32 read_mask;
9742                 u32 write_mask;
9743         } reg_tbl[] = {
9744                 /* MAC Control Registers */
9745                 { MAC_MODE, TG3_FL_NOT_5705,
9746                         0x00000000, 0x00ef6f8c },
9747                 { MAC_MODE, TG3_FL_5705,
9748                         0x00000000, 0x01ef6b8c },
9749                 { MAC_STATUS, TG3_FL_NOT_5705,
9750                         0x03800107, 0x00000000 },
9751                 { MAC_STATUS, TG3_FL_5705,
9752                         0x03800100, 0x00000000 },
9753                 { MAC_ADDR_0_HIGH, 0x0000,
9754                         0x00000000, 0x0000ffff },
9755                 { MAC_ADDR_0_LOW, 0x0000,
9756                         0x00000000, 0xffffffff },
9757                 { MAC_RX_MTU_SIZE, 0x0000,
9758                         0x00000000, 0x0000ffff },
9759                 { MAC_TX_MODE, 0x0000,
9760                         0x00000000, 0x00000070 },
9761                 { MAC_TX_LENGTHS, 0x0000,
9762                         0x00000000, 0x00003fff },
9763                 { MAC_RX_MODE, TG3_FL_NOT_5705,
9764                         0x00000000, 0x000007fc },
9765                 { MAC_RX_MODE, TG3_FL_5705,
9766                         0x00000000, 0x000007dc },
9767                 { MAC_HASH_REG_0, 0x0000,
9768                         0x00000000, 0xffffffff },
9769                 { MAC_HASH_REG_1, 0x0000,
9770                         0x00000000, 0xffffffff },
9771                 { MAC_HASH_REG_2, 0x0000,
9772                         0x00000000, 0xffffffff },
9773                 { MAC_HASH_REG_3, 0x0000,
9774                         0x00000000, 0xffffffff },
9775
9776                 /* Receive Data and Receive BD Initiator Control Registers. */
9777                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
9778                         0x00000000, 0xffffffff },
9779                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
9780                         0x00000000, 0xffffffff },
9781                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
9782                         0x00000000, 0x00000003 },
9783                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
9784                         0x00000000, 0xffffffff },
9785                 { RCVDBDI_STD_BD+0, 0x0000,
9786                         0x00000000, 0xffffffff },
9787                 { RCVDBDI_STD_BD+4, 0x0000,
9788                         0x00000000, 0xffffffff },
9789                 { RCVDBDI_STD_BD+8, 0x0000,
9790                         0x00000000, 0xffff0002 },
9791                 { RCVDBDI_STD_BD+0xc, 0x0000,
9792                         0x00000000, 0xffffffff },
9793
9794                 /* Receive BD Initiator Control Registers. */
9795                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
9796                         0x00000000, 0xffffffff },
9797                 { RCVBDI_STD_THRESH, TG3_FL_5705,
9798                         0x00000000, 0x000003ff },
9799                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
9800                         0x00000000, 0xffffffff },
9801
9802                 /* Host Coalescing Control Registers. */
9803                 { HOSTCC_MODE, TG3_FL_NOT_5705,
9804                         0x00000000, 0x00000004 },
9805                 { HOSTCC_MODE, TG3_FL_5705,
9806                         0x00000000, 0x000000f6 },
9807                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
9808                         0x00000000, 0xffffffff },
9809                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
9810                         0x00000000, 0x000003ff },
9811                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
9812                         0x00000000, 0xffffffff },
9813                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
9814                         0x00000000, 0x000003ff },
9815                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
9816                         0x00000000, 0xffffffff },
9817                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9818                         0x00000000, 0x000000ff },
9819                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
9820                         0x00000000, 0xffffffff },
9821                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9822                         0x00000000, 0x000000ff },
9823                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
9824                         0x00000000, 0xffffffff },
9825                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
9826                         0x00000000, 0xffffffff },
9827                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9828                         0x00000000, 0xffffffff },
9829                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9830                         0x00000000, 0x000000ff },
9831                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9832                         0x00000000, 0xffffffff },
9833                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9834                         0x00000000, 0x000000ff },
9835                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
9836                         0x00000000, 0xffffffff },
9837                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
9838                         0x00000000, 0xffffffff },
9839                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
9840                         0x00000000, 0xffffffff },
9841                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
9842                         0x00000000, 0xffffffff },
9843                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
9844                         0x00000000, 0xffffffff },
9845                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
9846                         0xffffffff, 0x00000000 },
9847                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
9848                         0xffffffff, 0x00000000 },
9849
9850                 /* Buffer Manager Control Registers. */
9851                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
9852                         0x00000000, 0x007fff80 },
9853                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
9854                         0x00000000, 0x007fffff },
9855                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
9856                         0x00000000, 0x0000003f },
9857                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
9858                         0x00000000, 0x000001ff },
9859                 { BUFMGR_MB_HIGH_WATER, 0x0000,
9860                         0x00000000, 0x000001ff },
9861                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
9862                         0xffffffff, 0x00000000 },
9863                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
9864                         0xffffffff, 0x00000000 },
9865
9866                 /* Mailbox Registers */
9867                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
9868                         0x00000000, 0x000001ff },
9869                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
9870                         0x00000000, 0x000001ff },
9871                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
9872                         0x00000000, 0x000007ff },
9873                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
9874                         0x00000000, 0x000001ff },
9875
9876                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
9877         };
9878
9879         is_5705 = is_5750 = 0;
9880         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9881                 is_5705 = 1;
9882                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9883                         is_5750 = 1;
9884         }
9885
9886         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
9887                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
9888                         continue;
9889
9890                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
9891                         continue;
9892
9893                 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9894                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
9895                         continue;
9896
9897                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
9898                         continue;
9899
9900                 offset = (u32) reg_tbl[i].offset;
9901                 read_mask = reg_tbl[i].read_mask;
9902                 write_mask = reg_tbl[i].write_mask;
9903
9904                 /* Save the original register content */
9905                 save_val = tr32(offset);
9906
9907                 /* Determine the read-only value. */
9908                 read_val = save_val & read_mask;
9909
9910                 /* Write zero to the register, then make sure the read-only bits
9911                  * are not changed and the read/write bits are all zeros.
9912                  */
9913                 tw32(offset, 0);
9914
9915                 val = tr32(offset);
9916
9917                 /* Test the read-only and read/write bits. */
9918                 if (((val & read_mask) != read_val) || (val & write_mask))
9919                         goto out;
9920
9921                 /* Write ones to all the bits defined by RdMask and WrMask, then
9922                  * make sure the read-only bits are not changed and the
9923                  * read/write bits are all ones.
9924                  */
9925                 tw32(offset, read_mask | write_mask);
9926
9927                 val = tr32(offset);
9928
9929                 /* Test the read-only bits. */
9930                 if ((val & read_mask) != read_val)
9931                         goto out;
9932
9933                 /* Test the read/write bits. */
9934                 if ((val & write_mask) != write_mask)
9935                         goto out;
9936
9937                 tw32(offset, save_val);
9938         }
9939
9940         return 0;
9941
9942 out:
9943         if (netif_msg_hw(tp))
9944                 printk(KERN_ERR PFX "Register test failed at offset %x\n",
9945                        offset);
9946         tw32(offset, save_val);
9947         return -EIO;
9948 }
9949
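/* Write each test pattern to every word in the given internal-memory window
 * and read it back, failing on the first mismatch.
 */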
9950 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9951 {
9952         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9953         int i;
9954         u32 j;
9955
9956         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9957                 for (j = 0; j < len; j += 4) {
9958                         u32 val;
9959
9960                         tg3_write_mem(tp, offset + j, test_pattern[i]);
9961                         tg3_read_mem(tp, offset + j, &val);
9962                         if (val != test_pattern[i])
9963                                 return -EIO;
9964                 }
9965         }
9966         return 0;
9967 }
9968
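/* Exercise the chip's internal SRAM.  The set of address ranges to test
 * depends on the ASIC generation; each range is run through
 * tg3_do_mem_test().
 */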
9969 static int tg3_test_memory(struct tg3 *tp)
9970 {
9971         static struct mem_entry {
9972                 u32 offset;
9973                 u32 len;
9974         } mem_tbl_570x[] = {
9975                 { 0x00000000, 0x00b50},
9976                 { 0x00002000, 0x1c000},
9977                 { 0xffffffff, 0x00000}
9978         }, mem_tbl_5705[] = {
9979                 { 0x00000100, 0x0000c},
9980                 { 0x00000200, 0x00008},
9981                 { 0x00004000, 0x00800},
9982                 { 0x00006000, 0x01000},
9983                 { 0x00008000, 0x02000},
9984                 { 0x00010000, 0x0e000},
9985                 { 0xffffffff, 0x00000}
9986         }, mem_tbl_5755[] = {
9987                 { 0x00000200, 0x00008},
9988                 { 0x00004000, 0x00800},
9989                 { 0x00006000, 0x00800},
9990                 { 0x00008000, 0x02000},
9991                 { 0x00010000, 0x0c000},
9992                 { 0xffffffff, 0x00000}
9993         }, mem_tbl_5906[] = {
9994                 { 0x00000200, 0x00008},
9995                 { 0x00004000, 0x00400},
9996                 { 0x00006000, 0x00400},
9997                 { 0x00008000, 0x01000},
9998                 { 0x00010000, 0x01000},
9999                 { 0xffffffff, 0x00000}
10000         };
10001         struct mem_entry *mem_tbl;
10002         int err = 0;
10003         int i;
10004
10005         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10006                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10007                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10008                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10009                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10010                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10011                         mem_tbl = mem_tbl_5755;
10012                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10013                         mem_tbl = mem_tbl_5906;
10014                 else
10015                         mem_tbl = mem_tbl_5705;
10016         } else
10017                 mem_tbl = mem_tbl_570x;
10018
10019         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10020                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
10021                     mem_tbl[i].len)) != 0)
10022                         break;
10023         }
10024
10025         return err;
10026 }
10027
10028 #define TG3_MAC_LOOPBACK        0
10029 #define TG3_PHY_LOOPBACK        1
10030
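/* Run a single loopback test.  The MAC-internal or PHY loopback path is
 * configured, one self-addressed test frame is transmitted, and that frame
 * is expected to reappear on the standard receive ring with its payload
 * intact.
 */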
10031 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10032 {
10033         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
10034         u32 desc_idx;
10035         struct sk_buff *skb, *rx_skb;
10036         u8 *tx_data;
10037         dma_addr_t map;
10038         int num_pkts, tx_len, rx_len, i, err;
10039         struct tg3_rx_buffer_desc *desc;
10040
10041         if (loopback_mode == TG3_MAC_LOOPBACK) {
10042                 /* HW errata - mac loopback fails in some cases on 5780.
10043                  * Normal traffic and PHY loopback are not affected by
10044                  * errata.
10045                  */
10046                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
10047                         return 0;
10048
10049                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
10050                            MAC_MODE_PORT_INT_LPBACK;
10051                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10052                         mac_mode |= MAC_MODE_LINK_POLARITY;
10053                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
10054                         mac_mode |= MAC_MODE_PORT_MODE_MII;
10055                 else
10056                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
10057                 tw32(MAC_MODE, mac_mode);
10058         } else if (loopback_mode == TG3_PHY_LOOPBACK) {
10059                 u32 val;
10060
10061                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10062                         u32 phytest;
10063
10064                         if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
10065                                 u32 phy;
10066
10067                                 tg3_writephy(tp, MII_TG3_EPHY_TEST,
10068                                              phytest | MII_TG3_EPHY_SHADOW_EN);
10069                                 if (!tg3_readphy(tp, 0x1b, &phy))
10070                                         tg3_writephy(tp, 0x1b, phy & ~0x20);
10071                                 tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
10072                         }
10073                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
10074                 } else
10075                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
10076
10077                 tg3_phy_toggle_automdix(tp, 0);
10078
10079                 tg3_writephy(tp, MII_BMCR, val);
10080                 udelay(40);
10081
10082                 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
10083                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10084                         tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
10085                         mac_mode |= MAC_MODE_PORT_MODE_MII;
10086                 } else
10087                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
10088
10089                 /* reset to prevent losing 1st rx packet intermittently */
10090                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
10091                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10092                         udelay(10);
10093                         tw32_f(MAC_RX_MODE, tp->rx_mode);
10094                 }
10095                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
10096                         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
10097                                 mac_mode &= ~MAC_MODE_LINK_POLARITY;
10098                         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
10099                                 mac_mode |= MAC_MODE_LINK_POLARITY;
10100                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
10101                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10102                 }
10103                 tw32(MAC_MODE, mac_mode);
10104         }
10105         else
10106                 return -EINVAL;
10107
10108         err = -EIO;
10109
10110         tx_len = 1514;
10111         skb = netdev_alloc_skb(tp->dev, tx_len);
10112         if (!skb)
10113                 return -ENOMEM;
10114
10115         tx_data = skb_put(skb, tx_len);
10116         memcpy(tx_data, tp->dev->dev_addr, 6);
10117         memset(tx_data + 6, 0x0, 8);
10118
10119         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
10120
10121         for (i = 14; i < tx_len; i++)
10122                 tx_data[i] = (u8) (i & 0xff);
10123
10124         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
10125
10126         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10127              HOSTCC_MODE_NOW);
10128
10129         udelay(10);
10130
10131         rx_start_idx = tp->hw_status->idx[0].rx_producer;
10132
10133         num_pkts = 0;
10134
10135         tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
10136
10137         tp->tx_prod++;
10138         num_pkts++;
10139
10140         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
10141                      tp->tx_prod);
10142         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
10143
10144         udelay(10);
10145
10146         /* 250 usec to allow enough time on some 10/100 Mbps devices.  */
10147         for (i = 0; i < 25; i++) {
10148                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10149                        HOSTCC_MODE_NOW);
10150
10151                 udelay(10);
10152
10153                 tx_idx = tp->hw_status->idx[0].tx_consumer;
10154                 rx_idx = tp->hw_status->idx[0].rx_producer;
10155                 if ((tx_idx == tp->tx_prod) &&
10156                     (rx_idx == (rx_start_idx + num_pkts)))
10157                         break;
10158         }
10159
10160         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
10161         dev_kfree_skb(skb);
10162
10163         if (tx_idx != tp->tx_prod)
10164                 goto out;
10165
10166         if (rx_idx != rx_start_idx + num_pkts)
10167                 goto out;
10168
10169         desc = &tp->rx_rcb[rx_start_idx];
10170         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
10171         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
10172         if (opaque_key != RXD_OPAQUE_RING_STD)
10173                 goto out;
10174
10175         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
10176             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
10177                 goto out;
10178
10179         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
10180         if (rx_len != tx_len)
10181                 goto out;
10182
10183         rx_skb = tp->rx_std_buffers[desc_idx].skb;
10184
10185         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
10186         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
10187
10188         for (i = 14; i < tx_len; i++) {
10189                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
10190                         goto out;
10191         }
10192         err = 0;
10193
10194         /* tg3_free_rings will unmap and free the rx_skb */
10195 out:
10196         return err;
10197 }
10198
10199 #define TG3_MAC_LOOPBACK_FAILED         1
10200 #define TG3_PHY_LOOPBACK_FAILED         2
10201 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
10202                                          TG3_PHY_LOOPBACK_FAILED)
10203
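/* Run the MAC loopback test and, for non-serdes devices not using phylib,
 * the PHY loopback test.  On 5784/5761/5785 the CPMU mutex is acquired and
 * link-based power management is disabled for the duration of the test.
 */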
10204 static int tg3_test_loopback(struct tg3 *tp)
10205 {
10206         int err = 0;
10207         u32 cpmuctrl = 0;
10208
10209         if (!netif_running(tp->dev))
10210                 return TG3_LOOPBACK_FAILED;
10211
10212         err = tg3_reset_hw(tp, 1);
10213         if (err)
10214                 return TG3_LOOPBACK_FAILED;
10215
10216         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10217             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10218             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
10219                 int i;
10220                 u32 status;
10221
10222                 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
10223
10224                 /* Wait for up to 40 microseconds to acquire lock. */
10225                 for (i = 0; i < 4; i++) {
10226                         status = tr32(TG3_CPMU_MUTEX_GNT);
10227                         if (status == CPMU_MUTEX_GNT_DRIVER)
10228                                 break;
10229                         udelay(10);
10230                 }
10231
10232                 if (status != CPMU_MUTEX_GNT_DRIVER)
10233                         return TG3_LOOPBACK_FAILED;
10234
10235                 /* Turn off link-based power management. */
10236                 cpmuctrl = tr32(TG3_CPMU_CTRL);
10237                 tw32(TG3_CPMU_CTRL,
10238                      cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
10239                                   CPMU_CTRL_LINK_AWARE_MODE));
10240         }
10241
10242         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
10243                 err |= TG3_MAC_LOOPBACK_FAILED;
10244
10245         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10246             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10247             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
10248                 tw32(TG3_CPMU_CTRL, cpmuctrl);
10249
10250                 /* Release the mutex */
10251                 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
10252         }
10253
10254         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
10255             !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
10256                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
10257                         err |= TG3_PHY_LOOPBACK_FAILED;
10258         }
10259
10260         return err;
10261 }
10262
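/* ethtool self-test entry point.  The NVRAM and link tests always run;
 * offline tests additionally halt the chip and exercise the register,
 * memory, loopback and interrupt tests before restoring the previous state.
 */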
10263 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
10264                           u64 *data)
10265 {
10266         struct tg3 *tp = netdev_priv(dev);
10267
10268         if (tp->link_config.phy_is_low_power)
10269                 tg3_set_power_state(tp, PCI_D0);
10270
10271         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
10272
10273         if (tg3_test_nvram(tp) != 0) {
10274                 etest->flags |= ETH_TEST_FL_FAILED;
10275                 data[0] = 1;
10276         }
10277         if (tg3_test_link(tp) != 0) {
10278                 etest->flags |= ETH_TEST_FL_FAILED;
10279                 data[1] = 1;
10280         }
10281         if (etest->flags & ETH_TEST_FL_OFFLINE) {
10282                 int err, err2 = 0, irq_sync = 0;
10283
10284                 if (netif_running(dev)) {
10285                         tg3_phy_stop(tp);
10286                         tg3_netif_stop(tp);
10287                         irq_sync = 1;
10288                 }
10289
10290                 tg3_full_lock(tp, irq_sync);
10291
10292                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
10293                 err = tg3_nvram_lock(tp);
10294                 tg3_halt_cpu(tp, RX_CPU_BASE);
10295                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10296                         tg3_halt_cpu(tp, TX_CPU_BASE);
10297                 if (!err)
10298                         tg3_nvram_unlock(tp);
10299
10300                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
10301                         tg3_phy_reset(tp);
10302
10303                 if (tg3_test_registers(tp) != 0) {
10304                         etest->flags |= ETH_TEST_FL_FAILED;
10305                         data[2] = 1;
10306                 }
10307                 if (tg3_test_memory(tp) != 0) {
10308                         etest->flags |= ETH_TEST_FL_FAILED;
10309                         data[3] = 1;
10310                 }
10311                 if ((data[4] = tg3_test_loopback(tp)) != 0)
10312                         etest->flags |= ETH_TEST_FL_FAILED;
10313
10314                 tg3_full_unlock(tp);
10315
10316                 if (tg3_test_interrupt(tp) != 0) {
10317                         etest->flags |= ETH_TEST_FL_FAILED;
10318                         data[5] = 1;
10319                 }
10320
10321                 tg3_full_lock(tp, 0);
10322
10323                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10324                 if (netif_running(dev)) {
10325                         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
10326                         err2 = tg3_restart_hw(tp, 1);
10327                         if (!err2)
10328                                 tg3_netif_start(tp);
10329                 }
10330
10331                 tg3_full_unlock(tp);
10332
10333                 if (irq_sync && !err2)
10334                         tg3_phy_start(tp);
10335         }
10336         if (tp->link_config.phy_is_low_power)
10337                 tg3_set_power_state(tp, PCI_D3hot);
10338
10339 }
10340
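/* MII ioctl handler (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG).  With phylib the
 * request is passed to phy_mii_ioctl(); otherwise the PHY register is read
 * or written directly under tp->lock, provided the device has a PHY and is
 * not in a low-power state.
 */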
10341 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10342 {
10343         struct mii_ioctl_data *data = if_mii(ifr);
10344         struct tg3 *tp = netdev_priv(dev);
10345         int err;
10346
10347         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10348                 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
10349                         return -EAGAIN;
10350                 return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd);
10351         }
10352
10353         switch(cmd) {
10354         case SIOCGMIIPHY:
10355                 data->phy_id = PHY_ADDR;
10356
10357                 /* fallthru */
10358         case SIOCGMIIREG: {
10359                 u32 mii_regval;
10360
10361                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10362                         break;                  /* We have no PHY */
10363
10364                 if (tp->link_config.phy_is_low_power)
10365                         return -EAGAIN;
10366
10367                 spin_lock_bh(&tp->lock);
10368                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
10369                 spin_unlock_bh(&tp->lock);
10370
10371                 data->val_out = mii_regval;
10372
10373                 return err;
10374         }
10375
10376         case SIOCSMIIREG:
10377                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10378                         break;                  /* We have no PHY */
10379
10380                 if (!capable(CAP_NET_ADMIN))
10381                         return -EPERM;
10382
10383                 if (tp->link_config.phy_is_low_power)
10384                         return -EAGAIN;
10385
10386                 spin_lock_bh(&tp->lock);
10387                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
10388                 spin_unlock_bh(&tp->lock);
10389
10390                 return err;
10391
10392         default:
10393                 /* do nothing */
10394                 break;
10395         }
10396         return -EOPNOTSUPP;
10397 }
10398
10399 #if TG3_VLAN_TAG_USED
10400 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
10401 {
10402         struct tg3 *tp = netdev_priv(dev);
10403
10404         if (netif_running(dev))
10405                 tg3_netif_stop(tp);
10406
10407         tg3_full_lock(tp, 0);
10408
10409         tp->vlgrp = grp;
10410
10411         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
10412         __tg3_set_rx_mode(dev);
10413
10414         if (netif_running(dev))
10415                 tg3_netif_start(tp);
10416
10417         tg3_full_unlock(tp);
10418 }
10419 #endif
10420
10421 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10422 {
10423         struct tg3 *tp = netdev_priv(dev);
10424
10425         memcpy(ec, &tp->coal, sizeof(*ec));
10426         return 0;
10427 }
10428
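/* Validate and apply interrupt-coalescing parameters.  Values above the
 * hardware limits are rejected, as are settings that would disable RX or TX
 * interrupts entirely; the relevant fields are copied into tp->coal and
 * pushed to the hardware if the interface is up.
 */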
10429 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10430 {
10431         struct tg3 *tp = netdev_priv(dev);
10432         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
10433         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
10434
10435         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
10436                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
10437                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
10438                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
10439                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
10440         }
10441
10442         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
10443             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
10444             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
10445             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
10446             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
10447             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
10448             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
10449             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
10450             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
10451             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
10452                 return -EINVAL;
10453
10454         /* No rx interrupts will be generated if both are zero */
10455         if ((ec->rx_coalesce_usecs == 0) &&
10456             (ec->rx_max_coalesced_frames == 0))
10457                 return -EINVAL;
10458
10459         /* No tx interrupts will be generated if both are zero */
10460         if ((ec->tx_coalesce_usecs == 0) &&
10461             (ec->tx_max_coalesced_frames == 0))
10462                 return -EINVAL;
10463
10464         /* Only copy relevant parameters, ignore all others. */
10465         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
10466         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
10467         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
10468         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
10469         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
10470         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
10471         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
10472         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
10473         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
10474
10475         if (netif_running(dev)) {
10476                 tg3_full_lock(tp, 0);
10477                 __tg3_set_coalesce(tp, &tp->coal);
10478                 tg3_full_unlock(tp);
10479         }
10480         return 0;
10481 }
10482
10483 static const struct ethtool_ops tg3_ethtool_ops = {
10484         .get_settings           = tg3_get_settings,
10485         .set_settings           = tg3_set_settings,
10486         .get_drvinfo            = tg3_get_drvinfo,
10487         .get_regs_len           = tg3_get_regs_len,
10488         .get_regs               = tg3_get_regs,
10489         .get_wol                = tg3_get_wol,
10490         .set_wol                = tg3_set_wol,
10491         .get_msglevel           = tg3_get_msglevel,
10492         .set_msglevel           = tg3_set_msglevel,
10493         .nway_reset             = tg3_nway_reset,
10494         .get_link               = ethtool_op_get_link,
10495         .get_eeprom_len         = tg3_get_eeprom_len,
10496         .get_eeprom             = tg3_get_eeprom,
10497         .set_eeprom             = tg3_set_eeprom,
10498         .get_ringparam          = tg3_get_ringparam,
10499         .set_ringparam          = tg3_set_ringparam,
10500         .get_pauseparam         = tg3_get_pauseparam,
10501         .set_pauseparam         = tg3_set_pauseparam,
10502         .get_rx_csum            = tg3_get_rx_csum,
10503         .set_rx_csum            = tg3_set_rx_csum,
10504         .set_tx_csum            = tg3_set_tx_csum,
10505         .set_sg                 = ethtool_op_set_sg,
10506         .set_tso                = tg3_set_tso,
10507         .self_test              = tg3_self_test,
10508         .get_strings            = tg3_get_strings,
10509         .phys_id                = tg3_phys_id,
10510         .get_ethtool_stats      = tg3_get_ethtool_stats,
10511         .get_coalesce           = tg3_get_coalesce,
10512         .set_coalesce           = tg3_set_coalesce,
10513         .get_sset_count         = tg3_get_sset_count,
10514 };
10515
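/* Worked example for the sizing loop below, assuming a part whose
 * addressing wraps at 4KB: reads at 0x10, 0x20, ... return ordinary data
 * until cursize reaches 0x1000, where the read aliases offset 0 and
 * returns the magic signature again, so nvram_size becomes 0x1000.  If no
 * wrap is seen the initial EEPROM_CHIP_SIZE default is kept.
 */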
10516 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
10517 {
10518         u32 cursize, val, magic;
10519
10520         tp->nvram_size = EEPROM_CHIP_SIZE;
10521
10522         if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
10523                 return;
10524
10525         if ((magic != TG3_EEPROM_MAGIC) &&
10526             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
10527             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
10528                 return;
10529
10530         /*
10531          * Size the chip by reading offsets at increasing powers of two.
10532          * When we encounter our validation signature, we know the addressing
10533          * has wrapped around, and thus have our chip size.
10534          */
10535         cursize = 0x10;
10536
10537         while (cursize < tp->nvram_size) {
10538                 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
10539                         return;
10540
10541                 if (val == magic)
10542                         break;
10543
10544                 cursize <<= 1;
10545         }
10546
10547         tp->nvram_size = cursize;
10548 }
10549
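/* Images carrying the standard TG3_EEPROM_MAGIC signature store the
 * device size at offset 0xf0, with the upper 16 bits giving the size in
 * KB.  Example: a value of 0x02000000 decodes to 0x200 * 1024 = 512KB.
 * A zero word falls back to the 512KB default, and selfboot images (no
 * magic) fall back to the probe in tg3_get_eeprom_size().
 */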
10550 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
10551 {
10552         u32 val;
10553
10554         if (tg3_nvram_read_swab(tp, 0, &val) != 0)
10555                 return;
10556
10557         /* Selfboot format */
10558         if (val != TG3_EEPROM_MAGIC) {
10559                 tg3_get_eeprom_size(tp);
10560                 return;
10561         }
10562
10563         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
10564                 if (val != 0) {
10565                         tp->nvram_size = (val >> 16) * 1024;
10566                         return;
10567                 }
10568         }
10569         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10570 }
10571
10572 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
10573 {
10574         u32 nvcfg1;
10575
10576         nvcfg1 = tr32(NVRAM_CFG1);
10577         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
10578                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10579         }
10580         else {
10581                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10582                 tw32(NVRAM_CFG1, nvcfg1);
10583         }
10584
10585         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
10586             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10587                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
10588                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
10589                                 tp->nvram_jedecnum = JEDEC_ATMEL;
10590                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10591                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10592                                 break;
10593                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
10594                                 tp->nvram_jedecnum = JEDEC_ATMEL;
10595                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
10596                                 break;
10597                         case FLASH_VENDOR_ATMEL_EEPROM:
10598                                 tp->nvram_jedecnum = JEDEC_ATMEL;
10599                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10600                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10601                                 break;
10602                         case FLASH_VENDOR_ST:
10603                                 tp->nvram_jedecnum = JEDEC_ST;
10604                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
10605                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10606                                 break;
10607                         case FLASH_VENDOR_SAIFUN:
10608                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
10609                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
10610                                 break;
10611                         case FLASH_VENDOR_SST_SMALL:
10612                         case FLASH_VENDOR_SST_LARGE:
10613                                 tp->nvram_jedecnum = JEDEC_SST;
10614                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
10615                                 break;
10616                 }
10617         }
10618         else {
10619                 tp->nvram_jedecnum = JEDEC_ATMEL;
10620                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10621                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10622         }
10623 }
10624
10625 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10626 {
10627         u32 nvcfg1;
10628
10629         nvcfg1 = tr32(NVRAM_CFG1);
10630
10631         /* NVRAM protection for TPM */
10632         if (nvcfg1 & (1 << 27))
10633                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10634
10635         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10636                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
10637                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
10638                         tp->nvram_jedecnum = JEDEC_ATMEL;
10639                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10640                         break;
10641                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10642                         tp->nvram_jedecnum = JEDEC_ATMEL;
10643                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10644                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10645                         break;
10646                 case FLASH_5752VENDOR_ST_M45PE10:
10647                 case FLASH_5752VENDOR_ST_M45PE20:
10648                 case FLASH_5752VENDOR_ST_M45PE40:
10649                         tp->nvram_jedecnum = JEDEC_ST;
10650                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10651                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10652                         break;
10653         }
10654
10655         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
10656                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10657                         case FLASH_5752PAGE_SIZE_256:
10658                                 tp->nvram_pagesize = 256;
10659                                 break;
10660                         case FLASH_5752PAGE_SIZE_512:
10661                                 tp->nvram_pagesize = 512;
10662                                 break;
10663                         case FLASH_5752PAGE_SIZE_1K:
10664                                 tp->nvram_pagesize = 1024;
10665                                 break;
10666                         case FLASH_5752PAGE_SIZE_2K:
10667                                 tp->nvram_pagesize = 2048;
10668                                 break;
10669                         case FLASH_5752PAGE_SIZE_4K:
10670                                 tp->nvram_pagesize = 4096;
10671                                 break;
10672                         case FLASH_5752PAGE_SIZE_264:
10673                                 tp->nvram_pagesize = 264;
10674                                 break;
10675                 }
10676         }
10677         else {
10678                 /* For eeprom, set pagesize to maximum eeprom size */
10679                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10680
10681                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10682                 tw32(NVRAM_CFG1, nvcfg1);
10683         }
10684 }
10685
10686 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10687 {
10688         u32 nvcfg1, protect = 0;
10689
10690         nvcfg1 = tr32(NVRAM_CFG1);
10691
10692         /* NVRAM protection for TPM */
10693         if (nvcfg1 & (1 << 27)) {
10694                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10695                 protect = 1;
10696         }
10697
10698         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10699         switch (nvcfg1) {
10700                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10701                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10702                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10703                 case FLASH_5755VENDOR_ATMEL_FLASH_5:
10704                         tp->nvram_jedecnum = JEDEC_ATMEL;
10705                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10706                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10707                         tp->nvram_pagesize = 264;
10708                         if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
10709                             nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
10710                                 tp->nvram_size = (protect ? 0x3e200 :
10711                                                   TG3_NVRAM_SIZE_512KB);
10712                         else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
10713                                 tp->nvram_size = (protect ? 0x1f200 :
10714                                                   TG3_NVRAM_SIZE_256KB);
10715                         else
10716                                 tp->nvram_size = (protect ? 0x1f200 :
10717                                                   TG3_NVRAM_SIZE_128KB);
10718                         break;
10719                 case FLASH_5752VENDOR_ST_M45PE10:
10720                 case FLASH_5752VENDOR_ST_M45PE20:
10721                 case FLASH_5752VENDOR_ST_M45PE40:
10722                         tp->nvram_jedecnum = JEDEC_ST;
10723                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10724                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10725                         tp->nvram_pagesize = 256;
10726                         if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
10727                                 tp->nvram_size = (protect ?
10728                                                   TG3_NVRAM_SIZE_64KB :
10729                                                   TG3_NVRAM_SIZE_128KB);
10730                         else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
10731                                 tp->nvram_size = (protect ?
10732                                                   TG3_NVRAM_SIZE_64KB :
10733                                                   TG3_NVRAM_SIZE_256KB);
10734                         else
10735                                 tp->nvram_size = (protect ?
10736                                                   TG3_NVRAM_SIZE_128KB :
10737                                                   TG3_NVRAM_SIZE_512KB);
10738                         break;
10739         }
10740 }
10741
10742 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
10743 {
10744         u32 nvcfg1;
10745
10746         nvcfg1 = tr32(NVRAM_CFG1);
10747
10748         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10749                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
10750                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10751                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
10752                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10753                         tp->nvram_jedecnum = JEDEC_ATMEL;
10754                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10755                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10756
10757                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10758                         tw32(NVRAM_CFG1, nvcfg1);
10759                         break;
10760                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10761                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10762                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10763                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10764                         tp->nvram_jedecnum = JEDEC_ATMEL;
10765                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10766                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10767                         tp->nvram_pagesize = 264;
10768                         break;
10769                 case FLASH_5752VENDOR_ST_M45PE10:
10770                 case FLASH_5752VENDOR_ST_M45PE20:
10771                 case FLASH_5752VENDOR_ST_M45PE40:
10772                         tp->nvram_jedecnum = JEDEC_ST;
10773                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10774                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10775                         tp->nvram_pagesize = 256;
10776                         break;
10777         }
10778 }
10779
10780 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
10781 {
10782         u32 nvcfg1, protect = 0;
10783
10784         nvcfg1 = tr32(NVRAM_CFG1);
10785
10786         /* NVRAM protection for TPM */
10787         if (nvcfg1 & (1 << 27)) {
10788                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10789                 protect = 1;
10790         }
10791
10792         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10793         switch (nvcfg1) {
10794                 case FLASH_5761VENDOR_ATMEL_ADB021D:
10795                 case FLASH_5761VENDOR_ATMEL_ADB041D:
10796                 case FLASH_5761VENDOR_ATMEL_ADB081D:
10797                 case FLASH_5761VENDOR_ATMEL_ADB161D:
10798                 case FLASH_5761VENDOR_ATMEL_MDB021D:
10799                 case FLASH_5761VENDOR_ATMEL_MDB041D:
10800                 case FLASH_5761VENDOR_ATMEL_MDB081D:
10801                 case FLASH_5761VENDOR_ATMEL_MDB161D:
10802                         tp->nvram_jedecnum = JEDEC_ATMEL;
10803                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10804                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10805                         tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10806                         tp->nvram_pagesize = 256;
10807                         break;
10808                 case FLASH_5761VENDOR_ST_A_M45PE20:
10809                 case FLASH_5761VENDOR_ST_A_M45PE40:
10810                 case FLASH_5761VENDOR_ST_A_M45PE80:
10811                 case FLASH_5761VENDOR_ST_A_M45PE16:
10812                 case FLASH_5761VENDOR_ST_M_M45PE20:
10813                 case FLASH_5761VENDOR_ST_M_M45PE40:
10814                 case FLASH_5761VENDOR_ST_M_M45PE80:
10815                 case FLASH_5761VENDOR_ST_M_M45PE16:
10816                         tp->nvram_jedecnum = JEDEC_ST;
10817                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10818                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10819                         tp->nvram_pagesize = 256;
10820                         break;
10821         }
10822
10823         if (protect) {
10824                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
10825         } else {
10826                 switch (nvcfg1) {
10827                         case FLASH_5761VENDOR_ATMEL_ADB161D:
10828                         case FLASH_5761VENDOR_ATMEL_MDB161D:
10829                         case FLASH_5761VENDOR_ST_A_M45PE16:
10830                         case FLASH_5761VENDOR_ST_M_M45PE16:
10831                                 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
10832                                 break;
10833                         case FLASH_5761VENDOR_ATMEL_ADB081D:
10834                         case FLASH_5761VENDOR_ATMEL_MDB081D:
10835                         case FLASH_5761VENDOR_ST_A_M45PE80:
10836                         case FLASH_5761VENDOR_ST_M_M45PE80:
10837                                 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
10838                                 break;
10839                         case FLASH_5761VENDOR_ATMEL_ADB041D:
10840                         case FLASH_5761VENDOR_ATMEL_MDB041D:
10841                         case FLASH_5761VENDOR_ST_A_M45PE40:
10842                         case FLASH_5761VENDOR_ST_M_M45PE40:
10843                                 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10844                                 break;
10845                         case FLASH_5761VENDOR_ATMEL_ADB021D:
10846                         case FLASH_5761VENDOR_ATMEL_MDB021D:
10847                         case FLASH_5761VENDOR_ST_A_M45PE20:
10848                         case FLASH_5761VENDOR_ST_M_M45PE20:
10849                                 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
10850                                 break;
10851                 }
10852         }
10853 }
10854
10855 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10856 {
10857         tp->nvram_jedecnum = JEDEC_ATMEL;
10858         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10859         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10860 }
10861
10862 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
10863 static void __devinit tg3_nvram_init(struct tg3 *tp)
10864 {
10865         tw32_f(GRC_EEPROM_ADDR,
10866              (EEPROM_ADDR_FSM_RESET |
10867               (EEPROM_DEFAULT_CLOCK_PERIOD <<
10868                EEPROM_ADDR_CLKPERD_SHIFT)));
10869
10870         msleep(1);
10871
10872         /* Enable seeprom accesses. */
10873         tw32_f(GRC_LOCAL_CTRL,
10874              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10875         udelay(100);
10876
10877         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10878             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10879                 tp->tg3_flags |= TG3_FLAG_NVRAM;
10880
10881                 if (tg3_nvram_lock(tp)) {
10882                         printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
10883                                "tg3_nvram_init failed.\n", tp->dev->name);
10884                         return;
10885                 }
10886                 tg3_enable_nvram_access(tp);
10887
10888                 tp->nvram_size = 0;
10889
10890                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10891                         tg3_get_5752_nvram_info(tp);
10892                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10893                         tg3_get_5755_nvram_info(tp);
10894                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10895                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10896                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10897                         tg3_get_5787_nvram_info(tp);
10898                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10899                         tg3_get_5761_nvram_info(tp);
10900                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10901                         tg3_get_5906_nvram_info(tp);
10902                 else
10903                         tg3_get_nvram_info(tp);
10904
10905                 if (tp->nvram_size == 0)
10906                         tg3_get_nvram_size(tp);
10907
10908                 tg3_disable_nvram_access(tp);
10909                 tg3_nvram_unlock(tp);
10910
10911         } else {
10912                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10913
10914                 tg3_get_eeprom_size(tp);
10915         }
10916 }
10917
10918 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
10919                                         u32 offset, u32 *val)
10920 {
10921         u32 tmp;
10922         int i;
10923
10924         if (offset > EEPROM_ADDR_ADDR_MASK ||
10925             (offset % 4) != 0)
10926                 return -EINVAL;
10927
10928         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
10929                                         EEPROM_ADDR_DEVID_MASK |
10930                                         EEPROM_ADDR_READ);
10931         tw32(GRC_EEPROM_ADDR,
10932              tmp |
10933              (0 << EEPROM_ADDR_DEVID_SHIFT) |
10934              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
10935               EEPROM_ADDR_ADDR_MASK) |
10936              EEPROM_ADDR_READ | EEPROM_ADDR_START);
10937
10938         for (i = 0; i < 1000; i++) {
10939                 tmp = tr32(GRC_EEPROM_ADDR);
10940
10941                 if (tmp & EEPROM_ADDR_COMPLETE)
10942                         break;
10943                 msleep(1);
10944         }
10945         if (!(tmp & EEPROM_ADDR_COMPLETE))
10946                 return -EBUSY;
10947
10948         *val = tr32(GRC_EEPROM_DATA);
10949         return 0;
10950 }
10951
10952 #define NVRAM_CMD_TIMEOUT 10000
10953
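/* Kick a command into the NVRAM state machine and poll for completion.
 * NVRAM_CMD_TIMEOUT polls of 10us each allow roughly 100ms for the
 * slowest erase/write cycles before giving up with -EBUSY.
 */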
10954 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10955 {
10956         int i;
10957
10958         tw32(NVRAM_CMD, nvram_cmd);
10959         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10960                 udelay(10);
10961                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10962                         udelay(10);
10963                         break;
10964                 }
10965         }
10966         if (i == NVRAM_CMD_TIMEOUT) {
10967                 return -EBUSY;
10968         }
10969         return 0;
10970 }
10971
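/* Atmel AT45DB-style flashes address their 264-byte pages as page:offset
 * rather than as a flat byte offset, so convert here.  Worked example
 * with a 264-byte pagesize: offset 0x214 (= 2 * 264 + 4) becomes
 * (2 << ATMEL_AT45DB0X1B_PAGE_POS) + 4.  tg3_nvram_logical_addr() below
 * is the inverse mapping.
 */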
10972 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10973 {
10974         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10975             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10976             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10977            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10978             (tp->nvram_jedecnum == JEDEC_ATMEL))
10979
10980                 addr = ((addr / tp->nvram_pagesize) <<
10981                         ATMEL_AT45DB0X1B_PAGE_POS) +
10982                        (addr % tp->nvram_pagesize);
10983
10984         return addr;
10985 }
10986
10987 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10988 {
10989         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10990             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10991             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10992            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10993             (tp->nvram_jedecnum == JEDEC_ATMEL))
10994
10995                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10996                         tp->nvram_pagesize) +
10997                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10998
10999         return addr;
11000 }
11001
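/* Core 32-bit NVRAM read: translate the offset, take the hardware lock,
 * enable access, run a single NVRAM_CMD_RD transaction and byte-swap the
 * NVRAM_RDDATA result.  Chips without TG3_FLAG_NVRAM (5700/5701) use the
 * slower GRC_EEPROM_ADDR/DATA path instead.
 */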
11002 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
11003 {
11004         int ret;
11005
11006         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
11007                 return tg3_nvram_read_using_eeprom(tp, offset, val);
11008
11009         offset = tg3_nvram_phys_addr(tp, offset);
11010
11011         if (offset > NVRAM_ADDR_MSK)
11012                 return -EINVAL;
11013
11014         ret = tg3_nvram_lock(tp);
11015         if (ret)
11016                 return ret;
11017
11018         tg3_enable_nvram_access(tp);
11019
11020         tw32(NVRAM_ADDR, offset);
11021         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
11022                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
11023
11024         if (ret == 0)
11025                 *val = swab32(tr32(NVRAM_RDDATA));
11026
11027         tg3_disable_nvram_access(tp);
11028
11029         tg3_nvram_unlock(tp);
11030
11031         return ret;
11032 }
11033
11034 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
11035 {
11036         u32 v;
11037         int res = tg3_nvram_read(tp, offset, &v);
11038         if (!res)
11039                 *val = cpu_to_le32(v);
11040         return res;
11041 }
11042
11043 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
11044 {
11045         int err;
11046         u32 tmp;
11047
11048         err = tg3_nvram_read(tp, offset, &tmp);
11049         *val = swab32(tmp);
11050         return err;
11051 }
11052
11053 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
11054                                     u32 offset, u32 len, u8 *buf)
11055 {
11056         int i, j, rc = 0;
11057         u32 val;
11058
11059         for (i = 0; i < len; i += 4) {
11060                 u32 addr;
11061                 __le32 data;
11062
11063                 addr = offset + i;
11064
11065                 memcpy(&data, buf + i, 4);
11066
11067                 tw32(GRC_EEPROM_DATA, le32_to_cpu(data));
11068
11069                 val = tr32(GRC_EEPROM_ADDR);
11070                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
11071
11072                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
11073                         EEPROM_ADDR_READ);
11074                 tw32(GRC_EEPROM_ADDR, val |
11075                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
11076                         (addr & EEPROM_ADDR_ADDR_MASK) |
11077                         EEPROM_ADDR_START |
11078                         EEPROM_ADDR_WRITE);
11079
11080                 for (j = 0; j < 1000; j++) {
11081                         val = tr32(GRC_EEPROM_ADDR);
11082
11083                         if (val & EEPROM_ADDR_COMPLETE)
11084                                 break;
11085                         msleep(1);
11086                 }
11087                 if (!(val & EEPROM_ADDR_COMPLETE)) {
11088                         rc = -EBUSY;
11089                         break;
11090                 }
11091         }
11092
11093         return rc;
11094 }
11095
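/* Unbuffered flash writes are read-modify-write at page granularity: the
 * target page is read into a scratch buffer, merged with the caller's
 * data, erased, then rewritten one dword at a time with FIRST/LAST
 * markers at the page boundaries.  Example with a 256-byte pagesize: a
 * write to offset 0x104 uses phy_addr = 0x100 (offset & ~pagemask) and
 * page_off = 0x4 (offset & pagemask).
 */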
11096 /* offset and length are dword aligned */
11097 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
11098                 u8 *buf)
11099 {
11100         int ret = 0;
11101         u32 pagesize = tp->nvram_pagesize;
11102         u32 pagemask = pagesize - 1;
11103         u32 nvram_cmd;
11104         u8 *tmp;
11105
11106         tmp = kmalloc(pagesize, GFP_KERNEL);
11107         if (tmp == NULL)
11108                 return -ENOMEM;
11109
11110         while (len) {
11111                 int j;
11112                 u32 phy_addr, page_off, size;
11113
11114                 phy_addr = offset & ~pagemask;
11115
11116                 for (j = 0; j < pagesize; j += 4) {
11117                         if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
11118                                                 (__le32 *) (tmp + j))))
11119                                 break;
11120                 }
11121                 if (ret)
11122                         break;
11123
11124                 page_off = offset & pagemask;
11125                 size = pagesize;
11126                 if (len < size)
11127                         size = len;
11128
11129                 len -= size;
11130
11131                 memcpy(tmp + page_off, buf, size);
11132
11133                 offset = offset + (pagesize - page_off);
11134
11135                 tg3_enable_nvram_access(tp);
11136
11137                 /*
11138                  * Before we can erase the flash page, we need
11139                  * to issue a special "write enable" command.
11140                  */
11141                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11142
11143                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11144                         break;
11145
11146                 /* Erase the target page */
11147                 tw32(NVRAM_ADDR, phy_addr);
11148
11149                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
11150                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
11151
11152                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11153                         break;
11154
11155                 /* Issue another write enable to start the write. */
11156                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11157
11158                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11159                         break;
11160
11161                 for (j = 0; j < pagesize; j += 4) {
11162                         __be32 data;
11163
11164                         data = *((__be32 *) (tmp + j));
11165                         /* swab32(le32_to_cpu(data)), actually */
11166                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
11167
11168                         tw32(NVRAM_ADDR, phy_addr + j);
11169
11170                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
11171                                 NVRAM_CMD_WR;
11172
11173                         if (j == 0)
11174                                 nvram_cmd |= NVRAM_CMD_FIRST;
11175                         else if (j == (pagesize - 4))
11176                                 nvram_cmd |= NVRAM_CMD_LAST;
11177
11178                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11179                                 break;
11180                 }
11181                 if (ret)
11182                         break;
11183         }
11184
11185         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11186         tg3_nvram_exec_cmd(tp, nvram_cmd);
11187
11188         kfree(tmp);
11189
11190         return ret;
11191 }
11192
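/* Buffered flash and EEPROM writes stream one dword at a time; the only
 * bookkeeping is setting NVRAM_CMD_FIRST at the start of each page (or of
 * the transfer) and NVRAM_CMD_LAST at the end of a page or the transfer.
 * ST flashes on older chips also get an explicit WREN command at the
 * start of each page, and plain EEPROMs get FIRST|LAST on every word.
 */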
11193 /* offset and length are dword aligned */
11194 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
11195                 u8 *buf)
11196 {
11197         int i, ret = 0;
11198
11199         for (i = 0; i < len; i += 4, offset += 4) {
11200                 u32 page_off, phy_addr, nvram_cmd;
11201                 __be32 data;
11202
11203                 memcpy(&data, buf + i, 4);
11204                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11205
11206                 page_off = offset % tp->nvram_pagesize;
11207
11208                 phy_addr = tg3_nvram_phys_addr(tp, offset);
11209
11210                 tw32(NVRAM_ADDR, phy_addr);
11211
11212                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
11213
11214                 if ((page_off == 0) || (i == 0))
11215                         nvram_cmd |= NVRAM_CMD_FIRST;
11216                 if (page_off == (tp->nvram_pagesize - 4))
11217                         nvram_cmd |= NVRAM_CMD_LAST;
11218
11219                 if (i == (len - 4))
11220                         nvram_cmd |= NVRAM_CMD_LAST;
11221
11222                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
11223                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
11224                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
11225                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
11226                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
11227                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) &&
11228                     (tp->nvram_jedecnum == JEDEC_ST) &&
11229                     (nvram_cmd & NVRAM_CMD_FIRST)) {
11230
11231                         if ((ret = tg3_nvram_exec_cmd(tp,
11232                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
11233                                 NVRAM_CMD_DONE)))
11234
11235                                 break;
11236                 }
11237                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11238                         /* We always do complete word writes to eeprom. */
11239                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
11240                 }
11241
11242                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11243                         break;
11244         }
11245         return ret;
11246 }
11247
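/* Top-level NVRAM write entry point: temporarily drops the EEPROM
 * write-protect GPIO if the board uses one, then either programs a legacy
 * EEPROM directly or takes the NVRAM lock, sets GRC_MODE_NVRAM_WR_ENABLE
 * and dispatches to the buffered or unbuffered block writer according to
 * the flash type detected at init time.
 */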
11248 /* offset and length are dword aligned */
11249 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
11250 {
11251         int ret;
11252
11253         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11254                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
11255                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
11256                 udelay(40);
11257         }
11258
11259         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
11260                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
11261         }
11262         else {
11263                 u32 grc_mode;
11264
11265                 ret = tg3_nvram_lock(tp);
11266                 if (ret)
11267                         return ret;
11268
11269                 tg3_enable_nvram_access(tp);
11270                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11271                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
11272                         tw32(NVRAM_WRITE1, 0x406);
11273
11274                 grc_mode = tr32(GRC_MODE);
11275                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
11276
11277                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
11278                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11279
11280                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
11281                                 buf);
11282                 }
11283                 else {
11284                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
11285                                 buf);
11286                 }
11287
11288                 grc_mode = tr32(GRC_MODE);
11289                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
11290
11291                 tg3_disable_nvram_access(tp);
11292                 tg3_nvram_unlock(tp);
11293         }
11294
11295         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11296                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
11297                 udelay(40);
11298         }
11299
11300         return ret;
11301 }
11302
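/* Fallback table mapping PCI subsystem vendor/device IDs to PHY IDs for
 * boards whose NVRAM carries no usable PHY information.  A phy_id of 0
 * causes tg3_phy_probe() to treat the board as a SerDes/fiber design.
 */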
11303 struct subsys_tbl_ent {
11304         u16 subsys_vendor, subsys_devid;
11305         u32 phy_id;
11306 };
11307
11308 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
11309         /* Broadcom boards. */
11310         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
11311         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
11312         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
11313         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
11314         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
11315         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
11316         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
11317         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
11318         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
11319         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
11320         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
11321
11322         /* 3com boards. */
11323         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
11324         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
11325         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
11326         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
11327         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
11328
11329         /* DELL boards. */
11330         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
11331         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
11332         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
11333         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
11334
11335         /* Compaq boards. */
11336         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
11337         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
11338         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
11339         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
11340         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
11341
11342         /* IBM boards. */
11343         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
11344 };
11345
11346 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
11347 {
11348         int i;
11349
11350         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
11351                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
11352                      tp->pdev->subsystem_vendor) &&
11353                     (subsys_id_to_phy_id[i].subsys_devid ==
11354                      tp->pdev->subsystem_device))
11355                         return &subsys_id_to_phy_id[i];
11356         }
11357         return NULL;
11358 }
11359
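/* Pull the factory configuration block out of NIC SRAM (typically placed
 * there from NVRAM by the bootcode).  This is where the PHY ID, copper
 * vs. SerDes selection, LED mode, ASF/APE enables and WOL capability get
 * established; tg3_phy_probe() later overrides the PHY ID only when the
 * MII ID registers return something sane.
 */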
11360 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
11361 {
11362         u32 val;
11363         u16 pmcsr;
11364
11365         /* On some early chips the SRAM cannot be accessed in D3hot state,
11366          * so we need to make sure we're in D0.
11367          */
11368         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
11369         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11370         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
11371         msleep(1);
11372
11373         /* Make sure register accesses (indirect or otherwise)
11374          * will function correctly.
11375          */
11376         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11377                                tp->misc_host_ctrl);
11378
11379         /* The memory arbiter has to be enabled in order for SRAM accesses
11380          * to succeed.  Normally on powerup the tg3 chip firmware will make
11381          * sure it is enabled, but other entities such as system netboot
11382          * code might disable it.
11383          */
11384         val = tr32(MEMARB_MODE);
11385         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
11386
11387         tp->phy_id = PHY_ID_INVALID;
11388         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11389
11390         /* Assume an onboard device and WOL capable by default.  */
11391         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
11392
11393         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11394                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
11395                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11396                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11397                 }
11398                 val = tr32(VCPU_CFGSHDW);
11399                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
11400                         tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11401                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
11402                     (val & VCPU_CFGSHDW_WOL_MAGPKT) &&
11403                     device_may_wakeup(&tp->pdev->dev))
11404                         tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11405                 goto done;
11406         }
11407
11408         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
11409         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
11410                 u32 nic_cfg, led_cfg;
11411                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
11412                 int eeprom_phy_serdes = 0;
11413
11414                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
11415                 tp->nic_sram_data_cfg = nic_cfg;
11416
11417                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
11418                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
11419                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
11420                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
11421                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
11422                     (ver > 0) && (ver < 0x100))
11423                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
11424
11425                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11426                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
11427
11428                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
11429                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
11430                         eeprom_phy_serdes = 1;
11431
11432                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
11433                 if (nic_phy_id != 0) {
11434                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
11435                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
11436
11437                         eeprom_phy_id  = (id1 >> 16) << 10;
11438                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
11439                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
11440                 } else
11441                         eeprom_phy_id = 0;
11442
11443                 tp->phy_id = eeprom_phy_id;
11444                 if (eeprom_phy_serdes) {
11445                         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
11446                                 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
11447                         else
11448                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11449                 }
11450
11451                 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
11452                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
11453                                     SHASTA_EXT_LED_MODE_MASK);
11454                 else
11455                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
11456
11457                 switch (led_cfg) {
11458                 default:
11459                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
11460                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11461                         break;
11462
11463                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
11464                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11465                         break;
11466
11467                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
11468                         tp->led_ctrl = LED_CTRL_MODE_MAC;
11469
11470                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
11471                          * read on some older 5700/5701 bootcode.
11472                          */
11473                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11474                             ASIC_REV_5700 ||
11475                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
11476                             ASIC_REV_5701)
11477                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11478
11479                         break;
11480
11481                 case SHASTA_EXT_LED_SHARED:
11482                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
11483                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
11484                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
11485                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11486                                                  LED_CTRL_MODE_PHY_2);
11487                         break;
11488
11489                 case SHASTA_EXT_LED_MAC:
11490                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
11491                         break;
11492
11493                 case SHASTA_EXT_LED_COMBO:
11494                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
11495                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
11496                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11497                                                  LED_CTRL_MODE_PHY_2);
11498                         break;
11499
11500                 }
11501
11502                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11503                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
11504                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
11505                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11506
11507                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
11508                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11509
11510                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
11511                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
11512                         if ((tp->pdev->subsystem_vendor ==
11513                              PCI_VENDOR_ID_ARIMA) &&
11514                             (tp->pdev->subsystem_device == 0x205a ||
11515                              tp->pdev->subsystem_device == 0x2063))
11516                                 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11517                 } else {
11518                         tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11519                         tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11520                 }
11521
11522                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
11523                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
11524                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
11525                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
11526                 }
11527
11528                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
11529                         (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11530                         tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
11531
11532                 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
11533                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
11534                         tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
11535
11536                 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
11537                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
11538                         tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11539
11540                 if (cfg2 & (1 << 17))
11541                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
11542
11543                 /* serdes signal pre-emphasis in register 0x590 set by */
11544                 /* bootcode if bit 18 is set */
11545                 if (cfg2 & (1 << 18))
11546                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
11547
11548                 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11549                         u32 cfg3;
11550
11551                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
11552                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
11553                                 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11554                 }
11555
11556                 if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
11557                         tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
11558                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
11559                         tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
11560                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
11561                         tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
11562         }
11563 done:
11564         device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
11565         device_set_wakeup_enable(&tp->pdev->dev,
11566                                  tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
11567 }
11568
11569 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
11570 {
11571         int i;
11572         u32 val;
11573
11574         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
11575         tw32(OTP_CTRL, cmd);
11576
11577         /* Wait for up to 1 ms for command to execute. */
11578         for (i = 0; i < 100; i++) {
11579                 val = tr32(OTP_STATUS);
11580                 if (val & OTP_STATUS_CMD_DONE)
11581                         break;
11582                 udelay(10);
11583         }
11584
11585         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
11586 }
11587
11588 /* Read the gphy configuration from the OTP region of the chip.  The gphy
11589  * configuration is a 32-bit value that straddles the alignment boundary.
11590  * We do two 32-bit reads and then shift and merge the results.
11591  */
11592 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
11593 {
11594         u32 bhalf_otp, thalf_otp;
11595
11596         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
11597
11598         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
11599                 return 0;
11600
11601         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
11602
11603         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11604                 return 0;
11605
11606         thalf_otp = tr32(OTP_READ_DATA);
11607
11608         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
11609
11610         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11611                 return 0;
11612
11613         bhalf_otp = tr32(OTP_READ_DATA);
11614
11615         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
11616 }
11617
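/* Example of the PHY ID packing used below, with purely illustrative
 * register values: for hw_phy_id_1 = 0x0020 and hw_phy_id_2 = 0x6011,
 * (0x0020 << 10) | ((0x6011 & 0xfc00) << 16) | (0x6011 & 0x03ff)
 * = 0x8000 | 0x60000000 | 0x0011 = 0x60008011, which is then masked with
 * PHY_ID_MASK before the KNOWN_PHY_ID() check.
 */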
11618 static int __devinit tg3_phy_probe(struct tg3 *tp)
11619 {
11620         u32 hw_phy_id_1, hw_phy_id_2;
11621         u32 hw_phy_id, hw_phy_id_masked;
11622         int err;
11623
11624         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
11625                 return tg3_phy_init(tp);
11626
11627         /* Reading the PHY ID register can conflict with ASF
11628          * firmware access to the PHY hardware.
11629          */
11630         err = 0;
11631         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11632             (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
11633                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
11634         } else {
11635                 /* Now read the physical PHY_ID from the chip and verify
11636                  * that it is sane.  If it doesn't look good, we fall back
11637                  * to the PHY ID found in the eeprom area or, failing that,
11638                  * to the hard-coded subsystem-ID table.
11639                  */
11640                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
11641                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
11642
11643                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
11644                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
11645                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
11646
11647                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
11648         }
11649
11650         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
11651                 tp->phy_id = hw_phy_id;
11652                 if (hw_phy_id_masked == PHY_ID_BCM8002)
11653                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11654                 else
11655                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
11656         } else {
11657                 if (tp->phy_id != PHY_ID_INVALID) {
11658                         /* Do nothing, phy ID already set up in
11659                          * tg3_get_eeprom_hw_cfg().
11660                          */
11661                 } else {
11662                         struct subsys_tbl_ent *p;
11663
11664                         /* No eeprom signature?  Try the hardcoded
11665                          * subsys device table.
11666                          */
11667                         p = lookup_by_subsys(tp);
11668                         if (!p)
11669                                 return -ENODEV;
11670
11671                         tp->phy_id = p->phy_id;
11672                         if (!tp->phy_id ||
11673                             tp->phy_id == PHY_ID_BCM8002)
11674                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11675                 }
11676         }
11677
11678         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
11679             !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
11680             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
11681                 u32 bmsr, adv_reg, tg3_ctrl, mask;
11682
11683                 tg3_readphy(tp, MII_BMSR, &bmsr);
11684                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
11685                     (bmsr & BMSR_LSTATUS))
11686                         goto skip_phy_reset;
11687
11688                 err = tg3_phy_reset(tp);
11689                 if (err)
11690                         return err;
11691
11692                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
11693                            ADVERTISE_100HALF | ADVERTISE_100FULL |
11694                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
11695                 tg3_ctrl = 0;
11696                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
11697                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
11698                                     MII_TG3_CTRL_ADV_1000_FULL);
11699                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11700                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
11701                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
11702                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
11703                 }
11704
11705                 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11706                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11707                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
11708                 if (!tg3_copper_is_advertising_all(tp, mask)) {
11709                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11710
11711                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11712                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11713
11714                         tg3_writephy(tp, MII_BMCR,
11715                                      BMCR_ANENABLE | BMCR_ANRESTART);
11716                 }
11717                 tg3_phy_set_wirespeed(tp);
11718
11719                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11720                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11721                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11722         }
11723
11724 skip_phy_reset:
11725         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
11726                 err = tg3_init_5401phy_dsp(tp);
11727                 if (err)
11728                         return err;
11729         }
11730
11731         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
11732                 err = tg3_init_5401phy_dsp(tp);
11733         }
11734
11735         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
11736                 tp->link_config.advertising =
11737                         (ADVERTISED_1000baseT_Half |
11738                          ADVERTISED_1000baseT_Full |
11739                          ADVERTISED_Autoneg |
11740                          ADVERTISED_FIBRE);
11741         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
11742                 tp->link_config.advertising &=
11743                         ~(ADVERTISED_1000baseT_Half |
11744                           ADVERTISED_1000baseT_Full);
11745
11746         return err;
11747 }
11748
11749 static void __devinit tg3_read_partno(struct tg3 *tp)
11750 {
11751         unsigned char vpd_data[256];
11752         unsigned int i;
11753         u32 magic;
11754
11755         if (tg3_nvram_read_swab(tp, 0x0, &magic))
11756                 goto out_not_found;
11757
11758         if (magic == TG3_EEPROM_MAGIC) {
11759                 for (i = 0; i < 256; i += 4) {
11760                         u32 tmp;
11761
11762                         if (tg3_nvram_read(tp, 0x100 + i, &tmp))
11763                                 goto out_not_found;
11764
11765                         vpd_data[i + 0] = ((tmp >>  0) & 0xff);
11766                         vpd_data[i + 1] = ((tmp >>  8) & 0xff);
11767                         vpd_data[i + 2] = ((tmp >> 16) & 0xff);
11768                         vpd_data[i + 3] = ((tmp >> 24) & 0xff);
11769                 }
11770         } else {
11771                 int vpd_cap;
11772
11773                 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
11774                 for (i = 0; i < 256; i += 4) {
11775                         u32 tmp, j = 0;
11776                         __le32 v;
11777                         u16 tmp16;
11778
11779                         pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
11780                                               i);
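                              /* Hardware sets bit 15 of PCI_VPD_ADDR once the
                               * requested dword is ready in PCI_VPD_DATA, so
                               * poll for it (up to ~100ms) before reading.
                               */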
11781                         while (j++ < 100) {
11782                                 pci_read_config_word(tp->pdev, vpd_cap +
11783                                                      PCI_VPD_ADDR, &tmp16);
11784                                 if (tmp16 & 0x8000)
11785                                         break;
11786                                 msleep(1);
11787                         }
11788                         if (!(tmp16 & 0x8000))
11789                                 goto out_not_found;
11790
11791                         pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
11792                                               &tmp);
11793                         v = cpu_to_le32(tmp);
11794                         memcpy(&vpd_data[i], &v, 4);
11795                 }
11796         }
11797
11798         /* Now parse and find the part number. */
11799         for (i = 0; i < 254; ) {
11800                 unsigned char val = vpd_data[i];
11801                 unsigned int block_end;
11802
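                      /* Skip the Identifier String (0x82) and VPD-W (0x91)
                       * resources; the part number lives in the VPD-R (0x90)
                       * resource under the 'PN' keyword.
                       */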
11803                 if (val == 0x82 || val == 0x91) {
11804                         i = (i + 3 +
11805                              (vpd_data[i + 1] +
11806                               (vpd_data[i + 2] << 8)));
11807                         continue;
11808                 }
11809
11810                 if (val != 0x90)
11811                         goto out_not_found;
11812
11813                 block_end = (i + 3 +
11814                              (vpd_data[i + 1] +
11815                               (vpd_data[i + 2] << 8)));
11816                 i += 3;
11817
11818                 if (block_end > 256)
11819                         goto out_not_found;
11820
11821                 while (i < (block_end - 2)) {
11822                         if (vpd_data[i + 0] == 'P' &&
11823                             vpd_data[i + 1] == 'N') {
11824                                 int partno_len = vpd_data[i + 2];
11825
11826                                 i += 3;
11827                                 if (partno_len > 24 || (partno_len + i) > 256)
11828                                         goto out_not_found;
11829
11830                                 memcpy(tp->board_part_number,
11831                                        &vpd_data[i], partno_len);
11832
11833                                 /* Success. */
11834                                 return;
11835                         }
11836                         i += 3 + vpd_data[i + 2];
11837                 }
11838
11839                 /* Part number not found. */
11840                 goto out_not_found;
11841         }
11842
11843 out_not_found:
11844         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11845                 strcpy(tp->board_part_number, "BCM95906");
11846         else
11847                 strcpy(tp->board_part_number, "none");
11848 }
11849
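      /* Sanity check a firmware image header in NVRAM: the first word must
       * carry the 0x0c000000 signature in its top bits and the second word
       * must be zero.
       */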
11850 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11851 {
11852         u32 val;
11853
11854         if (tg3_nvram_read_swab(tp, offset, &val) ||
11855             (val & 0xfc000000) != 0x0c000000 ||
11856             tg3_nvram_read_swab(tp, offset + 4, &val) ||
11857             val != 0)
11858                 return 0;
11859
11860         return 1;
11861 }
11862
11863 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
11864 {
11865         u32 val, offset, start;
11866         u32 ver_offset;
11867         int i, bcnt;
11868
11869         if (tg3_nvram_read_swab(tp, 0, &val))
11870                 return;
11871
11872         if (val != TG3_EEPROM_MAGIC)
11873                 return;
11874
11875         if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
11876             tg3_nvram_read_swab(tp, 0x4, &start))
11877                 return;
11878
11879         offset = tg3_nvram_logical_addr(tp, offset);
11880
11881         if (!tg3_fw_img_is_valid(tp, offset) ||
11882             tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
11883                 return;
11884
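              /* ver_offset is relative to the image's load address (start);
               * translate it back to an NVRAM offset and copy the 16-byte
               * version string.
               */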
11885         offset = offset + ver_offset - start;
11886         for (i = 0; i < 16; i += 4) {
11887                 __le32 v;
11888                 if (tg3_nvram_read_le(tp, offset + i, &v))
11889                         return;
11890
11891                 memcpy(tp->fw_ver + i, &v, 4);
11892         }
11893
11894         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11895              (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
11896                 return;
11897
11898         for (offset = TG3_NVM_DIR_START;
11899              offset < TG3_NVM_DIR_END;
11900              offset += TG3_NVM_DIRENT_SIZE) {
11901                 if (tg3_nvram_read_swab(tp, offset, &val))
11902                         return;
11903
11904                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
11905                         break;
11906         }
11907
11908         if (offset == TG3_NVM_DIR_END)
11909                 return;
11910
11911         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
11912                 start = 0x08000000;
11913         else if (tg3_nvram_read_swab(tp, offset - 4, &start))
11914                 return;
11915
11916         if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
11917             !tg3_fw_img_is_valid(tp, offset) ||
11918             tg3_nvram_read_swab(tp, offset + 8, &val))
11919                 return;
11920
11921         offset += val - start;
11922
11923         bcnt = strlen(tp->fw_ver);
11924
11925         tp->fw_ver[bcnt++] = ',';
11926         tp->fw_ver[bcnt++] = ' ';
11927
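              /* Append the ASF firmware version after the ", " separator,
               * truncating as needed to stay within TG3_VER_SIZE.
               */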
11928         for (i = 0; i < 4; i++) {
11929                 __le32 v;
11930                 if (tg3_nvram_read_le(tp, offset, &v))
11931                         return;
11932
11933                 offset += sizeof(v);
11934
11935                 if (bcnt > TG3_VER_SIZE - sizeof(v)) {
11936                         memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
11937                         break;
11938                 }
11939
11940                 memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
11941                 bcnt += sizeof(v);
11942         }
11943
11944         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
11945 }
11946
11947 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11948
11949 static int __devinit tg3_get_invariants(struct tg3 *tp)
11950 {
11951         static struct pci_device_id write_reorder_chipsets[] = {
11952                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11953                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
11954                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11955                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
11956                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11957                              PCI_DEVICE_ID_VIA_8385_0) },
11958                 { },
11959         };
11960         u32 misc_ctrl_reg;
11961         u32 cacheline_sz_reg;
11962         u32 pci_state_reg, grc_misc_cfg;
11963         u32 val;
11964         u16 pci_cmd;
11965         int err, pcie_cap;
11966
11967         /* Force memory write invalidate off.  If we leave it on,
11968          * then on 5700_BX chips we have to enable a workaround.
11969          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11970          * to match the cacheline size.  The Broadcom driver has this
11971          * workaround but turns MWI off at all times and so never uses
11972          * it.  This seems to suggest that the workaround is insufficient.
11973          */
11974         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11975         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11976         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11977
11978         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11979          * has the register indirect write enable bit set before
11980          * we try to access any of the MMIO registers.  It is also
11981          * critical that the PCI-X hw workaround situation is decided
11982          * before that.
11983          */
11984         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11985                               &misc_ctrl_reg);
11986
11987         tp->pci_chip_rev_id = (misc_ctrl_reg >>
11988                                MISC_HOST_CTRL_CHIPREV_SHIFT);
11989         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11990                 u32 prod_id_asic_rev;
11991
11992                 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11993                                       &prod_id_asic_rev);
11994                 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
11995         }
11996
11997         /* Wrong chip ID in 5752 A0. This code can be removed later
11998          * as A0 is not in production.
11999          */
12000         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
12001                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
12002
12003         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
12004          * we need to disable memory and use config. cycles
12005          * only to access all registers. The 5702/03 chips
12006          * can mistakenly decode the special cycles from the
12007          * ICH chipsets as memory write cycles, causing corruption
12008          * of register and memory space. Only certain ICH bridges
12009          * will drive special cycles with non-zero data during the
12010          * address phase which can fall within the 5703's address
12011          * range. This is not an ICH bug as the PCI spec allows
12012          * non-zero address during special cycles. However, only
12013          * these ICH bridges are known to drive non-zero addresses
12014          * during special cycles.
12015          *
12016          * Since special cycles do not cross PCI bridges, we only
12017          * enable this workaround if the 5703 is on the secondary
12018          * bus of these ICH bridges.
12019          */
12020         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
12021             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
12022                 static struct tg3_dev_id {
12023                         u32     vendor;
12024                         u32     device;
12025                         u32     rev;
12026                 } ich_chipsets[] = {
12027                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
12028                           PCI_ANY_ID },
12029                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
12030                           PCI_ANY_ID },
12031                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
12032                           0xa },
12033                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
12034                           PCI_ANY_ID },
12035                         { },
12036                 };
12037                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
12038                 struct pci_dev *bridge = NULL;
12039
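                      /* pci_get_device() drops the reference on the bridge
                       * passed in and takes one on the device it returns, so
                       * only a matching bridge needs an explicit pci_dev_put().
                       */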
12040                 while (pci_id->vendor != 0) {
12041                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
12042                                                 bridge);
12043                         if (!bridge) {
12044                                 pci_id++;
12045                                 continue;
12046                         }
12047                         if (pci_id->rev != PCI_ANY_ID) {
12048                                 if (bridge->revision > pci_id->rev)
12049                                         continue;
12050                         }
12051                         if (bridge->subordinate &&
12052                             (bridge->subordinate->number ==
12053                              tp->pdev->bus->number)) {
12054
12055                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
12056                                 pci_dev_put(bridge);
12057                                 break;
12058                         }
12059                 }
12060         }
12061
12062         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
12063                 static struct tg3_dev_id {
12064                         u32     vendor;
12065                         u32     device;
12066                 } bridge_chipsets[] = {
12067                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
12068                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
12069                         { },
12070                 };
12071                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
12072                 struct pci_dev *bridge = NULL;
12073
12074                 while (pci_id->vendor != 0) {
12075                         bridge = pci_get_device(pci_id->vendor,
12076                                                 pci_id->device,
12077                                                 bridge);
12078                         if (!bridge) {
12079                                 pci_id++;
12080                                 continue;
12081                         }
12082                         if (bridge->subordinate &&
12083                             (bridge->subordinate->number <=
12084                              tp->pdev->bus->number) &&
12085                             (bridge->subordinate->subordinate >=
12086                              tp->pdev->bus->number)) {
12087                                 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
12088                                 pci_dev_put(bridge);
12089                                 break;
12090                         }
12091                 }
12092         }
12093
12094         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
12095          * DMA addresses > 40-bit. This bridge may have additional
12096          * 57xx devices behind it, for example in some 4-port NIC designs.
12097          * Any tg3 device found behind the bridge will also need the 40-bit
12098          * DMA workaround.
12099          */
12100         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
12101             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12102                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
12103                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12104                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
12105         }
12106         else {
12107                 struct pci_dev *bridge = NULL;
12108
12109                 do {
12110                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
12111                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
12112                                                 bridge);
12113                         if (bridge && bridge->subordinate &&
12114                             (bridge->subordinate->number <=
12115                              tp->pdev->bus->number) &&
12116                             (bridge->subordinate->subordinate >=
12117                              tp->pdev->bus->number)) {
12118                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12119                                 pci_dev_put(bridge);
12120                                 break;
12121                         }
12122                 } while (bridge);
12123         }
12124
12125         /* Initialize misc host control in PCI block. */
12126         tp->misc_host_ctrl |= (misc_ctrl_reg &
12127                                MISC_HOST_CTRL_CHIPREV);
12128         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12129                                tp->misc_host_ctrl);
12130
12131         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12132                               &cacheline_sz_reg);
12133
12134         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
12135         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
12136         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
12137         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
12138
12139         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12140             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
12141                 tp->pdev_peer = tg3_find_peer(tp);
12142
12143         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12144             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12145             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12146             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12147             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12148             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12149             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12150             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12151             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12152                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
12153
12154         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
12155             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12156                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
12157
12158         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
12159                 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12160                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
12161                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
12162                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
12163                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
12164                      tp->pdev_peer == tp->pdev))
12165                         tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
12166
12167                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12168                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12169                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12170                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12171                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12172                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12173                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
12174                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
12175                 } else {
12176                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
12177                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12178                                 ASIC_REV_5750 &&
12179                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
12180                                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
12181                 }
12182         }
12183
12184         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12185              (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12186                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
12187
12188         pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
12189         if (pcie_cap != 0) {
12190                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12191
12192                 pcie_set_readrq(tp->pdev, 4096);
12193
12194                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12195                         u16 lnkctl;
12196
12197                         pci_read_config_word(tp->pdev,
12198                                              pcie_cap + PCI_EXP_LNKCTL,
12199                                              &lnkctl);
12200                         if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
12201                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
12202                 }
12203         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12204                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12205
12206         /* If we have an AMD 762 or VIA K8T800 chipset, write
12207          * reordering to the mailbox registers done by the host
12208          * controller can cause major troubles.  We read back from
12209          * every mailbox register write to force the writes to be
12210          * posted to the chip in order.
12211          */
12212         if (pci_dev_present(write_reorder_chipsets) &&
12213             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12214                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
12215
12216         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12217             tp->pci_lat_timer < 64) {
12218                 tp->pci_lat_timer = 64;
12219
12220                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
12221                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
12222                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
12223                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
12224
12225                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12226                                        cacheline_sz_reg);
12227         }
12228
12229         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12230             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12231                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
12232                 if (!tp->pcix_cap) {
12233                         printk(KERN_ERR PFX "Cannot find PCI-X "
12234                                             "capability, aborting.\n");
12235                         return -EIO;
12236                 }
12237         }
12238
12239         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12240                               &pci_state_reg);
12241
12242         if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
12243                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
12244
12245                 /* If this is a 5700 BX chipset, and we are in PCI-X
12246                  * mode, enable register write workaround.
12247                  *
12248                  * The workaround is to use indirect register accesses
12249                  * for all chip writes except those to mailbox registers.
12250                  */
12251                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
12252                         u32 pm_reg;
12253
12254                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12255
12256                         /* The chip can have its power management PCI config
12257                          * space registers clobbered due to this bug.
12258                          * So explicitly force the chip into D0 here.
12259                          */
12260                         pci_read_config_dword(tp->pdev,
12261                                               tp->pm_cap + PCI_PM_CTRL,
12262                                               &pm_reg);
12263                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
12264                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
12265                         pci_write_config_dword(tp->pdev,
12266                                                tp->pm_cap + PCI_PM_CTRL,
12267                                                pm_reg);
12268
12269                         /* Also, force SERR#/PERR# in PCI command. */
12270                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12271                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
12272                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12273                 }
12274         }
12275
12276         /* 5700 BX chips need to have their TX producer index mailboxes
12277          * written twice to work around a bug.
12278          */
12279         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
12280                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
12281
12282         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
12283                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
12284         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
12285                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
12286
12287         /* Chip-specific fixup from Broadcom driver */
12288         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
12289             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
12290                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
12291                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
12292         }
12293
12294         /* Default fast path register access methods */
12295         tp->read32 = tg3_read32;
12296         tp->write32 = tg3_write32;
12297         tp->read32_mbox = tg3_read32;
12298         tp->write32_mbox = tg3_write32;
12299         tp->write32_tx_mbox = tg3_write32;
12300         tp->write32_rx_mbox = tg3_write32;
12301
12302         /* Various workaround register access methods */
12303         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
12304                 tp->write32 = tg3_write_indirect_reg32;
12305         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12306                  ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12307                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
12308                 /*
12309                  * Back-to-back register writes can cause problems on these
12310                  * chips; the workaround is to read back all reg writes
12311                  * except those to mailbox regs.
12312                  *
12313                  * See tg3_write_indirect_reg32().
12314                  */
12315                 tp->write32 = tg3_write_flush_reg32;
12316         }
12317
12318
12319         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
12320             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
12321                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
12322                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
12323                         tp->write32_rx_mbox = tg3_write_flush_reg32;
12324         }
12325
12326         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
12327                 tp->read32 = tg3_read_indirect_reg32;
12328                 tp->write32 = tg3_write_indirect_reg32;
12329                 tp->read32_mbox = tg3_read_indirect_mbox;
12330                 tp->write32_mbox = tg3_write_indirect_mbox;
12331                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
12332                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
12333
12334                 iounmap(tp->regs);
12335                 tp->regs = NULL;
12336
12337                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12338                 pci_cmd &= ~PCI_COMMAND_MEMORY;
12339                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12340         }
12341         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12342                 tp->read32_mbox = tg3_read32_mbox_5906;
12343                 tp->write32_mbox = tg3_write32_mbox_5906;
12344                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
12345                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
12346         }
12347
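              /* Access NIC SRAM through the PCI config-space window when
               * register writes are already indirect, or on 5700/5701 parts
               * running in PCI-X mode.
               */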
12348         if (tp->write32 == tg3_write_indirect_reg32 ||
12349             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12350              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12351               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
12352                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
12353
12354         /* Get eeprom hw config before calling tg3_set_power_state().
12355          * In particular, the TG3_FLG2_IS_NIC flag must be
12356          * determined before calling tg3_set_power_state() so that
12357          * we know whether or not to switch out of Vaux power.
12358          * When the flag is set, it means that GPIO1 is used for eeprom
12359          * write protect and also implies that it is a LOM where GPIOs
12360          * are not used to switch power.
12361          */
12362         tg3_get_eeprom_hw_cfg(tp);
12363
12364         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12365                 /* Allow reads and writes to the
12366                  * APE register and memory space.
12367                  */
12368                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
12369                                  PCISTATE_ALLOW_APE_SHMEM_WR;
12370                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
12371                                        pci_state_reg);
12372         }
12373
12374         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12375             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12376             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12377                 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
12378
12379         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
12380          * GPIO1 driven high will bring 5700's external PHY out of reset.
12381          * It is also used as eeprom write protect on LOMs.
12382          */
12383         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
12384         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12385             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
12386                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
12387                                        GRC_LCLCTRL_GPIO_OUTPUT1);
12388         /* Unused GPIO3 must be driven as output on 5752 because there
12389          * are no pull-up resistors on unused GPIO pins.
12390          */
12391         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12392                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
12393
12394         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12395                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12396
12397         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
12398                 /* Turn off the debug UART. */
12399                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12400                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
12401                         /* Keep VMain power. */
12402                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
12403                                               GRC_LCLCTRL_GPIO_OUTPUT0;
12404         }
12405
12406         /* Force the chip into D0. */
12407         err = tg3_set_power_state(tp, PCI_D0);
12408         if (err) {
12409                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
12410                        pci_name(tp->pdev));
12411                 return err;
12412         }
12413
12414         /* 5700 B0 chips do not support checksumming correctly due
12415          * to hardware bugs.
12416          */
12417         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
12418                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
12419
12420         /* Derive initial jumbo mode from MTU assigned in
12421          * ether_setup() via the alloc_etherdev() call
12422          */
12423         if (tp->dev->mtu > ETH_DATA_LEN &&
12424             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12425                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
12426
12427         /* Determine WakeOnLan speed to use. */
12428         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12429             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12430             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
12431             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
12432                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
12433         } else {
12434                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
12435         }
12436
12437         /* A few boards don't want Ethernet@WireSpeed phy feature */
12438         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12439             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
12440              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
12441              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
12442             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
12443             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
12444                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
12445
12446         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
12447             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
12448                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
12449         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
12450                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
12451
12452         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12453                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12454                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12455                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12456                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
12457                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
12458                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
12459                                 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
12460                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
12461                                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
12462                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906 &&
12463                            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
12464                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
12465         }
12466
12467         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12468             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
12469                 tp->phy_otp = tg3_read_otp_phycfg(tp);
12470                 if (tp->phy_otp == 0)
12471                         tp->phy_otp = TG3_OTP_DEFAULT;
12472         }
12473
12474         if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
12475                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
12476         else
12477                 tp->mi_mode = MAC_MI_MODE_BASE;
12478
12479         tp->coalesce_mode = 0;
12480         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
12481             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
12482                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
12483
12484         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12485                 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12486
12487         err = tg3_mdio_init(tp);
12488         if (err)
12489                 return err;
12490
12491         /* Initialize data/descriptor byte/word swapping. */
12492         val = tr32(GRC_MODE);
12493         val &= GRC_MODE_HOST_STACKUP;
12494         tw32(GRC_MODE, val | tp->grc_mode);
12495
12496         tg3_switch_clocks(tp);
12497
12498         /* Clear this out for sanity. */
12499         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
12500
12501         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12502                               &pci_state_reg);
12503         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
12504             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
12505                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
12506
12507                 if (chiprevid == CHIPREV_ID_5701_A0 ||
12508                     chiprevid == CHIPREV_ID_5701_B0 ||
12509                     chiprevid == CHIPREV_ID_5701_B2 ||
12510                     chiprevid == CHIPREV_ID_5701_B5) {
12511                         void __iomem *sram_base;
12512
12513                         /* Write some dummy words into the SRAM status block
12514                          * area, see if it reads back correctly.  If the return
12515                          * value is bad, force-enable the PCI-X workaround.
12516                          */
12517                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
12518
12519                         writel(0x00000000, sram_base);
12520                         writel(0x00000000, sram_base + 4);
12521                         writel(0xffffffff, sram_base + 4);
12522                         if (readl(sram_base) != 0x00000000)
12523                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12524                 }
12525         }
12526
12527         udelay(50);
12528         tg3_nvram_init(tp);
12529
12530         grc_misc_cfg = tr32(GRC_MISC_CFG);
12531         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
12532
12533         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12534             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
12535              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
12536                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
12537
12538         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
12539             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
12540                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
12541         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
12542                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
12543                                       HOSTCC_MODE_CLRTICK_TXBD);
12544
12545                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
12546                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12547                                        tp->misc_host_ctrl);
12548         }
12549
12550         /* Preserve the APE MAC_MODE bits */
12551         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
12552                 tp->mac_mode = tr32(MAC_MODE) |
12553                                MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
12554         else
12555                 tp->mac_mode = TG3_DEF_MAC_MODE;
12556
12557         /* these are limited to 10/100 only */
12558         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12559              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
12560             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12561              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12562              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
12563               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
12564               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
12565             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12566              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
12567               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
12568               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
12569             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12570                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
12571
12572         err = tg3_phy_probe(tp);
12573         if (err) {
12574                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
12575                        pci_name(tp->pdev), err);
12576                 /* ... but do not return immediately ... */
12577                 tg3_mdio_fini(tp);
12578         }
12579
12580         tg3_read_partno(tp);
12581         tg3_read_fw_ver(tp);
12582
12583         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
12584                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12585         } else {
12586                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12587                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
12588                 else
12589                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12590         }
12591
12592         /* 5700 {AX,BX} chips have a broken status block link
12593          * change bit implementation, so we must use the
12594          * status register in those cases.
12595          */
12596         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12597                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
12598         else
12599                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
12600
12601         /* The led_ctrl is set during tg3_phy_probe; here we might
12602          * have to force the link status polling mechanism based
12603          * upon subsystem IDs.
12604          */
12605         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
12606             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12607             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
12608                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
12609                                   TG3_FLAG_USE_LINKCHG_REG);
12610         }
12611
12612         /* For all SERDES we poll the MAC status register. */
12613         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
12614                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
12615         else
12616                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
12617
12618         /* All chips before 5787 can get confused if TX buffers
12619          * straddle the 4GB address boundary in some cases.
12620          */
12621         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12622             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12623             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12624             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12625             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12626             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12627                 tp->dev->hard_start_xmit = tg3_start_xmit;
12628         else
12629                 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
12630
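              /* Offset rx buffers by 2 bytes so the IP header lands on a
               * 4-byte boundary; the 5701 in PCI-X mode cannot DMA to such
               * unaligned addresses, so use no offset there.
               */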
12631         tp->rx_offset = 2;
12632         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12633             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
12634                 tp->rx_offset = 0;
12635
12636         tp->rx_std_max_post = TG3_RX_RING_SIZE;
12637
12638         /* Increment the rx prod index on the rx std ring by at most
12639          * 8 for these chips to workaround hw errata.
12640          * 8 for these chips to work around hw errata.
12641         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12642             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12643             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12644                 tp->rx_std_max_post = 8;
12645
12646         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
12647                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
12648                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
12649
12650         return err;
12651 }
12652
12653 #ifdef CONFIG_SPARC
12654 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
12655 {
12656         struct net_device *dev = tp->dev;
12657         struct pci_dev *pdev = tp->pdev;
12658         struct device_node *dp = pci_device_to_OF_node(pdev);
12659         const unsigned char *addr;
12660         int len;
12661
12662         addr = of_get_property(dp, "local-mac-address", &len);
12663         if (addr && len == 6) {
12664                 memcpy(dev->dev_addr, addr, 6);
12665                 memcpy(dev->perm_addr, dev->dev_addr, 6);
12666                 return 0;
12667         }
12668         return -ENODEV;
12669 }
12670
12671 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12672 {
12673         struct net_device *dev = tp->dev;
12674
12675         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
12676         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
12677         return 0;
12678 }
12679 #endif
12680
12681 static int __devinit tg3_get_device_address(struct tg3 *tp)
12682 {
12683         struct net_device *dev = tp->dev;
12684         u32 hi, lo, mac_offset;
12685         int addr_ok = 0;
12686
12687 #ifdef CONFIG_SPARC
12688         if (!tg3_get_macaddr_sparc(tp))
12689                 return 0;
12690 #endif
12691
12692         mac_offset = 0x7c;
12693         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12694             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12695                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
12696                         mac_offset = 0xcc;
12697                 if (tg3_nvram_lock(tp))
12698                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
12699                 else
12700                         tg3_nvram_unlock(tp);
12701         }
12702         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12703                 mac_offset = 0x10;
12704
12705         /* First try to get it from MAC address mailbox. */
12706         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
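              /* Only trust the mailbox contents if the upper 16 bits carry
               * the 0x484b signature.
               */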
12707         if ((hi >> 16) == 0x484b) {
12708                 dev->dev_addr[0] = (hi >>  8) & 0xff;
12709                 dev->dev_addr[1] = (hi >>  0) & 0xff;
12710
12711                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
12712                 dev->dev_addr[2] = (lo >> 24) & 0xff;
12713                 dev->dev_addr[3] = (lo >> 16) & 0xff;
12714                 dev->dev_addr[4] = (lo >>  8) & 0xff;
12715                 dev->dev_addr[5] = (lo >>  0) & 0xff;
12716
12717                 /* Some old bootcode may report a 0 MAC address in SRAM */
12718                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
12719         }
12720         if (!addr_ok) {
12721                 /* Next, try NVRAM. */
12722                 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
12723                     !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
12724                         dev->dev_addr[0] = ((hi >> 16) & 0xff);
12725                         dev->dev_addr[1] = ((hi >> 24) & 0xff);
12726                         dev->dev_addr[2] = ((lo >>  0) & 0xff);
12727                         dev->dev_addr[3] = ((lo >>  8) & 0xff);
12728                         dev->dev_addr[4] = ((lo >> 16) & 0xff);
12729                         dev->dev_addr[5] = ((lo >> 24) & 0xff);
12730                 }
12731                 /* Finally just fetch it out of the MAC control regs. */
12732                 else {
12733                         hi = tr32(MAC_ADDR_0_HIGH);
12734                         lo = tr32(MAC_ADDR_0_LOW);
12735
12736                         dev->dev_addr[5] = lo & 0xff;
12737                         dev->dev_addr[4] = (lo >> 8) & 0xff;
12738                         dev->dev_addr[3] = (lo >> 16) & 0xff;
12739                         dev->dev_addr[2] = (lo >> 24) & 0xff;
12740                         dev->dev_addr[1] = hi & 0xff;
12741                         dev->dev_addr[0] = (hi >> 8) & 0xff;
12742                 }
12743         }
12744
12745         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
12746 #ifdef CONFIG_SPARC
12747                 if (!tg3_get_default_macaddr_sparc(tp))
12748                         return 0;
12749 #endif
12750                 return -EINVAL;
12751         }
12752         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
12753         return 0;
12754 }
12755
12756 #define BOUNDARY_SINGLE_CACHELINE       1
12757 #define BOUNDARY_MULTI_CACHELINE        2
12758
12759 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12760 {
12761         int cacheline_size;
12762         u8 byte;
12763         int goal;
12764
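              /* PCI_CACHE_LINE_SIZE is in units of 32-bit words; a value of
               * zero means the size was never set, so fall back to 1024 bytes.
               */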
12765         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
12766         if (byte == 0)
12767                 cacheline_size = 1024;
12768         else
12769                 cacheline_size = (int) byte * 4;
12770
12771         /* On 5703 and later chips, the boundary bits have no
12772          * effect.
12773          */
12774         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12775             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12776             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12777                 goto out;
12778
12779 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
12780         goal = BOUNDARY_MULTI_CACHELINE;
12781 #else
12782 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
12783         goal = BOUNDARY_SINGLE_CACHELINE;
12784 #else
12785         goal = 0;
12786 #endif
12787 #endif
12788
12789         if (!goal)
12790                 goto out;
12791
12792         /* PCI controllers on most RISC systems tend to disconnect
12793          * when a device tries to burst across a cache-line boundary.
12794          * Therefore, letting tg3 do so just wastes PCI bandwidth.
12795          *
12796          * Unfortunately, for PCI-E there are only limited
12797          * write-side controls for this, and thus for reads
12798          * we will still get the disconnects.  We'll also waste
12799          * these PCI cycles for both read and write for chips
12800          * other than 5700 and 5701 which do not implement the
12801          * boundary bits.
12802          */
12803         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12804             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
12805                 switch (cacheline_size) {
12806                 case 16:
12807                 case 32:
12808                 case 64:
12809                 case 128:
12810                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12811                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
12812                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
12813                         } else {
12814                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12815                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12816                         }
12817                         break;
12818
12819                 case 256:
12820                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
12821                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
12822                         break;
12823
12824                 default:
12825                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12826                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12827                         break;
12828                 }
12829         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12830                 switch (cacheline_size) {
12831                 case 16:
12832                 case 32:
12833                 case 64:
12834                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12835                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12836                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
12837                                 break;
12838                         }
12839                         /* fallthrough */
12840                 case 128:
12841                 default:
12842                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12843                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
12844                         break;
12845                 }
12846         } else {
12847                 switch (cacheline_size) {
12848                 case 16:
12849                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12850                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
12851                                         DMA_RWCTRL_WRITE_BNDRY_16);
12852                                 break;
12853                         }
12854                         /* fallthrough */
12855                 case 32:
12856                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12857                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
12858                                         DMA_RWCTRL_WRITE_BNDRY_32);
12859                                 break;
12860                         }
12861                         /* fallthrough */
12862                 case 64:
12863                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12864                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
12865                                         DMA_RWCTRL_WRITE_BNDRY_64);
12866                                 break;
12867                         }
12868                         /* fallthrough */
12869                 case 128:
12870                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
12871                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
12872                                         DMA_RWCTRL_WRITE_BNDRY_128);
12873                                 break;
12874                         }
12875                         /* fallthrough */
12876                 case 256:
12877                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
12878                                 DMA_RWCTRL_WRITE_BNDRY_256);
12879                         break;
12880                 case 512:
12881                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
12882                                 DMA_RWCTRL_WRITE_BNDRY_512);
12883                         break;
12884                 case 1024:
12885                 default:
12886                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
12887                                 DMA_RWCTRL_WRITE_BNDRY_1024);
12888                         break;
12889                 }
12890         }
12891
12892 out:
12893         return val;
12894 }
12895
12896 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
12897 {
12898         struct tg3_internal_buffer_desc test_desc;
12899         u32 sram_dma_descs;
12900         int i, ret;
12901
12902         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
12903
12904         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
12905         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
12906         tw32(RDMAC_STATUS, 0);
12907         tw32(WDMAC_STATUS, 0);
12908
12909         tw32(BUFMGR_MODE, 0);
12910         tw32(FTQ_RESET, 0);
12911
12912         test_desc.addr_hi = ((u64) buf_dma) >> 32;
12913         test_desc.addr_lo = buf_dma & 0xffffffff;
12914         test_desc.nic_mbuf = 0x00002100;
12915         test_desc.len = size;
12916
12917         /*
12918          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
12919          * the *second* time the tg3 driver was loaded after an
12920          * initial scan.
12921          *
12922          * Broadcom tells me:
12923          *   ...the DMA engine is connected to the GRC block and a DMA
12924          *   reset may affect the GRC block in some unpredictable way...
12925          *   The behavior of resets to individual blocks has not been tested.
12926          *
12927          * Broadcom noted that the GRC reset will also reset all sub-components.
12928          */
12929         if (to_device) {
12930                 test_desc.cqid_sqid = (13 << 8) | 2;
12931
12932                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
12933                 udelay(40);
12934         } else {
12935                 test_desc.cqid_sqid = (16 << 8) | 7;
12936
12937                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
12938                 udelay(40);
12939         }
12940         test_desc.flags = 0x00000005;
12941
12942         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
12943                 u32 val;
12944
12945                 val = *(((u32 *)&test_desc) + i);
12946                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
12947                                        sram_dma_descs + (i * sizeof(u32)));
12948                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
12949         }
12950         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
12951
12952         if (to_device) {
12953                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
12954         } else {
12955                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
12956         }
12957
12958         ret = -ENODEV;
12959         for (i = 0; i < 40; i++) {
12960                 u32 val;
12961
12962                 if (to_device)
12963                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
12964                 else
12965                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
12966                 if ((val & 0xffff) == sram_dma_descs) {
12967                         ret = 0;
12968                         break;
12969                 }
12970
12971                 udelay(100);
12972         }
12973
12974         return ret;
12975 }
12976
12977 #define TEST_BUFFER_SIZE        0x2000
12978
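/* Choose DMA read/write control settings appropriate for the bus type and
 * chip, program TG3PCI_DMA_RW_CTRL, and on 5700/5701 run a write/read-back
 * pattern test against an 8 KB host buffer.  If the read-back data comes
 * back corrupted, fall back to a 16-byte write boundary; a short list of
 * host bridges known to need that workaround is also checked even when
 * the test passes.
 */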
12979 static int __devinit tg3_test_dma(struct tg3 *tp)
12980 {
12981         dma_addr_t buf_dma;
12982         u32 *buf, saved_dma_rwctrl;
12983         int ret;
12984
12985         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
12986         if (!buf) {
12987                 ret = -ENOMEM;
12988                 goto out_nofree;
12989         }
12990
12991         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
12992                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
12993
12994         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
12995
12996         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12997                 /* DMA read watermark not used on PCIE */
12998                 tp->dma_rwctrl |= 0x00180000;
12999         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
13000                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13001                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
13002                         tp->dma_rwctrl |= 0x003f0000;
13003                 else
13004                         tp->dma_rwctrl |= 0x003f000f;
13005         } else {
13006                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
13007                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
13008                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
13009                         u32 read_water = 0x7;
13010
13011                         /* If the 5704 is behind the EPB bridge, we can
13012                          * do the less restrictive ONE_DMA workaround for
13013                          * better performance.
13014                          */
13015                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
13016                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
13017                                 tp->dma_rwctrl |= 0x8000;
13018                         else if (ccval == 0x6 || ccval == 0x7)
13019                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
13020
13021                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
13022                                 read_water = 4;
13023                         /* Set bit 23 to enable PCIX hw bug fix */
13024                         tp->dma_rwctrl |=
13025                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
13026                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
13027                                 (1 << 23);
13028                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
13029                         /* 5780 always in PCIX mode */
13030                         tp->dma_rwctrl |= 0x00144000;
13031                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13032                         /* 5714 always in PCIX mode */
13033                         tp->dma_rwctrl |= 0x00148000;
13034                 } else {
13035                         tp->dma_rwctrl |= 0x001b000f;
13036                 }
13037         }
13038
13039         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
13040             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
13041                 tp->dma_rwctrl &= 0xfffffff0;
13042
13043         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13044             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13045                 /* Remove this if it causes problems for some boards. */
13046                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
13047
13048                 /* On 5700/5701 chips, we need to set this bit.
13049                  * Otherwise the chip will issue cacheline transactions
13050                  * to streamable DMA memory without all of the byte
13051                  * enables turned on.  This is an error on several
13052                  * RISC PCI controllers, in particular sparc64.
13053                  *
13054                  * On 5703/5704 chips, this bit has been reassigned
13055                  * a different meaning.  In particular, it is used
13056                  * on those chips to enable a PCI-X workaround.
13057                  */
13058                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
13059         }
13060
13061         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13062
13063 #if 0
13064         /* Unneeded, already done by tg3_get_invariants.  */
13065         tg3_switch_clocks(tp);
13066 #endif
13067
13068         ret = 0;
13069         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13070             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
13071                 goto out;
13072
13073         /* It is best to perform DMA test with maximum write burst size
13074          * to expose the 5700/5701 write DMA bug.
13075          */
13076         saved_dma_rwctrl = tp->dma_rwctrl;
13077         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13078         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13079
13080         while (1) {
13081                 u32 *p = buf, i;
13082
13083                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
13084                         p[i] = i;
13085
13086                 /* Send the buffer to the chip. */
13087                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
13088                 if (ret) {
13089                         printk(KERN_ERR "tg3_test_dma(): write of test buffer failed, err = %d\n", ret);
13090                         break;
13091                 }
13092
13093 #if 0
13094                 /* validate data reached card RAM correctly. */
13095                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13096                         u32 val;
13097                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
13098                         if (le32_to_cpu(val) != p[i]) {
13099                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
13100                                 /* ret = -ENODEV here? */
13101                         }
13102                         p[i] = 0;
13103                 }
13104 #endif
13105                 /* Now read it back. */
13106                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
13107                 if (ret) {
13108                         printk(KERN_ERR "tg3_test_dma(): read of test buffer failed, err = %d\n", ret);
13109
13110                         break;
13111                 }
13112
13113                 /* Verify it. */
13114                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13115                         if (p[i] == i)
13116                                 continue;
13117
13118                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13119                             DMA_RWCTRL_WRITE_BNDRY_16) {
13120                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13121                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13122                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13123                                 break;
13124                         } else {
13125                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
13126                                 ret = -ENODEV;
13127                                 goto out;
13128                         }
13129                 }
13130
13131                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
13132                         /* Success. */
13133                         ret = 0;
13134                         break;
13135                 }
13136         }
13137         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13138             DMA_RWCTRL_WRITE_BNDRY_16) {
13139                 static struct pci_device_id dma_wait_state_chipsets[] = {
13140                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
13141                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
13142                         { },
13143                 };
13144
13145                 /* DMA test passed without adjusting DMA boundary,
13146                  * now look for chipsets that are known to expose the
13147                  * DMA bug without failing the test.
13148                  */
13149                 if (pci_dev_present(dma_wait_state_chipsets)) {
13150                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13151                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13152                 } else {
13153                         /* Safe to use the calculated DMA boundary. */
13154                         tp->dma_rwctrl = saved_dma_rwctrl;
13155                 }
13156
13157                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13158         }
13159
13160 out:
13161         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
13162 out_nofree:
13163         return ret;
13164 }
13165
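/* Default link configuration: advertise all 10/100/1000 half/full duplex
 * modes with autonegotiation enabled; the active and requested speed and
 * duplex fields start out as the INVALID placeholders.
 */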
13166 static void __devinit tg3_init_link_config(struct tg3 *tp)
13167 {
13168         tp->link_config.advertising =
13169                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13170                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13171                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
13172                  ADVERTISED_Autoneg | ADVERTISED_MII);
13173         tp->link_config.speed = SPEED_INVALID;
13174         tp->link_config.duplex = DUPLEX_INVALID;
13175         tp->link_config.autoneg = AUTONEG_ENABLE;
13176         tp->link_config.active_speed = SPEED_INVALID;
13177         tp->link_config.active_duplex = DUPLEX_INVALID;
13178         tp->link_config.phy_is_low_power = 0;
13179         tp->link_config.orig_speed = SPEED_INVALID;
13180         tp->link_config.orig_duplex = DUPLEX_INVALID;
13181         tp->link_config.orig_autoneg = AUTONEG_INVALID;
13182 }
13183
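/* Program the default buffer manager watermarks.  5705-class and newer
 * chips use their own set of defaults (with a further override for the
 * 5906); older chips use the original values.  Jumbo-frame watermarks
 * are filled in alongside the standard ones.
 */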
13184 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
13185 {
13186         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13187                 tp->bufmgr_config.mbuf_read_dma_low_water =
13188                         DEFAULT_MB_RDMA_LOW_WATER_5705;
13189                 tp->bufmgr_config.mbuf_mac_rx_low_water =
13190                         DEFAULT_MB_MACRX_LOW_WATER_5705;
13191                 tp->bufmgr_config.mbuf_high_water =
13192                         DEFAULT_MB_HIGH_WATER_5705;
13193                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13194                         tp->bufmgr_config.mbuf_mac_rx_low_water =
13195                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
13196                         tp->bufmgr_config.mbuf_high_water =
13197                                 DEFAULT_MB_HIGH_WATER_5906;
13198                 }
13199
13200                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13201                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
13202                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13203                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
13204                 tp->bufmgr_config.mbuf_high_water_jumbo =
13205                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
13206         } else {
13207                 tp->bufmgr_config.mbuf_read_dma_low_water =
13208                         DEFAULT_MB_RDMA_LOW_WATER;
13209                 tp->bufmgr_config.mbuf_mac_rx_low_water =
13210                         DEFAULT_MB_MACRX_LOW_WATER;
13211                 tp->bufmgr_config.mbuf_high_water =
13212                         DEFAULT_MB_HIGH_WATER;
13213
13214                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13215                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
13216                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13217                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
13218                 tp->bufmgr_config.mbuf_high_water_jumbo =
13219                         DEFAULT_MB_HIGH_WATER_JUMBO;
13220         }
13221
13222         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
13223         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
13224 }
13225
13226 static char * __devinit tg3_phy_string(struct tg3 *tp)
13227 {
13228         switch (tp->phy_id & PHY_ID_MASK) {
13229         case PHY_ID_BCM5400:    return "5400";
13230         case PHY_ID_BCM5401:    return "5401";
13231         case PHY_ID_BCM5411:    return "5411";
13232         case PHY_ID_BCM5701:    return "5701";
13233         case PHY_ID_BCM5703:    return "5703";
13234         case PHY_ID_BCM5704:    return "5704";
13235         case PHY_ID_BCM5705:    return "5705";
13236         case PHY_ID_BCM5750:    return "5750";
13237         case PHY_ID_BCM5752:    return "5752";
13238         case PHY_ID_BCM5714:    return "5714";
13239         case PHY_ID_BCM5780:    return "5780";
13240         case PHY_ID_BCM5755:    return "5755";
13241         case PHY_ID_BCM5787:    return "5787";
13242         case PHY_ID_BCM5784:    return "5784";
13243         case PHY_ID_BCM5756:    return "5722/5756";
13244         case PHY_ID_BCM5906:    return "5906";
13245         case PHY_ID_BCM5761:    return "5761";
13246         case PHY_ID_BCM8002:    return "8002/serdes";
13247         case 0:                 return "serdes";
13248         default:                return "unknown";
13249         }
13250 }
13251
13252 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
13253 {
13254         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13255                 strcpy(str, "PCI Express");
13256                 return str;
13257         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13258                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
13259
13260                 strcpy(str, "PCIX:");
13261
13262                 if ((clock_ctrl == 7) ||
13263                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
13264                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
13265                         strcat(str, "133MHz");
13266                 else if (clock_ctrl == 0)
13267                         strcat(str, "33MHz");
13268                 else if (clock_ctrl == 2)
13269                         strcat(str, "50MHz");
13270                 else if (clock_ctrl == 4)
13271                         strcat(str, "66MHz");
13272                 else if (clock_ctrl == 6)
13273                         strcat(str, "100MHz");
13274         } else {
13275                 strcpy(str, "PCI:");
13276                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
13277                         strcat(str, "66MHz");
13278                 else
13279                         strcat(str, "33MHz");
13280         }
13281         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
13282                 strcat(str, ":32-bit");
13283         else
13284                 strcat(str, ":64-bit");
13285         return str;
13286 }
13287
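/* On dual-port devices such as the 5704, the two MACs are separate PCI
 * functions of the same device.  Scan the other functions in this slot
 * and return the sibling; if none is found (single-port configuration),
 * return tp->pdev itself.
 */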
13288 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
13289 {
13290         struct pci_dev *peer;
13291         unsigned int func, devnr = tp->pdev->devfn & ~7;
13292
13293         for (func = 0; func < 8; func++) {
13294                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13295                 if (peer && peer != tp->pdev)
13296                         break;
13297                 pci_dev_put(peer);
13298         }
13299         /* 5704 can be configured in single-port mode, set peer to
13300          * tp->pdev in that case.
13301          */
13302         if (!peer) {
13303                 peer = tp->pdev;
13304                 return peer;
13305         }
13306
13307         /*
13308          * We don't need to keep the refcount elevated; there's no way
13309          * to remove one half of this device without removing the other
13310          */
13311         pci_dev_put(peer);
13312
13313         return peer;
13314 }
13315
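/* Fill tp->coal with the driver's default interrupt coalescing
 * parameters; these are what "ethtool -c <iface>" reports before the
 * user changes anything.  5705 and newer chips do not support the
 * per-interrupt and statistics-block coalescing knobs, so those fields
 * are zeroed for them.
 */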
13316 static void __devinit tg3_init_coal(struct tg3 *tp)
13317 {
13318         struct ethtool_coalesce *ec = &tp->coal;
13319
13320         memset(ec, 0, sizeof(*ec));
13321         ec->cmd = ETHTOOL_GCOALESCE;
13322         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
13323         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
13324         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
13325         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
13326         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
13327         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
13328         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
13329         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
13330         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
13331
13332         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
13333                                  HOSTCC_MODE_CLRTICK_TXBD)) {
13334                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
13335                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
13336                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
13337                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
13338         }
13339
13340         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13341                 ec->rx_coalesce_usecs_irq = 0;
13342                 ec->tx_coalesce_usecs_irq = 0;
13343                 ec->stats_block_coalesce_usecs = 0;
13344         }
13345 }
13346
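/* PCI probe routine.  Enables the device, maps BAR 0, allocates the
 * net_device, reads the chip invariants, picks a 32-, 40-, or 64-bit
 * DMA mask depending on the chip, runs the DMA engine test, and finally
 * registers the interface.  Errors unwind in reverse order through the
 * err_out_* labels.
 */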
13347 static int __devinit tg3_init_one(struct pci_dev *pdev,
13348                                   const struct pci_device_id *ent)
13349 {
13350         static int tg3_version_printed = 0;
13351         resource_size_t tg3reg_len;
13352         struct net_device *dev;
13353         struct tg3 *tp;
13354         int err, pm_cap;
13355         char str[40];
13356         u64 dma_mask, persist_dma_mask;
13357
13358         if (tg3_version_printed++ == 0)
13359                 printk(KERN_INFO "%s", version);
13360
13361         err = pci_enable_device(pdev);
13362         if (err) {
13363                 printk(KERN_ERR PFX "Cannot enable PCI device, "
13364                        "aborting.\n");
13365                 return err;
13366         }
13367
13368         if (!(pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM)) {
13369                 printk(KERN_ERR PFX "Cannot find proper PCI device "
13370                        "base address, aborting.\n");
13371                 err = -ENODEV;
13372                 goto err_out_disable_pdev;
13373         }
13374
13375         err = pci_request_regions(pdev, DRV_MODULE_NAME);
13376         if (err) {
13377                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
13378                        "aborting.\n");
13379                 goto err_out_disable_pdev;
13380         }
13381
13382         pci_set_master(pdev);
13383
13384         /* Find power-management capability. */
13385         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
13386         if (pm_cap == 0) {
13387                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
13388                        "aborting.\n");
13389                 err = -EIO;
13390                 goto err_out_free_res;
13391         }
13392
13393         dev = alloc_etherdev(sizeof(*tp));
13394         if (!dev) {
13395                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
13396                 err = -ENOMEM;
13397                 goto err_out_free_res;
13398         }
13399
13400         SET_NETDEV_DEV(dev, &pdev->dev);
13401
13402 #if TG3_VLAN_TAG_USED
13403         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
13404         dev->vlan_rx_register = tg3_vlan_rx_register;
13405 #endif
13406
13407         tp = netdev_priv(dev);
13408         tp->pdev = pdev;
13409         tp->dev = dev;
13410         tp->pm_cap = pm_cap;
13411         tp->rx_mode = TG3_DEF_RX_MODE;
13412         tp->tx_mode = TG3_DEF_TX_MODE;
13413
13414         if (tg3_debug > 0)
13415                 tp->msg_enable = tg3_debug;
13416         else
13417                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
13418
13419         /* The word/byte swap controls here control register access byte
13420          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
13421          * setting below.
13422          */
13423         tp->misc_host_ctrl =
13424                 MISC_HOST_CTRL_MASK_PCI_INT |
13425                 MISC_HOST_CTRL_WORD_SWAP |
13426                 MISC_HOST_CTRL_INDIR_ACCESS |
13427                 MISC_HOST_CTRL_PCISTATE_RW;
13428
13429         /* The NONFRM (non-frame) byte/word swap controls take effect
13430          * on descriptor entries, anything which isn't packet data.
13431          *
13432          * The StrongARM chips on the board (one for tx, one for rx)
13433          * are running in big-endian mode.
13434          */
13435         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
13436                         GRC_MODE_WSWAP_NONFRM_DATA);
13437 #ifdef __BIG_ENDIAN
13438         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
13439 #endif
13440         spin_lock_init(&tp->lock);
13441         spin_lock_init(&tp->indirect_lock);
13442         INIT_WORK(&tp->reset_task, tg3_reset_task);
13443
13444         dev->mem_start = pci_resource_start(pdev, BAR_0);
13445         tg3reg_len = pci_resource_len(pdev, BAR_0);
13446         dev->mem_end = dev->mem_start + tg3reg_len;
13447
13448         tp->regs = ioremap_nocache(dev->mem_start, tg3reg_len);
13449         if (!tp->regs) {
13450                 printk(KERN_ERR PFX "Cannot map device registers, "
13451                        "aborting.\n");
13452                 err = -ENOMEM;
13453                 goto err_out_free_dev;
13454         }
13455
13456         tg3_init_link_config(tp);
13457
13458         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
13459         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
13460         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
13461
13462         dev->open = tg3_open;
13463         dev->stop = tg3_close;
13464         dev->get_stats = tg3_get_stats;
13465         dev->set_multicast_list = tg3_set_rx_mode;
13466         dev->set_mac_address = tg3_set_mac_addr;
13467         dev->do_ioctl = tg3_ioctl;
13468         dev->tx_timeout = tg3_tx_timeout;
13469         netif_napi_add(dev, &tp->napi, tg3_poll, 64);
13470         dev->ethtool_ops = &tg3_ethtool_ops;
13471         dev->watchdog_timeo = TG3_TX_TIMEOUT;
13472         dev->change_mtu = tg3_change_mtu;
13473         dev->irq = pdev->irq;
13474 #ifdef CONFIG_NET_POLL_CONTROLLER
13475         dev->poll_controller = tg3_poll_controller;
13476 #endif
13477
13478         err = tg3_get_invariants(tp);
13479         if (err) {
13480                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
13481                        "aborting.\n");
13482                 goto err_out_iounmap;
13483         }
13484
13485         /* The EPB bridge inside 5714, 5715, and 5780 and any
13486          * device behind the EPB cannot support DMA addresses > 40-bit.
13487          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
13488          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
13489          * do DMA address check in tg3_start_xmit().
13490          */
13491         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
13492                 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
13493         else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
13494                 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
13495 #ifdef CONFIG_HIGHMEM
13496                 dma_mask = DMA_64BIT_MASK;
13497 #endif
13498         } else
13499                 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
13500
13501         /* Configure DMA attributes. */
13502         if (dma_mask > DMA_32BIT_MASK) {
13503                 err = pci_set_dma_mask(pdev, dma_mask);
13504                 if (!err) {
13505                         dev->features |= NETIF_F_HIGHDMA;
13506                         err = pci_set_consistent_dma_mask(pdev,
13507                                                           persist_dma_mask);
13508                         if (err < 0) {
13509                                 printk(KERN_ERR PFX "Unable to obtain 64 bit "
13510                                        "DMA for consistent allocations\n");
13511                                 goto err_out_iounmap;
13512                         }
13513                 }
13514         }
13515         if (err || dma_mask == DMA_32BIT_MASK) {
13516                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
13517                 if (err) {
13518                         printk(KERN_ERR PFX "No usable DMA configuration, "
13519                                "aborting.\n");
13520                         goto err_out_iounmap;
13521                 }
13522         }
13523
13524         tg3_init_bufmgr_config(tp);
13525
13526         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13527                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
13528         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13530             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13531             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
13532             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13533             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
13534                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
13535         } else {
13536                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
13537         }
13538
13539         /* TSO is on by default on chips that support hardware TSO.
13540          * Firmware TSO on older chips gives lower performance, so it
13541          * is off by default, but can be enabled using ethtool.
13542          */
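        /* For example, on a firmware-TSO chip something like
         *   ethtool -K eth0 tso on
         * (interface name illustrative) would turn TSO on at runtime.
         */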
13543         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13544                 dev->features |= NETIF_F_TSO;
13545                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
13546                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
13547                         dev->features |= NETIF_F_TSO6;
13548                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13549                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13550                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
13551                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13552                         dev->features |= NETIF_F_TSO_ECN;
13553         }
13554
13555
13556         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
13557             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
13558             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
13559                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
13560                 tp->rx_pending = 63;
13561         }
13562
13563         err = tg3_get_device_address(tp);
13564         if (err) {
13565                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
13566                        "aborting.\n");
13567                 goto err_out_iounmap;
13568         }
13569
13570         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
13571                 if (!(pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM)) {
13572                         printk(KERN_ERR PFX "Cannot find proper PCI device "
13573                                "base address for APE, aborting.\n");
13574                         err = -ENODEV;
13575                         goto err_out_iounmap;
13576                 }
13577
13578                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
13579                 if (!tp->aperegs) {
13580                         printk(KERN_ERR PFX "Cannot map APE registers, "
13581                                "aborting.\n");
13582                         err = -ENOMEM;
13583                         goto err_out_iounmap;
13584                 }
13585
13586                 tg3_ape_lock_init(tp);
13587         }
13588
13589         /*
13590          * Reset the chip in case the UNDI or EFI driver did not shut it down;
13591          * the DMA self test will enable the WDMAC and we'll see (spurious)
13592          * pending DMA on the PCI bus at that point.
13593          */
13594         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
13595             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
13596                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
13597                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13598         }
13599
13600         err = tg3_test_dma(tp);
13601         if (err) {
13602                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
13603                 goto err_out_apeunmap;
13604         }
13605
13606         /* Tigon3 can checksum IPv4 only (some newer chips also handle IPv6,
13607          * below)... and some chips have buggy checksumming.
13608          */
13609         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
13610                 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
13611                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13612                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13613                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13614                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13615                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13616                         dev->features |= NETIF_F_IPV6_CSUM;
13617
13618                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
13619         } else
13620                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
13621
13622         /* flow control autonegotiation is default behavior */
13623         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
13624         tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
13625
13626         tg3_init_coal(tp);
13627
13628         pci_set_drvdata(pdev, dev);
13629
13630         err = register_netdev(dev);
13631         if (err) {
13632                 printk(KERN_ERR PFX "Cannot register net device, "
13633                        "aborting.\n");
13634                 goto err_out_apeunmap;
13635         }
13636
13637         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
13638                dev->name,
13639                tp->board_part_number,
13640                tp->pci_chip_rev_id,
13641                tg3_bus_string(tp, str),
13642                dev->dev_addr);
13643
13644         if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
13645                 printk(KERN_INFO
13646                        "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
13647                        tp->dev->name,
13648                        tp->mdio_bus->phy_map[PHY_ADDR]->drv->name,
13649                        tp->mdio_bus->phy_map[PHY_ADDR]->dev.bus_id);
13650         else
13651                 printk(KERN_INFO
13652                        "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
13653                        tp->dev->name, tg3_phy_string(tp),
13654                        ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
13655                         ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
13656                          "10/100/1000Base-T")),
13657                        (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);
13658
13659         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
13660                dev->name,
13661                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
13662                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
13663                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
13664                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
13665                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
13666         printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
13667                dev->name, tp->dma_rwctrl,
13668                (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
13669                 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
13670
13671         return 0;
13672
13673 err_out_apeunmap:
13674         if (tp->aperegs) {
13675                 iounmap(tp->aperegs);
13676                 tp->aperegs = NULL;
13677         }
13678
13679 err_out_iounmap:
13680         if (tp->regs) {
13681                 iounmap(tp->regs);
13682                 tp->regs = NULL;
13683         }
13684
13685 err_out_free_dev:
13686         free_netdev(dev);
13687
13688 err_out_free_res:
13689         pci_release_regions(pdev);
13690
13691 err_out_disable_pdev:
13692         pci_disable_device(pdev);
13693         pci_set_drvdata(pdev, NULL);
13694         return err;
13695 }
13696
13697 static void __devexit tg3_remove_one(struct pci_dev *pdev)
13698 {
13699         struct net_device *dev = pci_get_drvdata(pdev);
13700
13701         if (dev) {
13702                 struct tg3 *tp = netdev_priv(dev);
13703
13704                 flush_scheduled_work();
13705
13706                 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
13707                         tg3_phy_fini(tp);
13708                         tg3_mdio_fini(tp);
13709                 }
13710
13711                 unregister_netdev(dev);
13712                 if (tp->aperegs) {
13713                         iounmap(tp->aperegs);
13714                         tp->aperegs = NULL;
13715                 }
13716                 if (tp->regs) {
13717                         iounmap(tp->regs);
13718                         tp->regs = NULL;
13719                 }
13720                 free_netdev(dev);
13721                 pci_release_regions(pdev);
13722                 pci_disable_device(pdev);
13723                 pci_set_drvdata(pdev, NULL);
13724         }
13725 }
13726
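/* Power-management suspend hook.  Saves PCI config state; if the
 * interface is up, it stops the PHY and network activity, deletes the
 * timer, detaches the device, halts the chip and moves it to the target
 * low-power state.  If that fails, the hardware is restarted so the
 * interface remains usable.
 */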
13727 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
13728 {
13729         struct net_device *dev = pci_get_drvdata(pdev);
13730         struct tg3 *tp = netdev_priv(dev);
13731         pci_power_t target_state;
13732         int err;
13733
13734         /* PCI register 4 needs to be saved whether netif_running() or not.
13735          * MSI address and data need to be saved if using MSI and
13736          * netif_running().
13737          */
13738         pci_save_state(pdev);
13739
13740         if (!netif_running(dev))
13741                 return 0;
13742
13743         flush_scheduled_work();
13744         tg3_phy_stop(tp);
13745         tg3_netif_stop(tp);
13746
13747         del_timer_sync(&tp->timer);
13748
13749         tg3_full_lock(tp, 1);
13750         tg3_disable_ints(tp);
13751         tg3_full_unlock(tp);
13752
13753         netif_device_detach(dev);
13754
13755         tg3_full_lock(tp, 0);
13756         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13757         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
13758         tg3_full_unlock(tp);
13759
13760         target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;
13761
13762         err = tg3_set_power_state(tp, target_state);
13763         if (err) {
13764                 int err2;
13765
13766                 tg3_full_lock(tp, 0);
13767
13768                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
13769                 err2 = tg3_restart_hw(tp, 1);
13770                 if (err2)
13771                         goto out;
13772
13773                 tp->timer.expires = jiffies + tp->timer_offset;
13774                 add_timer(&tp->timer);
13775
13776                 netif_device_attach(dev);
13777                 tg3_netif_start(tp);
13778
13779 out:
13780                 tg3_full_unlock(tp);
13781
13782                 if (!err2)
13783                         tg3_phy_start(tp);
13784         }
13785
13786         return err;
13787 }
13788
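/* Power-management resume hook: restore PCI config state and, if the
 * interface is up, bring the chip back to D0, reinitialize the hardware,
 * restart the timer and re-enable the PHY.
 */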
13789 static int tg3_resume(struct pci_dev *pdev)
13790 {
13791         struct net_device *dev = pci_get_drvdata(pdev);
13792         struct tg3 *tp = netdev_priv(dev);
13793         int err;
13794
13795         pci_restore_state(tp->pdev);
13796
13797         if (!netif_running(dev))
13798                 return 0;
13799
13800         err = tg3_set_power_state(tp, PCI_D0);
13801         if (err)
13802                 return err;
13803
13804         netif_device_attach(dev);
13805
13806         tg3_full_lock(tp, 0);
13807
13808         tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
13809         err = tg3_restart_hw(tp, 1);
13810         if (err)
13811                 goto out;
13812
13813         tp->timer.expires = jiffies + tp->timer_offset;
13814         add_timer(&tp->timer);
13815
13816         tg3_netif_start(tp);
13817
13818 out:
13819         tg3_full_unlock(tp);
13820
13821         if (!err)
13822                 tg3_phy_start(tp);
13823
13824         return err;
13825 }
13826
13827 static struct pci_driver tg3_driver = {
13828         .name           = DRV_MODULE_NAME,
13829         .id_table       = tg3_pci_tbl,
13830         .probe          = tg3_init_one,
13831         .remove         = __devexit_p(tg3_remove_one),
13832         .suspend        = tg3_suspend,
13833         .resume         = tg3_resume
13834 };
13835
13836 static int __init tg3_init(void)
13837 {
13838         return pci_register_driver(&tg3_driver);
13839 }
13840
13841 static void __exit tg3_cleanup(void)
13842 {
13843         pci_unregister_driver(&tg3_driver);
13844 }
13845
13846 module_init(tg3_init);
13847 module_exit(tg3_cleanup);