/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
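
/* Illustrative usage of the flag helpers above; the flag names come from
 * enum TG3_FLAGS in tg3.h, minus the TG3_FLAG_ prefix:
 *
 *      if (tg3_flag(tp, JUMBO_CAPABLE))
 *              ...
 *      tg3_flag_set(tp, INIT_COMPLETE);
 *      tg3_flag_clear(tp, INIT_COMPLETE);
 */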

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     119
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "May 18, 2011"

#define TG3_DEF_MAC_MODE        0
#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100
#define TG3_RSS_INDIR_TBL_SIZE          128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
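
/* Worked example of the mask trick described above: TG3_TX_RING_SIZE is a
 * power of two (512), so NEXT_TX(511) == (512 & 511) == 0, wrapping the
 * index back to the start of the ring without a '%' operation.
 */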

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif
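
/* Sketch of how the receive path consults this threshold (illustrative,
 * condensed from the rx handling later in this file): short frames are
 * copied into a small freshly-allocated skb so the original DMA buffer
 * can be recycled in place, while longer frames are unmapped and handed
 * to the stack directly.
 *
 *      if (len < TG3_RX_COPY_THRESH(tp)) {
 *              ... allocate a small skb and copy len bytes into it,
 *              ... leaving the rx buffer in the ring for reuse;
 *      } else {
 *              ... unmap the buffer and pass the full skb up;
 *      }
 */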

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
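
/* e.g. with the default TG3_DEF_TX_RING_PENDING of 511 descriptors
 * pending, the queue is woken once at least 127 descriptors are free.
 */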

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
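
/* Example of the flushing variants in use (see tg3_switch_clocks() below):
 * tw32_wait_f(TG3PCI_CLOCK_CTRL, val, 40) posts the write, then delays
 * 40 usec around a read-back so the clock switch has settled before the
 * next register access.
 */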

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver isn't holding any stale locks. */
        for (i = 0; i < 8; i++)
                tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                break;
        default:
                return -EINVAL;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == APE_LOCK_GRANT_DRIVER)
                        break;
                udelay(10);
        }

        if (status != APE_LOCK_GRANT_DRIVER) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off,
                                APE_LOCK_GRANT_DRIVER);

                ret = -EBUSY;
        }

        return ret;
}
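
/* Callers pair tg3_ape_lock() with tg3_ape_unlock() around accesses that
 * are shared with the APE firmware, e.g. (illustrative):
 *
 *      if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM) == 0) {
 *              ... touch state shared with the APE ...
 *              tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *      }
 */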

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                break;
        default:
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }
        /* check for RX/TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
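
/* tg3_readphy() above and tg3_writephy() below drive the MAC's MDIO
 * interface: each packs the PHY address, register number and (for writes)
 * the data into a single 32-bit MI_COM frame, and MI_COM_START kicks off
 * the serial transaction.  MI_COM_BUSY stays set until the transfer
 * completes, which is why both helpers poll it in a bounded udelay() loop.
 */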

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}
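
/* The two clause-45 helpers above use the standard indirect access
 * sequence over clause-22 MDIO: write the devad to the MMD control
 * register, write the target address to the MMD address register, switch
 * the control register into no-post-increment data mode, then read or
 * write the data through the same address/data register.
 */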

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (tg3_readphy(tp, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (tg3_writephy(tp, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;

                if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state.
         */
        if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
                tg3_bmcr_reset(tp);

        i = mdiobus_register(tp->mdio_bus);
        if (i) {
                dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
                mdiobus_free(tp->mdio_bus);
                return i;
        }

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        if (!phydev || !phydev->drv) {
                dev_warn(&tp->pdev->dev, "No PHY devices\n");
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
                return -ENODEV;
        }

        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM57780:
                phydev->interface = PHY_INTERFACE_MODE_GMII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                break;
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
                                     PHY_BRCM_RX_REFCLK_UNUSED |
                                     PHY_BRCM_DIS_TXCRXC_NOENRGY |
                                     PHY_BRCM_AUTO_PWRDWN_ENABLE;
                if (tg3_flag(tp, RGMII_INBAND_DISABLE))
                        phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
                /* fallthru */
        case PHY_ID_RTL8211C:
                phydev->interface = PHY_INTERFACE_MODE_RGMII;
                break;
        case PHY_ID_RTL8201E:
        case PHY_ID_BCMAC131:
                phydev->interface = PHY_INTERFACE_MODE_MII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                tp->phy_flags |= TG3_PHYFLG_IS_FET;
                break;
        }

        tg3_flag_set(tp, MDIOBUS_INITED);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);

        return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
        if (tg3_flag(tp, MDIOBUS_INITED)) {
                tg3_flag_clear(tp, MDIOBUS_INITED);
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
        }
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
        u32 val;

        val = tr32(GRC_RX_CPU_EVENT);
        val |= GRC_RX_CPU_DRIVER_EVENT;
        tw32_f(GRC_RX_CPU_EVENT, val);

        tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
        int i;
        unsigned int delay_cnt;
        long time_remain;

        /* If enough time has passed, no wait is necessary. */
        time_remain = (long)(tp->last_event_jiffies + 1 +
                      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
                      (long)jiffies;
        if (time_remain < 0)
                return;

        /* Check if we can shorten the wait time. */
        delay_cnt = jiffies_to_usecs(time_remain);
        if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
                delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
        delay_cnt = (delay_cnt >> 3) + 1;

        for (i = 0; i < delay_cnt; i++) {
                if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
                        break;
                udelay(8);
        }
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
        u32 reg;
        u32 val;

        if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
                return;

        tg3_wait_for_event_ack(tp);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

        val = 0;
        if (!tg3_readphy(tp, MII_BMCR, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_BMSR, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

        val = 0;
        if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_LPA, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

        val = 0;
        if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
                if (!tg3_readphy(tp, MII_CTRL1000, &reg))
                        val = reg << 16;
                if (!tg3_readphy(tp, MII_STAT1000, &reg))
                        val |= (reg & 0xffff);
        }
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

        if (!tg3_readphy(tp, MII_PHYADDR, &reg))
                val = reg << 16;
        else
                val = 0;
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

        tg3_generate_fw_event(tp);
}

static void tg3_link_report(struct tg3 *tp)
{
        if (!netif_carrier_ok(tp->dev)) {
                netif_info(tp, link, tp->dev, "Link is down\n");
                tg3_ump_link_report(tp);
        } else if (netif_msg_link(tp)) {
                netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
                            (tp->link_config.active_speed == SPEED_1000 ?
                             1000 :
                             (tp->link_config.active_speed == SPEED_100 ?
                              100 : 10)),
                            (tp->link_config.active_duplex == DUPLEX_FULL ?
                             "full" : "half"));

                netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
                            (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
                            "on" : "off",
                            (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
                            "on" : "off");

                if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
                        netdev_info(tp->dev, "EEE is %s\n",
                                    tp->setlpicnt ? "enabled" : "disabled");

                tg3_ump_link_report(tp);
        }
}

static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
        u16 miireg;

        if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
                miireg = ADVERTISE_PAUSE_CAP;
        else if (flow_ctrl & FLOW_CTRL_TX)
                miireg = ADVERTISE_PAUSE_ASYM;
        else if (flow_ctrl & FLOW_CTRL_RX)
                miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
        else
                miireg = 0;

        return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
        u16 miireg;

        if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
                miireg = ADVERTISE_1000XPAUSE;
        else if (flow_ctrl & FLOW_CTRL_TX)
                miireg = ADVERTISE_1000XPSE_ASYM;
        else if (flow_ctrl & FLOW_CTRL_RX)
                miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
        else
                miireg = 0;

        return miireg;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
        u8 cap = 0;

        if (lcladv & ADVERTISE_1000XPAUSE) {
                if (lcladv & ADVERTISE_1000XPSE_ASYM) {
                        if (rmtadv & LPA_1000XPAUSE)
                                cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        else if (rmtadv & LPA_1000XPAUSE_ASYM)
                                cap = FLOW_CTRL_RX;
                } else {
                        if (rmtadv & LPA_1000XPAUSE)
                                cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
                }
        } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
                if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
                        cap = FLOW_CTRL_TX;
        }

        return cap;
}
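
/* Worked example of the 1000BASE-X pause resolution above: if the local
 * side advertises ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM and the
 * link partner advertises only LPA_1000XPAUSE_ASYM, the result is
 * FLOW_CTRL_RX: the local MAC honors pause frames from the partner but
 * does not generate any of its own.
 */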
1445
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

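/* phylib link-change callback.  Takes tp->lock, syncs the MAC
 * port mode, duplex and flow control with the current PHY state,
 * and emits a link report when anything user-visible changed.
 */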
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}

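/* Connect the MAC to its PHY through phylib and trim the PHY's
 * supported/advertised feature set down to what the MAC can do.
 */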
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}

static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}

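/* Enable or disable PHY auto power-down (APD) via the MISC
 * shadow registers, or via the FET shadow path on FET-style
 * PHYs.
 */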
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}

static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}

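/* Program factory calibration values from the PHY OTP word into
 * the PHY DSP coefficient registers.
 */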
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}

static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}

static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0003);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}

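/* Poll the DSP control register until the busy bit (0x1000)
 * clears; returns -EBUSY if it never does.
 */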
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

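/* Write a known test pattern to each of the four DSP channels
 * and read it back.  On any mismatch or macro timeout, request
 * another PHY reset via *resetp and return -EBUSY.
 */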
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}

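/* Clear the test pattern out of all four DSP channels. */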
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}

static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 Mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
		if (err)
			return err;

		/* Block the PHY control access.  */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);

	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}

/* Reset the tigon3 PHY and apply the chip-specific workarounds
 * that must follow a PHY reset.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that
	 * support jumbo frames.
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set PHY register 0x10, bit 0, to high FIFO elasticity to
	 * support jumbo frame transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}

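/* Drive the GPIOs that control auxiliary (Vaux) power, keeping
 * it up when this NIC or its on-board peer needs it for WOL or
 * ASF, and releasing it otherwise.
 */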
static void tg3_frob_aux_power(struct tg3 *tp)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
	    tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if (tg3_flag(tp_peer, WOL_ENABLE) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if (tg3_flag(tp, WOL_ENABLE) || tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
			   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
			/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
			u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
					     GRC_LCLCTRL_GPIO_OE1 |
					     GRC_LCLCTRL_GPIO_OE2 |
					     GRC_LCLCTRL_GPIO_OUTPUT0 |
					     GRC_LCLCTRL_GPIO_OUTPUT1 |
					     tp->grc_local_ctrl;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* Workaround to prevent drawing too much current. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				    NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}

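/* Decide whether MAC_MODE_LINK_POLARITY should be set for the
 * given link speed, based on the LED mode and PHY type.
 */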
static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;

	return 0;
}

static int tg3_setup_phy(struct tg3 *, int);

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

static void tg3_write_sig_post_reset(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);

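/* Put the PHY into its lowest safe power state for the current
 * chip; several chips need special sequences, and some must keep
 * the PHY powered because of hardware bugs.
 */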
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of hardware bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}

/* tp->lock is held. */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}

/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		if (tp->nvram_lock_cnt > 0)
			tp->nvram_lock_cnt--;
		if (tp->nvram_lock_cnt == 0)
			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
	}
}

/* tp->lock is held. */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
	}
}

/* tp->lock is held. */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
	}
}

static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}

#define NVRAM_CMD_TIMEOUT 10000

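/* Issue an NVRAM command and poll, in 10 usec steps up to
 * NVRAM_CMD_TIMEOUT iterations, for NVRAM_CMD_DONE.
 */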
static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}

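/* Atmel AT45DB0x1B flash is page addressed rather than linearly
 * addressed; convert a linear NVRAM offset into the page/offset
 * form the part expects (and back again in the helper below).
 */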
static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr / tp->nvram_pagesize) <<
			ATMEL_AT45DB0X1B_PAGE_POS) +
		       (addr % tp->nvram_pagesize);

	return addr;
}

static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
			tp->nvram_pagesize) +
		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

	return addr;
}

/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}

/* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
{
	u32 v;
	int res = tg3_nvram_read(tp, offset, &v);
	if (!res)
		*val = cpu_to_be32(v);
	return res;
}

/* tp->lock is held. */
static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
{
	u32 addr_high, addr_low;
	int i;

	addr_high = ((tp->dev->dev_addr[0] << 8) |
		     tp->dev->dev_addr[1]);
	addr_low = ((tp->dev->dev_addr[2] << 24) |
		    (tp->dev->dev_addr[3] << 16) |
		    (tp->dev->dev_addr[4] <<  8) |
		    (tp->dev->dev_addr[5] <<  0));
	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}

static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}

static int tg3_power_up(struct tg3 *tp)
{
	tg3_enable_register_access(tp);

	pci_set_power_state(tp->pdev, PCI_D0);

	/* Switch out of Vaux if it is a NIC */
	if (tg3_flag(tp, IS_NIC))
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

	return 0;
}

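/* Prepare the chip for power-down: mask PCI interrupts, drop the
 * link to a WOL-capable speed, arm magic-packet detection if WOL
 * is enabled, and gate the clocks that are safe to stop.
 */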
2634 static int tg3_power_down_prepare(struct tg3 *tp)
2635 {
2636         u32 misc_host_ctrl;
2637         bool device_should_wake, do_low_power;
2638
2639         tg3_enable_register_access(tp);
2640
2641         /* Restore the CLKREQ setting. */
2642         if (tg3_flag(tp, CLKREQ_BUG)) {
2643                 u16 lnkctl;
2644
2645                 pci_read_config_word(tp->pdev,
2646                                      tp->pcie_cap + PCI_EXP_LNKCTL,
2647                                      &lnkctl);
2648                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2649                 pci_write_config_word(tp->pdev,
2650                                       tp->pcie_cap + PCI_EXP_LNKCTL,
2651                                       lnkctl);
2652         }
2653
2654         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2655         tw32(TG3PCI_MISC_HOST_CTRL,
2656              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2657
2658         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2659                              tg3_flag(tp, WOL_ENABLE);
2660
2661         if (tg3_flag(tp, USE_PHYLIB)) {
2662                 do_low_power = false;
2663                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2664                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2665                         struct phy_device *phydev;
2666                         u32 phyid, advertising;
2667
2668                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2669
2670                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2671
2672                         tp->link_config.orig_speed = phydev->speed;
2673                         tp->link_config.orig_duplex = phydev->duplex;
2674                         tp->link_config.orig_autoneg = phydev->autoneg;
2675                         tp->link_config.orig_advertising = phydev->advertising;
2676
2677                         advertising = ADVERTISED_TP |
2678                                       ADVERTISED_Pause |
2679                                       ADVERTISED_Autoneg |
2680                                       ADVERTISED_10baseT_Half;
2681
2682                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
2683                                 if (tg3_flag(tp, WOL_SPEED_100MB))
2684                                         advertising |=
2685                                                 ADVERTISED_100baseT_Half |
2686                                                 ADVERTISED_100baseT_Full |
2687                                                 ADVERTISED_10baseT_Full;
2688                                 else
2689                                         advertising |= ADVERTISED_10baseT_Full;
2690                         }
2691
2692                         phydev->advertising = advertising;
2693
2694                         phy_start_aneg(phydev);
2695
2696                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2697                         if (phyid != PHY_ID_BCMAC131) {
2698                                 phyid &= PHY_BCM_OUI_MASK;
2699                                 if (phyid == PHY_BCM_OUI_1 ||
2700                                     phyid == PHY_BCM_OUI_2 ||
2701                                     phyid == PHY_BCM_OUI_3)
2702                                         do_low_power = true;
2703                         }
2704                 }
2705         } else {
2706                 do_low_power = true;
2707
2708                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2709                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2710                         tp->link_config.orig_speed = tp->link_config.speed;
2711                         tp->link_config.orig_duplex = tp->link_config.duplex;
2712                         tp->link_config.orig_autoneg = tp->link_config.autoneg;
2713                 }
2714
2715                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2716                         tp->link_config.speed = SPEED_10;
2717                         tp->link_config.duplex = DUPLEX_HALF;
2718                         tp->link_config.autoneg = AUTONEG_ENABLE;
2719                         tg3_setup_phy(tp, 0);
2720                 }
2721         }
2722
2723         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2724                 u32 val;
2725
2726                 val = tr32(GRC_VCPU_EXT_CTRL);
2727                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2728         } else if (!tg3_flag(tp, ENABLE_ASF)) {
2729                 int i;
2730                 u32 val;
2731
2732                 for (i = 0; i < 200; i++) {
2733                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2734                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2735                                 break;
2736                         msleep(1);
2737                 }
2738         }
2739         if (tg3_flag(tp, WOL_CAP))
2740                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2741                                                      WOL_DRV_STATE_SHUTDOWN |
2742                                                      WOL_DRV_WOL |
2743                                                      WOL_SET_MAGIC_PKT);
2744
2745         if (device_should_wake) {
2746                 u32 mac_mode;
2747
2748                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2749                         if (do_low_power &&
2750                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2751                                 tg3_phy_auxctl_write(tp,
2752                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2753                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
2754                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2755                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
2756                                 udelay(40);
2757                         }
2758
2759                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2760                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
2761                         else
2762                                 mac_mode = MAC_MODE_PORT_MODE_MII;
2763
2764                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2765                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2766                             ASIC_REV_5700) {
2767                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
2768                                              SPEED_100 : SPEED_10;
2769                                 if (tg3_5700_link_polarity(tp, speed))
2770                                         mac_mode |= MAC_MODE_LINK_POLARITY;
2771                                 else
2772                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
2773                         }
2774                 } else {
2775                         mac_mode = MAC_MODE_PORT_MODE_TBI;
2776                 }
2777
2778                 if (!tg3_flag(tp, 5750_PLUS))
2779                         tw32(MAC_LED_CTRL, tp->led_ctrl);
2780
2781                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2782                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
2783                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
2784                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2785
2786                 if (tg3_flag(tp, ENABLE_APE))
2787                         mac_mode |= MAC_MODE_APE_TX_EN |
2788                                     MAC_MODE_APE_RX_EN |
2789                                     MAC_MODE_TDE_ENABLE;
2790
2791                 tw32_f(MAC_MODE, mac_mode);
2792                 udelay(100);
2793
2794                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2795                 udelay(10);
2796         }
2797
2798         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
2799             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2800              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2801                 u32 base_val;
2802
2803                 base_val = tp->pci_clock_ctrl;
2804                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2805                              CLOCK_CTRL_TXCLK_DISABLE);
2806
2807                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2808                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
2809         } else if (tg3_flag(tp, 5780_CLASS) ||
2810                    tg3_flag(tp, CPMU_PRESENT) ||
2811                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2812                 /* do nothing */
2813         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
2814                 u32 newbits1, newbits2;
2815
2816                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2817                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2818                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2819                                     CLOCK_CTRL_TXCLK_DISABLE |
2820                                     CLOCK_CTRL_ALTCLK);
2821                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2822                 } else if (tg3_flag(tp, 5705_PLUS)) {
2823                         newbits1 = CLOCK_CTRL_625_CORE;
2824                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2825                 } else {
2826                         newbits1 = CLOCK_CTRL_ALTCLK;
2827                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2828                 }
2829
2830                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2831                             40);
2832
2833                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2834                             40);
2835
2836                 if (!tg3_flag(tp, 5705_PLUS)) {
2837                         u32 newbits3;
2838
2839                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2840                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2841                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2842                                             CLOCK_CTRL_TXCLK_DISABLE |
2843                                             CLOCK_CTRL_44MHZ_CORE);
2844                         } else {
2845                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2846                         }
2847
2848                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
2849                                     tp->pci_clock_ctrl | newbits3, 40);
2850                 }
2851         }
2852
2853         if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
2854                 tg3_power_down_phy(tp, do_low_power);
2855
2856         tg3_frob_aux_power(tp);
2857
2858         /* Workaround for unstable PLL clock */
2859         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2860             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2861                 u32 val = tr32(0x7d00);
2862
2863                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2864                 tw32(0x7d00, val);
2865                 if (!tg3_flag(tp, ENABLE_ASF)) {
2866                         int err;
2867
2868                         err = tg3_nvram_lock(tp);
2869                         tg3_halt_cpu(tp, RX_CPU_BASE);
2870                         if (!err)
2871                                 tg3_nvram_unlock(tp);
2872                 }
2873         }
2874
2875         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2876
2877         return 0;
2878 }
2879
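/* Final power-down entry point: run the prepare sequence above, arm PCI
 * wakeup if Wake-on-LAN is enabled, then drop the device into D3hot.
 */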
2880 static void tg3_power_down(struct tg3 *tp)
2881 {
2882         tg3_power_down_prepare(tp);
2883
2884         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
2885         pci_set_power_state(tp->pdev, PCI_D3hot);
2886 }
2887
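/* Decode the speed/duplex field of the Broadcom AUX_STAT PHY register
 * into ethtool SPEED_xxx and DUPLEX_xxx values.  FET-style PHYs encode
 * speed and duplex in separate bits, handled in the default case.
 */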
2888 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2889 {
2890         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2891         case MII_TG3_AUX_STAT_10HALF:
2892                 *speed = SPEED_10;
2893                 *duplex = DUPLEX_HALF;
2894                 break;
2895
2896         case MII_TG3_AUX_STAT_10FULL:
2897                 *speed = SPEED_10;
2898                 *duplex = DUPLEX_FULL;
2899                 break;
2900
2901         case MII_TG3_AUX_STAT_100HALF:
2902                 *speed = SPEED_100;
2903                 *duplex = DUPLEX_HALF;
2904                 break;
2905
2906         case MII_TG3_AUX_STAT_100FULL:
2907                 *speed = SPEED_100;
2908                 *duplex = DUPLEX_FULL;
2909                 break;
2910
2911         case MII_TG3_AUX_STAT_1000HALF:
2912                 *speed = SPEED_1000;
2913                 *duplex = DUPLEX_HALF;
2914                 break;
2915
2916         case MII_TG3_AUX_STAT_1000FULL:
2917                 *speed = SPEED_1000;
2918                 *duplex = DUPLEX_FULL;
2919                 break;
2920
2921         default:
2922                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2923                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2924                                  SPEED_10;
2925                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2926                                   DUPLEX_HALF;
2927                         break;
2928                 }
2929                 *speed = SPEED_INVALID;
2930                 *duplex = DUPLEX_INVALID;
2931                 break;
2932         }
2933 }
2934
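/* Program the PHY autoneg advertisement from an ethtool ADVERTISED_xxx
 * mask and flow-control setting: MII_ADVERTISE for 10/100 modes,
 * MII_TG3_CTRL for gigabit modes, and the clause-45 EEE advertisement
 * register when the PHY is EEE-capable.
 */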
2935 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
2936 {
2937         int err = 0;
2938         u32 val, new_adv;
2939
2940         new_adv = ADVERTISE_CSMA;
2941         if (advertise & ADVERTISED_10baseT_Half)
2942                 new_adv |= ADVERTISE_10HALF;
2943         if (advertise & ADVERTISED_10baseT_Full)
2944                 new_adv |= ADVERTISE_10FULL;
2945         if (advertise & ADVERTISED_100baseT_Half)
2946                 new_adv |= ADVERTISE_100HALF;
2947         if (advertise & ADVERTISED_100baseT_Full)
2948                 new_adv |= ADVERTISE_100FULL;
2949
2950         new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
2951
2952         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
2953         if (err)
2954                 goto done;
2955
2956         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
2957                 goto done;
2958
2959         new_adv = 0;
2960         if (advertise & ADVERTISED_1000baseT_Half)
2961                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2962         if (advertise & ADVERTISED_1000baseT_Full)
2963                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2964
2965         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2966             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2967                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2968                             MII_TG3_CTRL_ENABLE_AS_MASTER);
2969
2970         err = tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2971         if (err)
2972                 goto done;
2973
2974         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2975                 goto done;
2976
2977         tw32(TG3_CPMU_EEE_MODE,
2978              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2979
2980         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2981         if (!err) {
2982                 u32 err2;
2983
2984                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
2985                 case ASIC_REV_5717:
2986                 case ASIC_REV_57765:
2987                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
2988                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
2989                                                  MII_TG3_DSP_CH34TP2_HIBW01);
2990                         /* Fall through */
2991                 case ASIC_REV_5719:
2992                         val = MII_TG3_DSP_TAP26_ALNOKO |
2993                               MII_TG3_DSP_TAP26_RMRXSTO |
2994                               MII_TG3_DSP_TAP26_OPCSINPT;
2995                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2996                 }
2997
2998                 val = 0;
2999                 /* Advertise 100BASE-TX EEE ability */
3000                 if (advertise & ADVERTISED_100baseT_Full)
3001                         val |= MDIO_AN_EEE_ADV_100TX;
3002                 /* Advertise 1000BASE-T EEE ability */
3003                 if (advertise & ADVERTISED_1000baseT_Full)
3004                         val |= MDIO_AN_EEE_ADV_1000T;
3005                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3006
3007                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3008                 if (!err)
3009                         err = err2;
3010         }
3011
3012 done:
3013         return err;
3014 }
3015
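/* Begin link bring-up on a copper PHY.  In low-power mode only 10Mb
 * modes (plus 100Mb when WOL_SPEED_100MB is set) are advertised.  When
 * a fixed speed/duplex is forced, BMCR is written directly after
 * waiting for the old link to drop; otherwise autoneg is (re)started.
 */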
3016 static void tg3_phy_copper_begin(struct tg3 *tp)
3017 {
3018         u32 new_adv;
3019         int i;
3020
3021         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3022                 new_adv = ADVERTISED_10baseT_Half |
3023                           ADVERTISED_10baseT_Full;
3024                 if (tg3_flag(tp, WOL_SPEED_100MB))
3025                         new_adv |= ADVERTISED_100baseT_Half |
3026                                    ADVERTISED_100baseT_Full;
3027
3028                 tg3_phy_autoneg_cfg(tp, new_adv,
3029                                     FLOW_CTRL_TX | FLOW_CTRL_RX);
3030         } else if (tp->link_config.speed == SPEED_INVALID) {
3031                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3032                         tp->link_config.advertising &=
3033                                 ~(ADVERTISED_1000baseT_Half |
3034                                   ADVERTISED_1000baseT_Full);
3035
3036                 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3037                                     tp->link_config.flowctrl);
3038         } else {
3039                 /* Asking for a specific link mode. */
3040                 if (tp->link_config.speed == SPEED_1000) {
3041                         if (tp->link_config.duplex == DUPLEX_FULL)
3042                                 new_adv = ADVERTISED_1000baseT_Full;
3043                         else
3044                                 new_adv = ADVERTISED_1000baseT_Half;
3045                 } else if (tp->link_config.speed == SPEED_100) {
3046                         if (tp->link_config.duplex == DUPLEX_FULL)
3047                                 new_adv = ADVERTISED_100baseT_Full;
3048                         else
3049                                 new_adv = ADVERTISED_100baseT_Half;
3050                 } else {
3051                         if (tp->link_config.duplex == DUPLEX_FULL)
3052                                 new_adv = ADVERTISED_10baseT_Full;
3053                         else
3054                                 new_adv = ADVERTISED_10baseT_Half;
3055                 }
3056
3057                 tg3_phy_autoneg_cfg(tp, new_adv,
3058                                     tp->link_config.flowctrl);
3059         }
3060
3061         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3062             tp->link_config.speed != SPEED_INVALID) {
3063                 u32 bmcr, orig_bmcr;
3064
3065                 tp->link_config.active_speed = tp->link_config.speed;
3066                 tp->link_config.active_duplex = tp->link_config.duplex;
3067
3068                 bmcr = 0;
3069                 switch (tp->link_config.speed) {
3070                 default:
3071                 case SPEED_10:
3072                         break;
3073
3074                 case SPEED_100:
3075                         bmcr |= BMCR_SPEED100;
3076                         break;
3077
3078                 case SPEED_1000:
3079                         bmcr |= TG3_BMCR_SPEED1000;
3080                         break;
3081                 }
3082
3083                 if (tp->link_config.duplex == DUPLEX_FULL)
3084                         bmcr |= BMCR_FULLDPLX;
3085
3086                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3087                     (bmcr != orig_bmcr)) {
3088                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3089                         for (i = 0; i < 1500; i++) {
3090                                 u32 tmp;
3091
3092                                 udelay(10);
3093                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3094                                     tg3_readphy(tp, MII_BMSR, &tmp))
3095                                         continue;
3096                                 if (!(tmp & BMSR_LSTATUS)) {
3097                                         udelay(40);
3098                                         break;
3099                                 }
3100                         }
3101                         tg3_writephy(tp, MII_BMCR, bmcr);
3102                         udelay(40);
3103                 }
3104         } else {
3105                 tg3_writephy(tp, MII_BMCR,
3106                              BMCR_ANENABLE | BMCR_ANRESTART);
3107         }
3108 }
3109
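/* Load DSP coefficient fixups for the BCM5401 PHY.  The register/value
 * pairs below are vendor-supplied magic values and are not otherwise
 * documented.
 */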
3110 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3111 {
3112         int err;
3113
3114         /* Turn off tap power management and set the
3115          * extended packet length bit. */
3116         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3117
3118         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3119         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3120         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3121         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3122         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3123
3124         udelay(40);
3125
3126         return err;
3127 }
3128
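/* Return 1 if every link mode in @mask is currently being advertised
 * by the PHY (MII_ADVERTISE for 10/100 modes, MII_TG3_CTRL for gigabit
 * modes), 0 otherwise.
 */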
3129 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3130 {
3131         u32 adv_reg, all_mask = 0;
3132
3133         if (mask & ADVERTISED_10baseT_Half)
3134                 all_mask |= ADVERTISE_10HALF;
3135         if (mask & ADVERTISED_10baseT_Full)
3136                 all_mask |= ADVERTISE_10FULL;
3137         if (mask & ADVERTISED_100baseT_Half)
3138                 all_mask |= ADVERTISE_100HALF;
3139         if (mask & ADVERTISED_100baseT_Full)
3140                 all_mask |= ADVERTISE_100FULL;
3141
3142         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3143                 return 0;
3144
3145         if ((adv_reg & all_mask) != all_mask)
3146                 return 0;
3147         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3148                 u32 tg3_ctrl;
3149
3150                 all_mask = 0;
3151                 if (mask & ADVERTISED_1000baseT_Half)
3152                         all_mask |= ADVERTISE_1000HALF;
3153                 if (mask & ADVERTISED_1000baseT_Full)
3154                         all_mask |= ADVERTISE_1000FULL;
3155
3156                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
3157                         return 0;
3158
3159                 if ((tg3_ctrl & all_mask) != all_mask)
3160                         return 0;
3161         }
3162         return 1;
3163 }
3164
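/* Check that the advertised pause bits match the requested flow
 * control.  At full duplex a mismatch fails the link check; otherwise
 * the advertisement is rewritten so that a future renegotiation starts
 * from the correct value.
 */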
3165 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3166 {
3167         u32 curadv, reqadv;
3168
3169         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3170                 return 1;
3171
3172         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3173         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3174
3175         if (tp->link_config.active_duplex == DUPLEX_FULL) {
3176                 if (curadv != reqadv)
3177                         return 0;
3178
3179                 if (tg3_flag(tp, PAUSE_AUTONEG))
3180                         tg3_readphy(tp, MII_LPA, rmtadv);
3181         } else {
3182                 /* Reprogram the advertisement register, even if it
3183                  * does not affect the current link.  If the link
3184                  * gets renegotiated in the future, we can save an
3185                  * additional renegotiation cycle by advertising
3186                  * it correctly in the first place.
3187                  */
3188                 if (curadv != reqadv) {
3189                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3190                                      ADVERTISE_PAUSE_ASYM);
3191                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3192                 }
3193         }
3194
3195         return 1;
3196 }
3197
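/* Bring up (or re-check) the link on a copper PHY: clear stale MAC
 * status, optionally reset the PHY, poll BMSR for link, derive
 * speed/duplex from AUX_STAT, then reprogram MAC_MODE and flow control
 * to match.  Always returns 0; link state is reported via the
 * netif_carrier calls at the end.
 */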
3198 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3199 {
3200         int current_link_up;
3201         u32 bmsr, val;
3202         u32 lcl_adv, rmt_adv;
3203         u16 current_speed;
3204         u8 current_duplex;
3205         int i, err;
3206
3207         tw32(MAC_EVENT, 0);
3208
3209         tw32_f(MAC_STATUS,
3210              (MAC_STATUS_SYNC_CHANGED |
3211               MAC_STATUS_CFG_CHANGED |
3212               MAC_STATUS_MI_COMPLETION |
3213               MAC_STATUS_LNKSTATE_CHANGED));
3214         udelay(40);
3215
3216         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3217                 tw32_f(MAC_MI_MODE,
3218                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3219                 udelay(80);
3220         }
3221
3222         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3223
3224         /* Some third-party PHYs need to be reset on link going
3225          * down.
3226          */
3227         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3228              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3229              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3230             netif_carrier_ok(tp->dev)) {
3231                 tg3_readphy(tp, MII_BMSR, &bmsr);
3232                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3233                     !(bmsr & BMSR_LSTATUS))
3234                         force_reset = 1;
3235         }
3236         if (force_reset)
3237                 tg3_phy_reset(tp);
3238
3239         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3240                 tg3_readphy(tp, MII_BMSR, &bmsr);
3241                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3242                     !tg3_flag(tp, INIT_COMPLETE))
3243                         bmsr = 0;
3244
3245                 if (!(bmsr & BMSR_LSTATUS)) {
3246                         err = tg3_init_5401phy_dsp(tp);
3247                         if (err)
3248                                 return err;
3249
3250                         tg3_readphy(tp, MII_BMSR, &bmsr);
3251                         for (i = 0; i < 1000; i++) {
3252                                 udelay(10);
3253                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3254                                     (bmsr & BMSR_LSTATUS)) {
3255                                         udelay(40);
3256                                         break;
3257                                 }
3258                         }
3259
3260                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3261                             TG3_PHY_REV_BCM5401_B0 &&
3262                             !(bmsr & BMSR_LSTATUS) &&
3263                             tp->link_config.active_speed == SPEED_1000) {
3264                                 err = tg3_phy_reset(tp);
3265                                 if (!err)
3266                                         err = tg3_init_5401phy_dsp(tp);
3267                                 if (err)
3268                                         return err;
3269                         }
3270                 }
3271         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3272                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3273                 /* 5701 {A0,B0} CRC bug workaround */
3274                 tg3_writephy(tp, 0x15, 0x0a75);
3275                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3276                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3277                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3278         }
3279
3280         /* Clear pending interrupts... */
3281         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3282         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3283
3284         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3285                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3286         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3287                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3288
3289         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3290             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3291                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3292                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3293                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3294                 else
3295                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3296         }
3297
3298         current_link_up = 0;
3299         current_speed = SPEED_INVALID;
3300         current_duplex = DUPLEX_INVALID;
3301
3302         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3303                 err = tg3_phy_auxctl_read(tp,
3304                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3305                                           &val);
3306                 if (!err && !(val & (1 << 10))) {
3307                         tg3_phy_auxctl_write(tp,
3308                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3309                                              val | (1 << 10));
3310                         goto relink;
3311                 }
3312         }
3313
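        /* BMSR link status is a latched-low bit per IEEE 802.3, so the
         * register is read twice: the first read clears a stale latched
         * link-down event, the second reflects the current link state.
         */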
3314         bmsr = 0;
3315         for (i = 0; i < 100; i++) {
3316                 tg3_readphy(tp, MII_BMSR, &bmsr);
3317                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3318                     (bmsr & BMSR_LSTATUS))
3319                         break;
3320                 udelay(40);
3321         }
3322
3323         if (bmsr & BMSR_LSTATUS) {
3324                 u32 aux_stat, bmcr;
3325
3326                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3327                 for (i = 0; i < 2000; i++) {
3328                         udelay(10);
3329                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3330                             aux_stat)
3331                                 break;
3332                 }
3333
3334                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3335                                              &current_speed,
3336                                              &current_duplex);
3337
3338                 bmcr = 0;
3339                 for (i = 0; i < 200; i++) {
3340                         tg3_readphy(tp, MII_BMCR, &bmcr);
3341                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
3342                                 continue;
3343                         if (bmcr && bmcr != 0x7fff)
3344                                 break;
3345                         udelay(10);
3346                 }
3347
3348                 lcl_adv = 0;
3349                 rmt_adv = 0;
3350
3351                 tp->link_config.active_speed = current_speed;
3352                 tp->link_config.active_duplex = current_duplex;
3353
3354                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3355                         if ((bmcr & BMCR_ANENABLE) &&
3356                             tg3_copper_is_advertising_all(tp,
3357                                                 tp->link_config.advertising)) {
3358                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3359                                                                   &rmt_adv))
3360                                         current_link_up = 1;
3361                         }
3362                 } else {
3363                         if (!(bmcr & BMCR_ANENABLE) &&
3364                             tp->link_config.speed == current_speed &&
3365                             tp->link_config.duplex == current_duplex &&
3366                             tp->link_config.flowctrl ==
3367                             tp->link_config.active_flowctrl) {
3368                                 current_link_up = 1;
3369                         }
3370                 }
3371
3372                 if (current_link_up == 1 &&
3373                     tp->link_config.active_duplex == DUPLEX_FULL)
3374                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3375         }
3376
3377 relink:
3378         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3379                 tg3_phy_copper_begin(tp);
3380
3381                 tg3_readphy(tp, MII_BMSR, &bmsr);
3382                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
3383                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
3384                         current_link_up = 1;
3385         }
3386
3387         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3388         if (current_link_up == 1) {
3389                 if (tp->link_config.active_speed == SPEED_100 ||
3390                     tp->link_config.active_speed == SPEED_10)
3391                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3392                 else
3393                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3394         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3395                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3396         else
3397                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3398
3399         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3400         if (tp->link_config.active_duplex == DUPLEX_HALF)
3401                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3402
3403         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3404                 if (current_link_up == 1 &&
3405                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3406                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3407                 else
3408                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3409         }
3410
3411         /* Without this setting the Netgear GA302T PHY does not
3412          * send or receive packets (exact reason unknown).
3413          */
3414         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3415             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3416                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3417                 tw32_f(MAC_MI_MODE, tp->mi_mode);
3418                 udelay(80);
3419         }
3420
3421         tw32_f(MAC_MODE, tp->mac_mode);
3422         udelay(40);
3423
3424         tg3_phy_eee_adjust(tp, current_link_up);
3425
3426         if (tg3_flag(tp, USE_LINKCHG_REG)) {
3427                 /* Polled via timer. */
3428                 tw32_f(MAC_EVENT, 0);
3429         } else {
3430                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3431         }
3432         udelay(40);
3433
3434         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3435             current_link_up == 1 &&
3436             tp->link_config.active_speed == SPEED_1000 &&
3437             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
3438                 udelay(120);
3439                 tw32_f(MAC_STATUS,
3440                      (MAC_STATUS_SYNC_CHANGED |
3441                       MAC_STATUS_CFG_CHANGED));
3442                 udelay(40);
3443                 tg3_write_mem(tp,
3444                               NIC_SRAM_FIRMWARE_MBOX,
3445                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3446         }
3447
3448         /* Prevent send BD corruption: disable CLKREQ at 10/100 link speeds. */
3449         if (tg3_flag(tp, CLKREQ_BUG)) {
3450                 u16 oldlnkctl, newlnkctl;
3451
3452                 pci_read_config_word(tp->pdev,
3453                                      tp->pcie_cap + PCI_EXP_LNKCTL,
3454                                      &oldlnkctl);
3455                 if (tp->link_config.active_speed == SPEED_100 ||
3456                     tp->link_config.active_speed == SPEED_10)
3457                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3458                 else
3459                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3460                 if (newlnkctl != oldlnkctl)
3461                         pci_write_config_word(tp->pdev,
3462                                               tp->pcie_cap + PCI_EXP_LNKCTL,
3463                                               newlnkctl);
3464         }
3465
3466         if (current_link_up != netif_carrier_ok(tp->dev)) {
3467                 if (current_link_up)
3468                         netif_carrier_on(tp->dev);
3469                 else
3470                         netif_carrier_off(tp->dev);
3471                 tg3_link_report(tp);
3472         }
3473
3474         return 0;
3475 }
3476
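/* Bookkeeping for the software fiber autoneg state machine below:
 * current state, mirrored MR_xxx status flags, tick timestamps, and
 * the last transmitted/received config code words.
 */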
3477 struct tg3_fiber_aneginfo {
3478         int state;
3479 #define ANEG_STATE_UNKNOWN              0
3480 #define ANEG_STATE_AN_ENABLE            1
3481 #define ANEG_STATE_RESTART_INIT         2
3482 #define ANEG_STATE_RESTART              3
3483 #define ANEG_STATE_DISABLE_LINK_OK      4
3484 #define ANEG_STATE_ABILITY_DETECT_INIT  5
3485 #define ANEG_STATE_ABILITY_DETECT       6
3486 #define ANEG_STATE_ACK_DETECT_INIT      7
3487 #define ANEG_STATE_ACK_DETECT           8
3488 #define ANEG_STATE_COMPLETE_ACK_INIT    9
3489 #define ANEG_STATE_COMPLETE_ACK         10
3490 #define ANEG_STATE_IDLE_DETECT_INIT     11
3491 #define ANEG_STATE_IDLE_DETECT          12
3492 #define ANEG_STATE_LINK_OK              13
3493 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
3494 #define ANEG_STATE_NEXT_PAGE_WAIT       15
3495
3496         u32 flags;
3497 #define MR_AN_ENABLE            0x00000001
3498 #define MR_RESTART_AN           0x00000002
3499 #define MR_AN_COMPLETE          0x00000004
3500 #define MR_PAGE_RX              0x00000008
3501 #define MR_NP_LOADED            0x00000010
3502 #define MR_TOGGLE_TX            0x00000020
3503 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
3504 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
3505 #define MR_LP_ADV_SYM_PAUSE     0x00000100
3506 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
3507 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3508 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3509 #define MR_LP_ADV_NEXT_PAGE     0x00001000
3510 #define MR_TOGGLE_RX            0x00002000
3511 #define MR_NP_RX                0x00004000
3512
3513 #define MR_LINK_OK              0x80000000
3514
3515         unsigned long link_time, cur_time;
3516
3517         u32 ability_match_cfg;
3518         int ability_match_count;
3519
3520         char ability_match, idle_match, ack_match;
3521
3522         u32 txconfig, rxconfig;
3523 #define ANEG_CFG_NP             0x00000080
3524 #define ANEG_CFG_ACK            0x00000040
3525 #define ANEG_CFG_RF2            0x00000020
3526 #define ANEG_CFG_RF1            0x00000010
3527 #define ANEG_CFG_PS2            0x00000001
3528 #define ANEG_CFG_PS1            0x00008000
3529 #define ANEG_CFG_HD             0x00004000
3530 #define ANEG_CFG_FD             0x00002000
3531 #define ANEG_CFG_INVAL          0x00001f06
3532
3533 };
3534 #define ANEG_OK         0
3535 #define ANEG_DONE       1
3536 #define ANEG_TIMER_ENAB 2
3537 #define ANEG_FAILED     -1
3538
3539 #define ANEG_STATE_SETTLE_TIME  10000
3540
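/* One tick of the software 802.3z (clause 37 style) autoneg state
 * machine for TBI fiber links.  fiber_autoneg() below calls this
 * roughly once per microsecond, so ANEG_STATE_SETTLE_TIME corresponds
 * to about 10ms of settle time.
 */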
3541 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3542                                    struct tg3_fiber_aneginfo *ap)
3543 {
3544         u16 flowctrl;
3545         unsigned long delta;
3546         u32 rx_cfg_reg;
3547         int ret;
3548
3549         if (ap->state == ANEG_STATE_UNKNOWN) {
3550                 ap->rxconfig = 0;
3551                 ap->link_time = 0;
3552                 ap->cur_time = 0;
3553                 ap->ability_match_cfg = 0;
3554                 ap->ability_match_count = 0;
3555                 ap->ability_match = 0;
3556                 ap->idle_match = 0;
3557                 ap->ack_match = 0;
3558         }
3559         ap->cur_time++;
3560
3561         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3562                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3563
3564                 if (rx_cfg_reg != ap->ability_match_cfg) {
3565                         ap->ability_match_cfg = rx_cfg_reg;
3566                         ap->ability_match = 0;
3567                         ap->ability_match_count = 0;
3568                 } else {
3569                         if (++ap->ability_match_count > 1) {
3570                                 ap->ability_match = 1;
3571                                 ap->ability_match_cfg = rx_cfg_reg;
3572                         }
3573                 }
3574                 if (rx_cfg_reg & ANEG_CFG_ACK)
3575                         ap->ack_match = 1;
3576                 else
3577                         ap->ack_match = 0;
3578
3579                 ap->idle_match = 0;
3580         } else {
3581                 ap->idle_match = 1;
3582                 ap->ability_match_cfg = 0;
3583                 ap->ability_match_count = 0;
3584                 ap->ability_match = 0;
3585                 ap->ack_match = 0;
3586
3587                 rx_cfg_reg = 0;
3588         }
3589
3590         ap->rxconfig = rx_cfg_reg;
3591         ret = ANEG_OK;
3592
3593         switch (ap->state) {
3594         case ANEG_STATE_UNKNOWN:
3595                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3596                         ap->state = ANEG_STATE_AN_ENABLE;
3597
3598                 /* fallthru */
3599         case ANEG_STATE_AN_ENABLE:
3600                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3601                 if (ap->flags & MR_AN_ENABLE) {
3602                         ap->link_time = 0;
3603                         ap->cur_time = 0;
3604                         ap->ability_match_cfg = 0;
3605                         ap->ability_match_count = 0;
3606                         ap->ability_match = 0;
3607                         ap->idle_match = 0;
3608                         ap->ack_match = 0;
3609
3610                         ap->state = ANEG_STATE_RESTART_INIT;
3611                 } else {
3612                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
3613                 }
3614                 break;
3615
3616         case ANEG_STATE_RESTART_INIT:
3617                 ap->link_time = ap->cur_time;
3618                 ap->flags &= ~(MR_NP_LOADED);
3619                 ap->txconfig = 0;
3620                 tw32(MAC_TX_AUTO_NEG, 0);
3621                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3622                 tw32_f(MAC_MODE, tp->mac_mode);
3623                 udelay(40);
3624
3625                 ret = ANEG_TIMER_ENAB;
3626                 ap->state = ANEG_STATE_RESTART;
3627
3628                 /* fallthru */
3629         case ANEG_STATE_RESTART:
3630                 delta = ap->cur_time - ap->link_time;
3631                 if (delta > ANEG_STATE_SETTLE_TIME)
3632                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3633                 else
3634                         ret = ANEG_TIMER_ENAB;
3635                 break;
3636
3637         case ANEG_STATE_DISABLE_LINK_OK:
3638                 ret = ANEG_DONE;
3639                 break;
3640
3641         case ANEG_STATE_ABILITY_DETECT_INIT:
3642                 ap->flags &= ~(MR_TOGGLE_TX);
3643                 ap->txconfig = ANEG_CFG_FD;
3644                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3645                 if (flowctrl & ADVERTISE_1000XPAUSE)
3646                         ap->txconfig |= ANEG_CFG_PS1;
3647                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3648                         ap->txconfig |= ANEG_CFG_PS2;
3649                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3650                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3651                 tw32_f(MAC_MODE, tp->mac_mode);
3652                 udelay(40);
3653
3654                 ap->state = ANEG_STATE_ABILITY_DETECT;
3655                 break;
3656
3657         case ANEG_STATE_ABILITY_DETECT:
3658                 if (ap->ability_match != 0 && ap->rxconfig != 0)
3659                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
3660                 break;
3661
3662         case ANEG_STATE_ACK_DETECT_INIT:
3663                 ap->txconfig |= ANEG_CFG_ACK;
3664                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3665                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3666                 tw32_f(MAC_MODE, tp->mac_mode);
3667                 udelay(40);
3668
3669                 ap->state = ANEG_STATE_ACK_DETECT;
3670
3671                 /* fallthru */
3672         case ANEG_STATE_ACK_DETECT:
3673                 if (ap->ack_match != 0) {
3674                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3675                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3676                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3677                         } else {
3678                                 ap->state = ANEG_STATE_AN_ENABLE;
3679                         }
3680                 } else if (ap->ability_match != 0 &&
3681                            ap->rxconfig == 0) {
3682                         ap->state = ANEG_STATE_AN_ENABLE;
3683                 }
3684                 break;
3685
3686         case ANEG_STATE_COMPLETE_ACK_INIT:
3687                 if (ap->rxconfig & ANEG_CFG_INVAL) {
3688                         ret = ANEG_FAILED;
3689                         break;
3690                 }
3691                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3692                                MR_LP_ADV_HALF_DUPLEX |
3693                                MR_LP_ADV_SYM_PAUSE |
3694                                MR_LP_ADV_ASYM_PAUSE |
3695                                MR_LP_ADV_REMOTE_FAULT1 |
3696                                MR_LP_ADV_REMOTE_FAULT2 |
3697                                MR_LP_ADV_NEXT_PAGE |
3698                                MR_TOGGLE_RX |
3699                                MR_NP_RX);
3700                 if (ap->rxconfig & ANEG_CFG_FD)
3701                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3702                 if (ap->rxconfig & ANEG_CFG_HD)
3703                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3704                 if (ap->rxconfig & ANEG_CFG_PS1)
3705                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
3706                 if (ap->rxconfig & ANEG_CFG_PS2)
3707                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3708                 if (ap->rxconfig & ANEG_CFG_RF1)
3709                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3710                 if (ap->rxconfig & ANEG_CFG_RF2)
3711                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3712                 if (ap->rxconfig & ANEG_CFG_NP)
3713                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
3714
3715                 ap->link_time = ap->cur_time;
3716
3717                 ap->flags ^= (MR_TOGGLE_TX);
3718                 if (ap->rxconfig & 0x0008)
3719                         ap->flags |= MR_TOGGLE_RX;
3720                 if (ap->rxconfig & ANEG_CFG_NP)
3721                         ap->flags |= MR_NP_RX;
3722                 ap->flags |= MR_PAGE_RX;
3723
3724                 ap->state = ANEG_STATE_COMPLETE_ACK;
3725                 ret = ANEG_TIMER_ENAB;
3726                 break;
3727
3728         case ANEG_STATE_COMPLETE_ACK:
3729                 if (ap->ability_match != 0 &&
3730                     ap->rxconfig == 0) {
3731                         ap->state = ANEG_STATE_AN_ENABLE;
3732                         break;
3733                 }
3734                 delta = ap->cur_time - ap->link_time;
3735                 if (delta > ANEG_STATE_SETTLE_TIME) {
3736                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3737                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3738                         } else {
3739                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3740                                     !(ap->flags & MR_NP_RX)) {
3741                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3742                                 } else {
3743                                         ret = ANEG_FAILED;
3744                                 }
3745                         }
3746                 }
3747                 break;
3748
3749         case ANEG_STATE_IDLE_DETECT_INIT:
3750                 ap->link_time = ap->cur_time;
3751                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3752                 tw32_f(MAC_MODE, tp->mac_mode);
3753                 udelay(40);
3754
3755                 ap->state = ANEG_STATE_IDLE_DETECT;
3756                 ret = ANEG_TIMER_ENAB;
3757                 break;
3758
3759         case ANEG_STATE_IDLE_DETECT:
3760                 if (ap->ability_match != 0 &&
3761                     ap->rxconfig == 0) {
3762                         ap->state = ANEG_STATE_AN_ENABLE;
3763                         break;
3764                 }
3765                 delta = ap->cur_time - ap->link_time;
3766                 if (delta > ANEG_STATE_SETTLE_TIME) {
3767                         /* XXX another gem from the Broadcom driver :( */
3768                         ap->state = ANEG_STATE_LINK_OK;
3769                 }
3770                 break;
3771
3772         case ANEG_STATE_LINK_OK:
3773                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3774                 ret = ANEG_DONE;
3775                 break;
3776
3777         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3778                 /* ??? unimplemented */
3779                 break;
3780
3781         case ANEG_STATE_NEXT_PAGE_WAIT:
3782                 /* ??? unimplemented */
3783                 break;
3784
3785         default:
3786                 ret = ANEG_FAILED;
3787                 break;
3788         }
3789
3790         return ret;
3791 }
3792
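/* Run the software autoneg state machine to completion, bounded at
 * roughly 195ms (195000 ticks of udelay(1)).  Returns 1 when the state
 * machine finishes with at least one of the AN-complete, link-OK or
 * LP-full-duplex flags set; the final tx/rx config words are returned
 * through @txflags and @rxflags.
 */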
3793 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3794 {
3795         int res = 0;
3796         struct tg3_fiber_aneginfo aninfo;
3797         int status = ANEG_FAILED;
3798         unsigned int tick;
3799         u32 tmp;
3800
3801         tw32_f(MAC_TX_AUTO_NEG, 0);
3802
3803         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3804         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3805         udelay(40);
3806
3807         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3808         udelay(40);
3809
3810         memset(&aninfo, 0, sizeof(aninfo));
3811         aninfo.flags |= MR_AN_ENABLE;
3812         aninfo.state = ANEG_STATE_UNKNOWN;
3813         aninfo.cur_time = 0;
3814         tick = 0;
3815         while (++tick < 195000) {
3816                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3817                 if (status == ANEG_DONE || status == ANEG_FAILED)
3818                         break;
3819
3820                 udelay(1);
3821         }
3822
3823         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3824         tw32_f(MAC_MODE, tp->mac_mode);
3825         udelay(40);
3826
3827         *txflags = aninfo.txconfig;
3828         *rxflags = aninfo.flags;
3829
3830         if (status == ANEG_DONE &&
3831             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3832                              MR_LP_ADV_FULL_DUPLEX)))
3833                 res = 1;
3834
3835         return res;
3836 }
3837
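/* One-time initialization of the BCM8002 SerDes PHY: set the PLL lock
 * range, software-reset the device, select the channel registers and
 * pulse POR, using vendor-supplied register values.
 */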
3838 static void tg3_init_bcm8002(struct tg3 *tp)
3839 {
3840         u32 mac_status = tr32(MAC_STATUS);
3841         int i;
3842
3843         /* Reset when initting first time or we have a link. */
3844         if (tg3_flag(tp, INIT_COMPLETE) &&
3845             !(mac_status & MAC_STATUS_PCS_SYNCED))
3846                 return;
3847
3848         /* Set PLL lock range. */
3849         tg3_writephy(tp, 0x16, 0x8007);
3850
3851         /* SW reset */
3852         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3853
3854         /* Wait for reset to complete. */
3855         /* XXX schedule_timeout() ... */
3856         for (i = 0; i < 500; i++)
3857                 udelay(10);
3858
3859         /* Config mode; select PMA/Ch 1 regs. */
3860         tg3_writephy(tp, 0x10, 0x8411);
3861
3862         /* Enable auto-lock and comdet, select txclk for tx. */
3863         tg3_writephy(tp, 0x11, 0x0a10);
3864
3865         tg3_writephy(tp, 0x18, 0x00a0);
3866         tg3_writephy(tp, 0x16, 0x41ff);
3867
3868         /* Assert and deassert POR. */
3869         tg3_writephy(tp, 0x13, 0x0400);
3870         udelay(40);
3871         tg3_writephy(tp, 0x13, 0x0000);
3872
3873         tg3_writephy(tp, 0x11, 0x0a50);
3874         udelay(40);
3875         tg3_writephy(tp, 0x11, 0x0a10);
3876
3877         /* Wait for signal to stabilize */
3878         /* XXX schedule_timeout() ... */
3879         for (i = 0; i < 15000; i++)
3880                 udelay(10);
3881
3882         /* Deselect the channel register so we can read the PHYID
3883          * later.
3884          */
3885         tg3_writephy(tp, 0x10, 0x8011);
3886 }
3887
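/* Fiber link setup using the hardware SG_DIG autoneg engine.  If the
 * partner never completes autoneg, fall back to parallel detection:
 * declare the link up when we have PCS sync and are not receiving
 * config code words.  Returns nonzero when the link is up.
 */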
3888 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3889 {
3890         u16 flowctrl;
3891         u32 sg_dig_ctrl, sg_dig_status;
3892         u32 serdes_cfg, expected_sg_dig_ctrl;
3893         int workaround, port_a;
3894         int current_link_up;
3895
3896         serdes_cfg = 0;
3897         expected_sg_dig_ctrl = 0;
3898         workaround = 0;
3899         port_a = 1;
3900         current_link_up = 0;
3901
3902         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3903             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3904                 workaround = 1;
3905                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3906                         port_a = 0;
3907
3908                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3909                 /* preserve bits 20-23 for voltage regulator */
3910                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3911         }
3912
3913         sg_dig_ctrl = tr32(SG_DIG_CTRL);
3914
3915         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3916                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3917                         if (workaround) {
3918                                 u32 val = serdes_cfg;
3919
3920                                 if (port_a)
3921                                         val |= 0xc010000;
3922                                 else
3923                                         val |= 0x4010000;
3924                                 tw32_f(MAC_SERDES_CFG, val);
3925                         }
3926
3927                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3928                 }
3929                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3930                         tg3_setup_flow_control(tp, 0, 0);
3931                         current_link_up = 1;
3932                 }
3933                 goto out;
3934         }
3935
3936         /* Want auto-negotiation. */
3937         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3938
3939         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3940         if (flowctrl & ADVERTISE_1000XPAUSE)
3941                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3942         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3943                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3944
3945         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3946                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3947                     tp->serdes_counter &&
3948                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
3949                                     MAC_STATUS_RCVD_CFG)) ==
3950                      MAC_STATUS_PCS_SYNCED)) {
3951                         tp->serdes_counter--;
3952                         current_link_up = 1;
3953                         goto out;
3954                 }
3955 restart_autoneg:
3956                 if (workaround)
3957                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3958                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3959                 udelay(5);
3960                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3961
3962                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3963                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3964         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3965                                  MAC_STATUS_SIGNAL_DET)) {
3966                 sg_dig_status = tr32(SG_DIG_STATUS);
3967                 mac_status = tr32(MAC_STATUS);
3968
3969                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3970                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
3971                         u32 local_adv = 0, remote_adv = 0;
3972
3973                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3974                                 local_adv |= ADVERTISE_1000XPAUSE;
3975                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3976                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
3977
3978                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3979                                 remote_adv |= LPA_1000XPAUSE;
3980                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3981                                 remote_adv |= LPA_1000XPAUSE_ASYM;
3982
3983                         tg3_setup_flow_control(tp, local_adv, remote_adv);
3984                         current_link_up = 1;
3985                         tp->serdes_counter = 0;
3986                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3987                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3988                         if (tp->serdes_counter)
3989                                 tp->serdes_counter--;
3990                         else {
3991                                 if (workaround) {
3992                                         u32 val = serdes_cfg;
3993
3994                                         if (port_a)
3995                                                 val |= 0xc010000;
3996                                         else
3997                                                 val |= 0x4010000;
3998
3999                                         tw32_f(MAC_SERDES_CFG, val);
4000                                 }
4001
4002                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4003                                 udelay(40);
4004
4005                                 /* Parallel detection: link is up
4006                                  * only if we have PCS_SYNC and are
4007                                  * not receiving config code words. */
4008                                 mac_status = tr32(MAC_STATUS);
4009                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4010                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4011                                         tg3_setup_flow_control(tp, 0, 0);
4012                                         current_link_up = 1;
4013                                         tp->phy_flags |=
4014                                                 TG3_PHYFLG_PARALLEL_DETECT;
4015                                         tp->serdes_counter =
4016                                                 SERDES_PARALLEL_DET_TIMEOUT;
4017                                 } else
4018                                         goto restart_autoneg;
4019                         }
4020                 }
4021         } else {
4022                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4023                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4024         }
4025
4026 out:
4027         return current_link_up;
4028 }
4029
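/* Fiber link setup without the hardware autoneg engine: run the
 * software state machine when autoneg is enabled, otherwise force a
 * 1000FD link.  Returns nonzero when the link is up.
 */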
4030 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4031 {
4032         int current_link_up = 0;
4033
4034         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4035                 goto out;
4036
4037         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4038                 u32 txflags, rxflags;
4039                 int i;
4040
4041                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4042                         u32 local_adv = 0, remote_adv = 0;
4043
4044                         if (txflags & ANEG_CFG_PS1)
4045                                 local_adv |= ADVERTISE_1000XPAUSE;
4046                         if (txflags & ANEG_CFG_PS2)
4047                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4048
4049                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
4050                                 remote_adv |= LPA_1000XPAUSE;
4051                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4052                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4053
4054                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4055
4056                         current_link_up = 1;
4057                 }
4058                 for (i = 0; i < 30; i++) {
4059                         udelay(20);
4060                         tw32_f(MAC_STATUS,
4061                                (MAC_STATUS_SYNC_CHANGED |
4062                                 MAC_STATUS_CFG_CHANGED));
4063                         udelay(40);
4064                         if ((tr32(MAC_STATUS) &
4065                              (MAC_STATUS_SYNC_CHANGED |
4066                               MAC_STATUS_CFG_CHANGED)) == 0)
4067                                 break;
4068                 }
4069
4070                 mac_status = tr32(MAC_STATUS);
4071                 if (current_link_up == 0 &&
4072                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
4073                     !(mac_status & MAC_STATUS_RCVD_CFG))
4074                         current_link_up = 1;
4075         } else {
4076                 tg3_setup_flow_control(tp, 0, 0);
4077
4078                 /* Forcing 1000FD link up. */
4079                 current_link_up = 1;
4080
4081                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4082                 udelay(40);
4083
4084                 tw32_f(MAC_MODE, tp->mac_mode);
4085                 udelay(40);
4086         }
4087
4088 out:
4089         return current_link_up;
4090 }
4091
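/* Top-level link setup for TBI fiber ports.  If the link is already
 * synced and stable, take the early-out path; otherwise rerun hardware
 * or by-hand autoneg and update carrier state and the link LEDs to
 * match the result.
 */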
4092 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4093 {
4094         u32 orig_pause_cfg;
4095         u16 orig_active_speed;
4096         u8 orig_active_duplex;
4097         u32 mac_status;
4098         int current_link_up;
4099         int i;
4100
4101         orig_pause_cfg = tp->link_config.active_flowctrl;
4102         orig_active_speed = tp->link_config.active_speed;
4103         orig_active_duplex = tp->link_config.active_duplex;
4104
4105         if (!tg3_flag(tp, HW_AUTONEG) &&
4106             netif_carrier_ok(tp->dev) &&
4107             tg3_flag(tp, INIT_COMPLETE)) {
4108                 mac_status = tr32(MAC_STATUS);
4109                 mac_status &= (MAC_STATUS_PCS_SYNCED |
4110                                MAC_STATUS_SIGNAL_DET |
4111                                MAC_STATUS_CFG_CHANGED |
4112                                MAC_STATUS_RCVD_CFG);
4113                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4114                                    MAC_STATUS_SIGNAL_DET)) {
4115                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4116                                             MAC_STATUS_CFG_CHANGED));
4117                         return 0;
4118                 }
4119         }
4120
4121         tw32_f(MAC_TX_AUTO_NEG, 0);
4122
4123         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4124         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4125         tw32_f(MAC_MODE, tp->mac_mode);
4126         udelay(40);
4127
4128         if (tp->phy_id == TG3_PHY_ID_BCM8002)
4129                 tg3_init_bcm8002(tp);
4130
4131         /* Enable link change event even when serdes polling.  */
4132         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4133         udelay(40);
4134
4135         current_link_up = 0;
4136         mac_status = tr32(MAC_STATUS);
4137
4138         if (tg3_flag(tp, HW_AUTONEG))
4139                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4140         else
4141                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4142
4143         tp->napi[0].hw_status->status =
4144                 (SD_STATUS_UPDATED |
4145                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4146
4147         for (i = 0; i < 100; i++) {
4148                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4149                                     MAC_STATUS_CFG_CHANGED));
4150                 udelay(5);
4151                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4152                                          MAC_STATUS_CFG_CHANGED |
4153                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4154                         break;
4155         }
4156
4157         mac_status = tr32(MAC_STATUS);
4158         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4159                 current_link_up = 0;
4160                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4161                     tp->serdes_counter == 0) {
4162                         tw32_f(MAC_MODE, (tp->mac_mode |
4163                                           MAC_MODE_SEND_CONFIGS));
4164                         udelay(1);
4165                         tw32_f(MAC_MODE, tp->mac_mode);
4166                 }
4167         }
4168
4169         if (current_link_up == 1) {
4170                 tp->link_config.active_speed = SPEED_1000;
4171                 tp->link_config.active_duplex = DUPLEX_FULL;
4172                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4173                                     LED_CTRL_LNKLED_OVERRIDE |
4174                                     LED_CTRL_1000MBPS_ON));
4175         } else {
4176                 tp->link_config.active_speed = SPEED_INVALID;
4177                 tp->link_config.active_duplex = DUPLEX_INVALID;
4178                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4179                                     LED_CTRL_LNKLED_OVERRIDE |
4180                                     LED_CTRL_TRAFFIC_OVERRIDE));
4181         }
4182
4183         if (current_link_up != netif_carrier_ok(tp->dev)) {
4184                 if (current_link_up)
4185                         netif_carrier_on(tp->dev);
4186                 else
4187                         netif_carrier_off(tp->dev);
4188                 tg3_link_report(tp);
4189         } else {
4190                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4191                 if (orig_pause_cfg != now_pause_cfg ||
4192                     orig_active_speed != tp->link_config.active_speed ||
4193                     orig_active_duplex != tp->link_config.active_duplex)
4194                         tg3_link_report(tp);
4195         }
4196
4197         return 0;
4198 }
4199
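/* Link setup for SerDes devices with an MII-like register interface
 * (such as the 5714S): standard BMCR/BMSR handling plus the 1000X
 * advertisement bits and a parallel-detect path managed through
 * serdes_counter.
 */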
4200 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4201 {
4202         int current_link_up, err = 0;
4203         u32 bmsr, bmcr;
4204         u16 current_speed;
4205         u8 current_duplex;
4206         u32 local_adv, remote_adv;
4207
4208         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4209         tw32_f(MAC_MODE, tp->mac_mode);
4210         udelay(40);
4211
4212         tw32(MAC_EVENT, 0);
4213
4214         tw32_f(MAC_STATUS,
4215              (MAC_STATUS_SYNC_CHANGED |
4216               MAC_STATUS_CFG_CHANGED |
4217               MAC_STATUS_MI_COMPLETION |
4218               MAC_STATUS_LNKSTATE_CHANGED));
4219         udelay(40);
4220
4221         if (force_reset)
4222                 tg3_phy_reset(tp);
4223
4224         current_link_up = 0;
4225         current_speed = SPEED_INVALID;
4226         current_duplex = DUPLEX_INVALID;
4227
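             /* BMSR_LSTATUS is latched low; read BMSR twice so the
              * second read reflects the current link state.
              */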
4228         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4229         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4230         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4231                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4232                         bmsr |= BMSR_LSTATUS;
4233                 else
4234                         bmsr &= ~BMSR_LSTATUS;
4235         }
4236
4237         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4238
4239         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4240             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4241                 /* do nothing, just check for link up at the end */
4242         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4243                 u32 adv, new_adv;
4244
4245                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4246                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4247                                   ADVERTISE_1000XPAUSE |
4248                                   ADVERTISE_1000XPSE_ASYM |
4249                                   ADVERTISE_SLCT);
4250
4251                 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4252
4253                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4254                         new_adv |= ADVERTISE_1000XHALF;
4255                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4256                         new_adv |= ADVERTISE_1000XFULL;
4257
4258                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4259                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
4260                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4261                         tg3_writephy(tp, MII_BMCR, bmcr);
4262
4263                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4264                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4265                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4266
4267                         return err;
4268                 }
4269         } else {
4270                 u32 new_bmcr;
4271
4272                 bmcr &= ~BMCR_SPEED1000;
4273                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4274
4275                 if (tp->link_config.duplex == DUPLEX_FULL)
4276                         new_bmcr |= BMCR_FULLDPLX;
4277
4278                 if (new_bmcr != bmcr) {
4279                         /* BMCR_SPEED1000 is a reserved bit that needs
4280                          * to be set on write.
4281                          */
4282                         new_bmcr |= BMCR_SPEED1000;
4283
4284                         /* Force a linkdown */
4285                         if (netif_carrier_ok(tp->dev)) {
4286                                 u32 adv;
4287
4288                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4289                                 adv &= ~(ADVERTISE_1000XFULL |
4290                                          ADVERTISE_1000XHALF |
4291                                          ADVERTISE_SLCT);
4292                                 tg3_writephy(tp, MII_ADVERTISE, adv);
4293                                 tg3_writephy(tp, MII_BMCR, bmcr |
4294                                                            BMCR_ANRESTART |
4295                                                            BMCR_ANENABLE);
4296                                 udelay(10);
4297                                 netif_carrier_off(tp->dev);
4298                         }
4299                         tg3_writephy(tp, MII_BMCR, new_bmcr);
4300                         bmcr = new_bmcr;
4301                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4302                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4303                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4304                             ASIC_REV_5714) {
4305                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4306                                         bmsr |= BMSR_LSTATUS;
4307                                 else
4308                                         bmsr &= ~BMSR_LSTATUS;
4309                         }
4310                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4311                 }
4312         }
4313
4314         if (bmsr & BMSR_LSTATUS) {
4315                 current_speed = SPEED_1000;
4316                 current_link_up = 1;
4317                 if (bmcr & BMCR_FULLDPLX)
4318                         current_duplex = DUPLEX_FULL;
4319                 else
4320                         current_duplex = DUPLEX_HALF;
4321
4322                 local_adv = 0;
4323                 remote_adv = 0;
4324
4325                 if (bmcr & BMCR_ANENABLE) {
4326                         u32 common;
4327
4328                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4329                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4330                         common = local_adv & remote_adv;
4331                         if (common & (ADVERTISE_1000XHALF |
4332                                       ADVERTISE_1000XFULL)) {
4333                                 if (common & ADVERTISE_1000XFULL)
4334                                         current_duplex = DUPLEX_FULL;
4335                                 else
4336                                         current_duplex = DUPLEX_HALF;
4337                         } else if (!tg3_flag(tp, 5780_CLASS)) {
4338                                 /* Link is up via parallel detect */
4339                         } else {
4340                                 current_link_up = 0;
4341                         }
4342                 }
4343         }
4344
4345         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4346                 tg3_setup_flow_control(tp, local_adv, remote_adv);
4347
4348         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4349         if (tp->link_config.active_duplex == DUPLEX_HALF)
4350                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4351
4352         tw32_f(MAC_MODE, tp->mac_mode);
4353         udelay(40);
4354
4355         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4356
4357         tp->link_config.active_speed = current_speed;
4358         tp->link_config.active_duplex = current_duplex;
4359
4360         if (current_link_up != netif_carrier_ok(tp->dev)) {
4361                 if (current_link_up)
4362                         netif_carrier_on(tp->dev);
4363                 else {
4364                         netif_carrier_off(tp->dev);
4365                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4366                 }
4367                 tg3_link_report(tp);
4368         }
4369         return err;
4370 }
4371
4372 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4373 {
4374         if (tp->serdes_counter) {
4375                 /* Give autoneg time to complete. */
4376                 tp->serdes_counter--;
4377                 return;
4378         }
4379
4380         if (!netif_carrier_ok(tp->dev) &&
4381             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4382                 u32 bmcr;
4383
4384                 tg3_readphy(tp, MII_BMCR, &bmcr);
4385                 if (bmcr & BMCR_ANENABLE) {
4386                         u32 phy1, phy2;
4387
4388                         /* Select shadow register 0x1f */
4389                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4390                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4391
4392                         /* Select expansion interrupt status register */
4393                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4394                                          MII_TG3_DSP_EXP1_INT_STAT);
4395                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4396                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4397
4398                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4399                                 /* We have signal detect and are not receiving
4400                                  * config code words; the link is up via
4401                                  * parallel detection.
4402                                  */
4403
4404                                 bmcr &= ~BMCR_ANENABLE;
4405                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4406                                 tg3_writephy(tp, MII_BMCR, bmcr);
4407                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4408                         }
4409                 }
4410         } else if (netif_carrier_ok(tp->dev) &&
4411                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4412                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4413                 u32 phy2;
4414
4415                 /* Select expansion interrupt status register */
4416                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4417                                  MII_TG3_DSP_EXP1_INT_STAT);
4418                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4419                 if (phy2 & 0x20) {
4420                         u32 bmcr;
4421
4422                         /* Config code words received, turn on autoneg. */
4423                         tg3_readphy(tp, MII_BMCR, &bmcr);
4424                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4425
4426                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4427
4428                 }
4429         }
4430 }
4431
4432 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4433 {
4434         u32 val;
4435         int err;
4436
4437         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4438                 err = tg3_setup_fiber_phy(tp, force_reset);
4439         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4440                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4441         else
4442                 err = tg3_setup_copper_phy(tp, force_reset);
4443
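             /* 5784 AX: rescale the GRC prescaler to the MAC clock the
              * CPMU currently reports (assumption: this keeps the GRC
              * timers at their nominal rate when the clock is throttled).
              */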
4444         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4445                 u32 scale;
4446
4447                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4448                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4449                         scale = 65;
4450                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4451                         scale = 6;
4452                 else
4453                         scale = 12;
4454
4455                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4456                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4457                 tw32(GRC_MISC_CFG, val);
4458         }
4459
4460         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4461               (6 << TX_LENGTHS_IPG_SHIFT);
4462         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4463                 val |= tr32(MAC_TX_LENGTHS) &
4464                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
4465                         TX_LENGTHS_CNT_DWN_VAL_MSK);
4466
4467         if (tp->link_config.active_speed == SPEED_1000 &&
4468             tp->link_config.active_duplex == DUPLEX_HALF)
4469                 tw32(MAC_TX_LENGTHS, val |
4470                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
4471         else
4472                 tw32(MAC_TX_LENGTHS, val |
4473                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4474
4475         if (!tg3_flag(tp, 5705_PLUS)) {
4476                 if (netif_carrier_ok(tp->dev)) {
4477                         tw32(HOSTCC_STAT_COAL_TICKS,
4478                              tp->coal.stats_block_coalesce_usecs);
4479                 } else {
4480                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
4481                 }
4482         }
4483
4484         if (tg3_flag(tp, ASPM_WORKAROUND)) {
4485                 val = tr32(PCIE_PWR_MGMT_THRESH);
4486                 if (!netif_carrier_ok(tp->dev))
4487                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4488                               tp->pwrmgmt_thresh;
4489                 else
4490                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4491                 tw32(PCIE_PWR_MGMT_THRESH, val);
4492         }
4493
4494         return err;
4495 }
4496
4497 static inline int tg3_irq_sync(struct tg3 *tp)
4498 {
4499         return tp->irq_sync;
4500 }
4501
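/* Snapshot 'len' bytes of registers starting at offset 'off' into the
 * caller's dump buffer.  Note that 'dst' is first advanced by 'off', so
 * each register lands at its own offset and the buffer mirrors the
 * register layout.
 */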
4502 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4503 {
4504         int i;
4505
4506         dst = (u32 *)((u8 *)dst + off);
4507         for (i = 0; i < len; i += sizeof(u32))
4508                 *dst++ = tr32(off + i);
4509 }
4510
4511 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4512 {
4513         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4514         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4515         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4516         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4517         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4518         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4519         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4520         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4521         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4522         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4523         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4524         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4525         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4526         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4527         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4528         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4529         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4530         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4531         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4532
4533         if (tg3_flag(tp, SUPPORT_MSIX))
4534                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4535
4536         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4537         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4538         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4539         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4540         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4541         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4542         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4543         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4544
4545         if (!tg3_flag(tp, 5705_PLUS)) {
4546                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4547                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4548                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4549         }
4550
4551         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4552         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4553         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4554         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4555         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4556
4557         if (tg3_flag(tp, NVRAM))
4558                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4559 }
4560
4561 static void tg3_dump_state(struct tg3 *tp)
4562 {
4563         int i;
4564         u32 *regs;
4565
4566         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4567         if (!regs) {
4568                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4569                 return;
4570         }
4571
4572         if (tg3_flag(tp, PCI_EXPRESS)) {
4573                 /* Read up to but not including private PCI registers */
4574                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4575                         regs[i / sizeof(u32)] = tr32(i);
4576         } else
4577                 tg3_dump_legacy_regs(tp, regs);
4578
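             /* Skip rows where all four registers read back as zero to
              * keep the dump compact.
              */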
4579         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4580                 if (!regs[i + 0] && !regs[i + 1] &&
4581                     !regs[i + 2] && !regs[i + 3])
4582                         continue;
4583
4584                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4585                            i * 4,
4586                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4587         }
4588
4589         kfree(regs);
4590
4591         for (i = 0; i < tp->irq_cnt; i++) {
4592                 struct tg3_napi *tnapi = &tp->napi[i];
4593
4594                 /* SW status block */
4595                 netdev_err(tp->dev,
4596                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4597                            i,
4598                            tnapi->hw_status->status,
4599                            tnapi->hw_status->status_tag,
4600                            tnapi->hw_status->rx_jumbo_consumer,
4601                            tnapi->hw_status->rx_consumer,
4602                            tnapi->hw_status->rx_mini_consumer,
4603                            tnapi->hw_status->idx[0].rx_producer,
4604                            tnapi->hw_status->idx[0].tx_consumer);
4605
4606                 netdev_err(tp->dev,
4607                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4608                            i,
4609                            tnapi->last_tag, tnapi->last_irq_tag,
4610                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4611                            tnapi->rx_rcb_ptr,
4612                            tnapi->prodring.rx_std_prod_idx,
4613                            tnapi->prodring.rx_std_cons_idx,
4614                            tnapi->prodring.rx_jmb_prod_idx,
4615                            tnapi->prodring.rx_jmb_cons_idx);
4616         }
4617 }
4618
4619 /* This is called whenever we suspect that the system chipset is re-
4620  * ordering the sequence of MMIO to the tx send mailbox. The symptom
4621  * is bogus tx completions. We try to recover by setting the
4622  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4623  * in the workqueue.
4624  */
4625 static void tg3_tx_recover(struct tg3 *tp)
4626 {
4627         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
4628                tp->write32_tx_mbox == tg3_write_indirect_mbox);
4629
4630         netdev_warn(tp->dev,
4631                     "The system may be re-ordering memory-mapped I/O "
4632                     "cycles to the network device, attempting to recover. "
4633                     "Please report the problem to the driver maintainer "
4634                     "and include system chipset information.\n");
4635
4636         spin_lock(&tp->lock);
4637         tg3_flag_set(tp, TX_RECOVERY_PENDING);
4638         spin_unlock(&tp->lock);
4639 }
4640
4641 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4642 {
4643         /* Tell compiler to fetch tx indices from memory. */
4644         barrier();
4645         return tnapi->tx_pending -
4646                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4647 }
4648
4649 /* Tigon3 never reports partial packet sends.  So we do not
4650  * need special logic to handle SKBs that have not had all
4651  * of their frags sent yet, like SunGEM does.
4652  */
4653 static void tg3_tx(struct tg3_napi *tnapi)
4654 {
4655         struct tg3 *tp = tnapi->tp;
4656         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4657         u32 sw_idx = tnapi->tx_cons;
4658         struct netdev_queue *txq;
4659         int index = tnapi - tp->napi;
4660
4661         if (tg3_flag(tp, ENABLE_TSS))
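             /* With TSS enabled the first vector carries no tx ring, so
              * tx queue 0 maps to tp->napi[1]; shift the index down.
              */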
4662                 index--;
4663
4664         txq = netdev_get_tx_queue(tp->dev, index);
4665
4666         while (sw_idx != hw_idx) {
4667                 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4668                 struct sk_buff *skb = ri->skb;
4669                 int i, tx_bug = 0;
4670
4671                 if (unlikely(skb == NULL)) {
4672                         tg3_tx_recover(tp);
4673                         return;
4674                 }
4675
4676                 pci_unmap_single(tp->pdev,
4677                                  dma_unmap_addr(ri, mapping),
4678                                  skb_headlen(skb),
4679                                  PCI_DMA_TODEVICE);
4680
4681                 ri->skb = NULL;
4682
4683                 sw_idx = NEXT_TX(sw_idx);
4684
4685                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4686                         ri = &tnapi->tx_buffers[sw_idx];
4687                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4688                                 tx_bug = 1;
4689
4690                         pci_unmap_page(tp->pdev,
4691                                        dma_unmap_addr(ri, mapping),
4692                                        skb_shinfo(skb)->frags[i].size,
4693                                        PCI_DMA_TODEVICE);
4694                         sw_idx = NEXT_TX(sw_idx);
4695                 }
4696
4697                 dev_kfree_skb(skb);
4698
4699                 if (unlikely(tx_bug)) {
4700                         tg3_tx_recover(tp);
4701                         return;
4702                 }
4703         }
4704
4705         tnapi->tx_cons = sw_idx;
4706
4707         /* Need to make the tx_cons update visible to tg3_start_xmit()
4708          * before checking for netif_queue_stopped().  Without the
4709          * memory barrier, there is a small possibility that tg3_start_xmit()
4710          * will miss it and cause the queue to be stopped forever.
4711          */
4712         smp_mb();
4713
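             /* Re-check with the tx queue lock held so the wakeup cannot
              * race with a concurrent tg3_start_xmit() stopping the queue.
              */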
4714         if (unlikely(netif_tx_queue_stopped(txq) &&
4715                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4716                 __netif_tx_lock(txq, smp_processor_id());
4717                 if (netif_tx_queue_stopped(txq) &&
4718                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4719                         netif_tx_wake_queue(txq);
4720                 __netif_tx_unlock(txq);
4721         }
4722 }
4723
4724 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4725 {
4726         if (!ri->skb)
4727                 return;
4728
4729         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4730                          map_sz, PCI_DMA_FROMDEVICE);
4731         dev_kfree_skb_any(ri->skb);
4732         ri->skb = NULL;
4733 }
4734
4735 /* Returns size of skb allocated or < 0 on error.
4736  *
4737  * We only need to fill in the address because the other members
4738  * of the RX descriptor are invariant; see tg3_init_rings.
4739  *
4740  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
4741  * posting buffers we only dirty the first cache line of the RX
4742  * descriptor (containing the address).  Whereas for the RX status
4743  * buffers the cpu only reads the last cacheline of the RX descriptor
4744  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4745  */
4746 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4747                             u32 opaque_key, u32 dest_idx_unmasked)
4748 {
4749         struct tg3_rx_buffer_desc *desc;
4750         struct ring_info *map;
4751         struct sk_buff *skb;
4752         dma_addr_t mapping;
4753         int skb_size, dest_idx;
4754
4755         switch (opaque_key) {
4756         case RXD_OPAQUE_RING_STD:
4757                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4758                 desc = &tpr->rx_std[dest_idx];
4759                 map = &tpr->rx_std_buffers[dest_idx];
4760                 skb_size = tp->rx_pkt_map_sz;
4761                 break;
4762
4763         case RXD_OPAQUE_RING_JUMBO:
4764                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4765                 desc = &tpr->rx_jmb[dest_idx].std;
4766                 map = &tpr->rx_jmb_buffers[dest_idx];
4767                 skb_size = TG3_RX_JMB_MAP_SZ;
4768                 break;
4769
4770         default:
4771                 return -EINVAL;
4772         }
4773
4774         /* Do not overwrite any of the map or rp information
4775          * until we are sure we can commit to a new buffer.
4776          *
4777          * Callers depend upon this behavior and assume that
4778          * we leave everything unchanged if we fail.
4779          */
4780         skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4781         if (skb == NULL)
4782                 return -ENOMEM;
4783
4784         skb_reserve(skb, tp->rx_offset);
4785
4786         mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4787                                  PCI_DMA_FROMDEVICE);
4788         if (pci_dma_mapping_error(tp->pdev, mapping)) {
4789                 dev_kfree_skb(skb);
4790                 return -EIO;
4791         }
4792
4793         map->skb = skb;
4794         dma_unmap_addr_set(map, mapping, mapping);
4795
4796         desc->addr_hi = ((u64)mapping >> 32);
4797         desc->addr_lo = ((u64)mapping & 0xffffffff);
4798
4799         return skb_size;
4800 }
4801
4802 /* We only need to move the address over because the other
4803  * members of the RX descriptor are invariant.  See notes above
4804  * tg3_alloc_rx_skb for full details.
4805  */
4806 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4807                            struct tg3_rx_prodring_set *dpr,
4808                            u32 opaque_key, int src_idx,
4809                            u32 dest_idx_unmasked)
4810 {
4811         struct tg3 *tp = tnapi->tp;
4812         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4813         struct ring_info *src_map, *dest_map;
4814         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4815         int dest_idx;
4816
4817         switch (opaque_key) {
4818         case RXD_OPAQUE_RING_STD:
4819                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4820                 dest_desc = &dpr->rx_std[dest_idx];
4821                 dest_map = &dpr->rx_std_buffers[dest_idx];
4822                 src_desc = &spr->rx_std[src_idx];
4823                 src_map = &spr->rx_std_buffers[src_idx];
4824                 break;
4825
4826         case RXD_OPAQUE_RING_JUMBO:
4827                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4828                 dest_desc = &dpr->rx_jmb[dest_idx].std;
4829                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4830                 src_desc = &spr->rx_jmb[src_idx].std;
4831                 src_map = &spr->rx_jmb_buffers[src_idx];
4832                 break;
4833
4834         default:
4835                 return;
4836         }
4837
4838         dest_map->skb = src_map->skb;
4839         dma_unmap_addr_set(dest_map, mapping,
4840                            dma_unmap_addr(src_map, mapping));
4841         dest_desc->addr_hi = src_desc->addr_hi;
4842         dest_desc->addr_lo = src_desc->addr_lo;
4843
4844         /* Ensure that the update to the skb happens after the physical
4845          * addresses have been transferred to the new BD location.
4846          */
4847         smp_wmb();
4848
4849         src_map->skb = NULL;
4850 }
4851
4852 /* The RX ring scheme is composed of multiple rings which post fresh
4853  * buffers to the chip, and one special ring the chip uses to report
4854  * status back to the host.
4855  *
4856  * The special ring reports the status of received packets to the
4857  * host.  The chip does not write into the original descriptor the
4858  * RX buffer was obtained from.  The chip simply takes the original
4859  * descriptor as provided by the host, updates the status and length
4860  * field, then writes this into the next status ring entry.
4861  *
4862  * Each ring the host uses to post buffers to the chip is described
4863  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
4864  * it is first placed into the on-chip ram.  When the packet's length
4865  * is known, the chip walks down the TG3_BDINFO entries to select the ring.
4866  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
4867  * whose MAXLEN covers the new packet's length is chosen.
4868  *
4869  * The "separate ring for rx status" scheme may sound queer, but it makes
4870  * sense from a cache coherency perspective.  If only the host writes
4871  * to the buffer post rings, and only the chip writes to the rx status
4872  * rings, then cache lines never move beyond shared-modified state.
4873  * If both the host and chip were to write into the same ring, cache line
4874  * eviction could occur since both entities want it in an exclusive state.
4875  */
4876 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4877 {
4878         struct tg3 *tp = tnapi->tp;
4879         u32 work_mask, rx_std_posted = 0;
4880         u32 std_prod_idx, jmb_prod_idx;
4881         u32 sw_idx = tnapi->rx_rcb_ptr;
4882         u16 hw_idx;
4883         int received;
4884         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
4885
4886         hw_idx = *(tnapi->rx_rcb_prod_idx);
4887         /*
4888          * We need to order the read of hw_idx and the read of
4889          * the opaque cookie.
4890          */
4891         rmb();
4892         work_mask = 0;
4893         received = 0;
4894         std_prod_idx = tpr->rx_std_prod_idx;
4895         jmb_prod_idx = tpr->rx_jmb_prod_idx;
4896         while (sw_idx != hw_idx && budget > 0) {
4897                 struct ring_info *ri;
4898                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4899                 unsigned int len;
4900                 struct sk_buff *skb;
4901                 dma_addr_t dma_addr;
4902                 u32 opaque_key, desc_idx, *post_ptr;
4903
4904                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4905                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4906                 if (opaque_key == RXD_OPAQUE_RING_STD) {
4907                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
4908                         dma_addr = dma_unmap_addr(ri, mapping);
4909                         skb = ri->skb;
4910                         post_ptr = &std_prod_idx;
4911                         rx_std_posted++;
4912                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4913                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
4914                         dma_addr = dma_unmap_addr(ri, mapping);
4915                         skb = ri->skb;
4916                         post_ptr = &jmb_prod_idx;
4917                 } else
4918                         goto next_pkt_nopost;
4919
4920                 work_mask |= opaque_key;
4921
4922                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4923                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4924                 drop_it:
4925                         tg3_recycle_rx(tnapi, tpr, opaque_key,
4926                                        desc_idx, *post_ptr);
4927                 drop_it_no_recycle:
4928                         /* Other statistics are tracked by the card itself. */
4929                         tp->rx_dropped++;
4930                         goto next_pkt;
4931                 }
4932
4933                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4934                       ETH_FCS_LEN;
4935
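                     /* Copybreak: for large packets, post a fresh buffer
                      * and hand the received one up whole; for small
                      * packets, copy into a new skb and recycle the
                      * original ring buffer.
                      */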
4936                 if (len > TG3_RX_COPY_THRESH(tp)) {
4937                         int skb_size;
4938
4939                         skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4940                                                     *post_ptr);
4941                         if (skb_size < 0)
4942                                 goto drop_it;
4943
4944                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
4945                                          PCI_DMA_FROMDEVICE);
4946
4947                         /* Ensure that the update to the skb happens
4948                          * after the usage of the old DMA mapping.
4949                          */
4950                         smp_wmb();
4951
4952                         ri->skb = NULL;
4953
4954                         skb_put(skb, len);
4955                 } else {
4956                         struct sk_buff *copy_skb;
4957
4958                         tg3_recycle_rx(tnapi, tpr, opaque_key,
4959                                        desc_idx, *post_ptr);
4960
4961                         copy_skb = netdev_alloc_skb(tp->dev, len +
4962                                                     TG3_RAW_IP_ALIGN);
4963                         if (copy_skb == NULL)
4964                                 goto drop_it_no_recycle;
4965
4966                         skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4967                         skb_put(copy_skb, len);
4968                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4969                         skb_copy_from_linear_data(skb, copy_skb->data, len);
4970                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4971
4972                         /* We'll reuse the original ring buffer. */
4973                         skb = copy_skb;
4974                 }
4975
4976                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
4977                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4978                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4979                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
4980                         skb->ip_summed = CHECKSUM_UNNECESSARY;
4981                 else
4982                         skb_checksum_none_assert(skb);
4983
4984                 skb->protocol = eth_type_trans(skb, tp->dev);
4985
4986                 if (len > (tp->dev->mtu + ETH_HLEN) &&
4987                     skb->protocol != htons(ETH_P_8021Q)) {
4988                         dev_kfree_skb(skb);
4989                         goto drop_it_no_recycle;
4990                 }
4991
4992                 if (desc->type_flags & RXD_FLAG_VLAN &&
4993                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
4994                         __vlan_hwaccel_put_tag(skb,
4995                                                desc->err_vlan & RXD_VLAN_MASK);
4996
4997                 napi_gro_receive(&tnapi->napi, skb);
4998
4999                 received++;
5000                 budget--;
5001
5002 next_pkt:
5003                 (*post_ptr)++;
5004
5005                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5006                         tpr->rx_std_prod_idx = std_prod_idx &
5007                                                tp->rx_std_ring_mask;
5008                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5009                                      tpr->rx_std_prod_idx);
5010                         work_mask &= ~RXD_OPAQUE_RING_STD;
5011                         rx_std_posted = 0;
5012                 }
5013 next_pkt_nopost:
5014                 sw_idx++;
5015                 sw_idx &= tp->rx_ret_ring_mask;
5016
5017                 /* Refresh hw_idx to see if there is new work */
5018                 if (sw_idx == hw_idx) {
5019                         hw_idx = *(tnapi->rx_rcb_prod_idx);
5020                         rmb();
5021                 }
5022         }
5023
5024         /* ACK the status ring. */
5025         tnapi->rx_rcb_ptr = sw_idx;
5026         tw32_rx_mbox(tnapi->consmbox, sw_idx);
5027
5028         /* Refill RX ring(s). */
5029         if (!tg3_flag(tp, ENABLE_RSS)) {
5030                 if (work_mask & RXD_OPAQUE_RING_STD) {
5031                         tpr->rx_std_prod_idx = std_prod_idx &
5032                                                tp->rx_std_ring_mask;
5033                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5034                                      tpr->rx_std_prod_idx);
5035                 }
5036                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5037                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
5038                                                tp->rx_jmb_ring_mask;
5039                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5040                                      tpr->rx_jmb_prod_idx);
5041                 }
5042                 mmiowb();
5043         } else if (work_mask) {
5044                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5045                  * updated before the producer indices can be updated.
5046                  */
5047                 smp_wmb();
5048
5049                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5050                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5051
5052                 if (tnapi != &tp->napi[1])
5053                         napi_schedule(&tp->napi[1].napi);
5054         }
5055
5056         return received;
5057 }
5058
5059 static void tg3_poll_link(struct tg3 *tp)
5060 {
5061         /* handle link change and other phy events */
5062         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5063                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5064
5065                 if (sblk->status & SD_STATUS_LINK_CHG) {
5066                         sblk->status = SD_STATUS_UPDATED |
5067                                        (sblk->status & ~SD_STATUS_LINK_CHG);
5068                         spin_lock(&tp->lock);
5069                         if (tg3_flag(tp, USE_PHYLIB)) {
5070                                 tw32_f(MAC_STATUS,
5071                                      (MAC_STATUS_SYNC_CHANGED |
5072                                       MAC_STATUS_CFG_CHANGED |
5073                                       MAC_STATUS_MI_COMPLETION |
5074                                       MAC_STATUS_LNKSTATE_CHANGED));
5075                                 udelay(40);
5076                         } else
5077                                 tg3_setup_phy(tp, 0);
5078                         spin_unlock(&tp->lock);
5079                 }
5080         }
5081 }
5082
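/* Transfer buffers from the source producer ring @spr to the destination
 * ring @dpr in contiguous runs.  If a destination slot is still occupied,
 * copy what fits and return -ENOSPC; the caller reacts by forcing another
 * service pass (HOSTCC_MODE, coal_now).  Used in RSS mode to funnel the
 * per-vector rings into ring 0, the one posted to the hardware.
 */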
5083 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5084                                 struct tg3_rx_prodring_set *dpr,
5085                                 struct tg3_rx_prodring_set *spr)
5086 {
5087         u32 si, di, cpycnt, src_prod_idx;
5088         int i, err = 0;
5089
5090         while (1) {
5091                 src_prod_idx = spr->rx_std_prod_idx;
5092
5093                 /* Make sure updates to the rx_std_buffers[] entries and the
5094                  * standard producer index are seen in the correct order.
5095                  */
5096                 smp_rmb();
5097
5098                 if (spr->rx_std_cons_idx == src_prod_idx)
5099                         break;
5100
5101                 if (spr->rx_std_cons_idx < src_prod_idx)
5102                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5103                 else
5104                         cpycnt = tp->rx_std_ring_mask + 1 -
5105                                  spr->rx_std_cons_idx;
5106
5107                 cpycnt = min(cpycnt,
5108                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5109
5110                 si = spr->rx_std_cons_idx;
5111                 di = dpr->rx_std_prod_idx;
5112
5113                 for (i = di; i < di + cpycnt; i++) {
5114                         if (dpr->rx_std_buffers[i].skb) {
5115                                 cpycnt = i - di;
5116                                 err = -ENOSPC;
5117                                 break;
5118                         }
5119                 }
5120
5121                 if (!cpycnt)
5122                         break;
5123
5124                 /* Ensure that updates to the rx_std_buffers ring and the
5125                  * shadowed hardware producer ring from tg3_recycle_skb() are
5126                  * ordered correctly WRT the skb check above.
5127                  */
5128                 smp_rmb();
5129
5130                 memcpy(&dpr->rx_std_buffers[di],
5131                        &spr->rx_std_buffers[si],
5132                        cpycnt * sizeof(struct ring_info));
5133
5134                 for (i = 0; i < cpycnt; i++, di++, si++) {
5135                         struct tg3_rx_buffer_desc *sbd, *dbd;
5136                         sbd = &spr->rx_std[si];
5137                         dbd = &dpr->rx_std[di];
5138                         dbd->addr_hi = sbd->addr_hi;
5139                         dbd->addr_lo = sbd->addr_lo;
5140                 }
5141
5142                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5143                                        tp->rx_std_ring_mask;
5144                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5145                                        tp->rx_std_ring_mask;
5146         }
5147
5148         while (1) {
5149                 src_prod_idx = spr->rx_jmb_prod_idx;
5150
5151                 /* Make sure updates to the rx_jmb_buffers[] entries and
5152                  * the jumbo producer index are seen in the correct order.
5153                  */
5154                 smp_rmb();
5155
5156                 if (spr->rx_jmb_cons_idx == src_prod_idx)
5157                         break;
5158
5159                 if (spr->rx_jmb_cons_idx < src_prod_idx)
5160                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5161                 else
5162                         cpycnt = tp->rx_jmb_ring_mask + 1 -
5163                                  spr->rx_jmb_cons_idx;
5164
5165                 cpycnt = min(cpycnt,
5166                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5167
5168                 si = spr->rx_jmb_cons_idx;
5169                 di = dpr->rx_jmb_prod_idx;
5170
5171                 for (i = di; i < di + cpycnt; i++) {
5172                         if (dpr->rx_jmb_buffers[i].skb) {
5173                                 cpycnt = i - di;
5174                                 err = -ENOSPC;
5175                                 break;
5176                         }
5177                 }
5178
5179                 if (!cpycnt)
5180                         break;
5181
5182                 /* Ensure that updates to the rx_jmb_buffers ring and the
5183                  * shadowed hardware producer ring from tg3_recycle_skb() are
5184                  * ordered correctly WRT the skb check above.
5185                  */
5186                 smp_rmb();
5187
5188                 memcpy(&dpr->rx_jmb_buffers[di],
5189                        &spr->rx_jmb_buffers[si],
5190                        cpycnt * sizeof(struct ring_info));
5191
5192                 for (i = 0; i < cpycnt; i++, di++, si++) {
5193                         struct tg3_rx_buffer_desc *sbd, *dbd;
5194                         sbd = &spr->rx_jmb[si].std;
5195                         dbd = &dpr->rx_jmb[di].std;
5196                         dbd->addr_hi = sbd->addr_hi;
5197                         dbd->addr_lo = sbd->addr_lo;
5198                 }
5199
5200                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5201                                        tp->rx_jmb_ring_mask;
5202                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5203                                        tp->rx_jmb_ring_mask;
5204         }
5205
5206         return err;
5207 }
5208
5209 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5210 {
5211         struct tg3 *tp = tnapi->tp;
5212
5213         /* run TX completion thread */
5214         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5215                 tg3_tx(tnapi);
5216                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5217                         return work_done;
5218         }
5219
5220         /* run RX thread, within the bounds set by NAPI.
5221          * All RX "locking" is done by ensuring outside
5222          * code synchronizes with tg3->napi.poll()
5223          */
5224         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5225                 work_done += tg3_rx(tnapi, budget - work_done);
5226
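             /* In RSS mode vector 1 acts as the refill vector: it gathers
              * the buffers recycled by every other vector into ring 0,
              * whose producer indices are the ones written to the
              * hardware mailboxes below.
              */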
5227         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5228                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5229                 int i, err = 0;
5230                 u32 std_prod_idx = dpr->rx_std_prod_idx;
5231                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5232
5233                 for (i = 1; i < tp->irq_cnt; i++)
5234                         err |= tg3_rx_prodring_xfer(tp, dpr,
5235                                                     &tp->napi[i].prodring);
5236
5237                 wmb();
5238
5239                 if (std_prod_idx != dpr->rx_std_prod_idx)
5240                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5241                                      dpr->rx_std_prod_idx);
5242
5243                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5244                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5245                                      dpr->rx_jmb_prod_idx);
5246
5247                 mmiowb();
5248
5249                 if (err)
5250                         tw32_f(HOSTCC_MODE, tp->coal_now);
5251         }
5252
5253         return work_done;
5254 }
5255
5256 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5257 {
5258         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5259         struct tg3 *tp = tnapi->tp;
5260         int work_done = 0;
5261         struct tg3_hw_status *sblk = tnapi->hw_status;
5262
5263         while (1) {
5264                 work_done = tg3_poll_work(tnapi, work_done, budget);
5265
5266                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5267                         goto tx_recovery;
5268
5269                 if (unlikely(work_done >= budget))
5270                         break;
5271
5272                 /* tp->last_tag is used in the interrupt re-enable write
5273                  * below to tell the hw how much work has been processed,
5274                  * so we must read it before checking for more work.
5275                  */
5276                 tnapi->last_tag = sblk->status_tag;
5277                 tnapi->last_irq_tag = tnapi->last_tag;
5278                 rmb();
5279
5280                 /* check for RX/TX work to do */
5281                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5282                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5283                         napi_complete(napi);
5284                         /* Reenable interrupts. */
5285                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5286                         mmiowb();
5287                         break;
5288                 }
5289         }
5290
5291         return work_done;
5292
5293 tx_recovery:
5294         /* work_done is guaranteed to be less than budget. */
5295         napi_complete(napi);
5296         schedule_work(&tp->reset_task);
5297         return work_done;
5298 }
5299
5300 static void tg3_process_error(struct tg3 *tp)
5301 {
5302         u32 val;
5303         bool real_error = false;
5304
5305         if (tg3_flag(tp, ERROR_PROCESSED))
5306                 return;
5307
5308         /* Check Flow Attention register */
5309         val = tr32(HOSTCC_FLOW_ATTN);
5310         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5311                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
5312                 real_error = true;
5313         }
5314
5315         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5316                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
5317                 real_error = true;
5318         }
5319
5320         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5321                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
5322                 real_error = true;
5323         }
5324
5325         if (!real_error)
5326                 return;
5327
5328         tg3_dump_state(tp);
5329
5330         tg3_flag_set(tp, ERROR_PROCESSED);
5331         schedule_work(&tp->reset_task);
5332 }
5333
5334 static int tg3_poll(struct napi_struct *napi, int budget)
5335 {
5336         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5337         struct tg3 *tp = tnapi->tp;
5338         int work_done = 0;
5339         struct tg3_hw_status *sblk = tnapi->hw_status;
5340
5341         while (1) {
5342                 if (sblk->status & SD_STATUS_ERROR)
5343                         tg3_process_error(tp);
5344
5345                 tg3_poll_link(tp);
5346
5347                 work_done = tg3_poll_work(tnapi, work_done, budget);
5348
5349                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5350                         goto tx_recovery;
5351
5352                 if (unlikely(work_done >= budget))
5353                         break;
5354
5355                 if (tg3_flag(tp, TAGGED_STATUS)) {
5356                         /* tp->last_tag is used in tg3_int_reenable() below
5357                          * to tell the hw how much work has been processed,
5358                          * so we must read it before checking for more work.
5359                          */
5360                         tnapi->last_tag = sblk->status_tag;
5361                         tnapi->last_irq_tag = tnapi->last_tag;
5362                         rmb();
5363                 } else
5364                         sblk->status &= ~SD_STATUS_UPDATED;
5365
5366                 if (likely(!tg3_has_work(tnapi))) {
5367                         napi_complete(napi);
5368                         tg3_int_reenable(tnapi);
5369                         break;
5370                 }
5371         }
5372
5373         return work_done;
5374
5375 tx_recovery:
5376         /* work_done is guaranteed to be less than budget. */
5377         napi_complete(napi);
5378         schedule_work(&tp->reset_task);
5379         return work_done;
5380 }
5381
5382 static void tg3_napi_disable(struct tg3 *tp)
5383 {
5384         int i;
5385
5386         for (i = tp->irq_cnt - 1; i >= 0; i--)
5387                 napi_disable(&tp->napi[i].napi);
5388 }
5389
5390 static void tg3_napi_enable(struct tg3 *tp)
5391 {
5392         int i;
5393
5394         for (i = 0; i < tp->irq_cnt; i++)
5395                 napi_enable(&tp->napi[i].napi);
5396 }
5397
5398 static void tg3_napi_init(struct tg3 *tp)
5399 {
5400         int i;
5401
5402         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5403         for (i = 1; i < tp->irq_cnt; i++)
5404                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5405 }
5406
5407 static void tg3_napi_fini(struct tg3 *tp)
5408 {
5409         int i;
5410
5411         for (i = 0; i < tp->irq_cnt; i++)
5412                 netif_napi_del(&tp->napi[i].napi);
5413 }
5414
5415 static inline void tg3_netif_stop(struct tg3 *tp)
5416 {
5417         tp->dev->trans_start = jiffies; /* prevent tx timeout */
5418         tg3_napi_disable(tp);
5419         netif_tx_disable(tp->dev);
5420 }
5421
5422 static inline void tg3_netif_start(struct tg3 *tp)
5423 {
5424         /* NOTE: unconditional netif_tx_wake_all_queues is only
5425          * appropriate so long as all callers are assured to
5426          * have free tx slots (such as after tg3_init_hw)
5427          */
5428         netif_tx_wake_all_queues(tp->dev);
5429
5430         tg3_napi_enable(tp);
5431         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5432         tg3_enable_ints(tp);
5433 }
5434
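/* Mark the driver as quiescing interrupts and wait for all handlers
 * already in flight to finish.  The smp_mb() orders the irq_sync store
 * against the handlers' tg3_irq_sync() test.
 */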
5435 static void tg3_irq_quiesce(struct tg3 *tp)
5436 {
5437         int i;
5438
5439         BUG_ON(tp->irq_sync);
5440
5441         tp->irq_sync = 1;
5442         smp_mb();
5443
5444         for (i = 0; i < tp->irq_cnt; i++)
5445                 synchronize_irq(tp->napi[i].irq_vec);
5446 }
5447
5448 /* Fully shutdown all tg3 driver activity elsewhere in the system.
5449  * If irq_sync is non-zero, the IRQ handlers must be synchronized with
5450  * as well.  Most of the time this is not necessary, except when
5451  * shutting down the device.
5452  */
5453 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5454 {
5455         spin_lock_bh(&tp->lock);
5456         if (irq_sync)
5457                 tg3_irq_quiesce(tp);
5458 }
5459
5460 static inline void tg3_full_unlock(struct tg3 *tp)
5461 {
5462         spin_unlock_bh(&tp->lock);
5463 }
5464
5465 /* One-shot MSI handler - Chip automatically disables interrupt
5466  * after sending MSI so driver doesn't have to do it.
5467  */
5468 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5469 {
5470         struct tg3_napi *tnapi = dev_id;
5471         struct tg3 *tp = tnapi->tp;
5472
5473         prefetch(tnapi->hw_status);
5474         if (tnapi->rx_rcb)
5475                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5476
5477         if (likely(!tg3_irq_sync(tp)))
5478                 napi_schedule(&tnapi->napi);
5479
5480         return IRQ_HANDLED;
5481 }
5482
5483 /* MSI ISR - No need to check for interrupt sharing and no need to
5484  * flush status block and interrupt mailbox. PCI ordering rules
5485  * guarantee that MSI will arrive after the status block.
5486  */
5487 static irqreturn_t tg3_msi(int irq, void *dev_id)
5488 {
5489         struct tg3_napi *tnapi = dev_id;
5490         struct tg3 *tp = tnapi->tp;
5491
5492         prefetch(tnapi->hw_status);
5493         if (tnapi->rx_rcb)
5494                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5495         /*
5496          * Writing any value to intr-mbox-0 clears PCI INTA# and
5497          * chip-internal interrupt pending events.
5498          * Writing non-zero to intr-mbox-0 additionally tells the
5499          * NIC to stop sending us irqs, engaging "in-intr-handler"
5500          * event coalescing.
5501          */
5502         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5503         if (likely(!tg3_irq_sync(tp)))
5504                 napi_schedule(&tnapi->napi);
5505
5506         return IRQ_RETVAL(1);
5507 }
5508
5509 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5510 {
5511         struct tg3_napi *tnapi = dev_id;
5512         struct tg3 *tp = tnapi->tp;
5513         struct tg3_hw_status *sblk = tnapi->hw_status;
5514         unsigned int handled = 1;
5515
5516         /* In INTx mode, it is possible for the interrupt to arrive at
5517          * the CPU before the status block posted prior to it is visible.
5518          * Reading the PCI State register will confirm whether the
5519          * interrupt is ours and will flush the status block.
5520          */
5521         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5522                 if (tg3_flag(tp, CHIP_RESETTING) ||
5523                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5524                         handled = 0;
5525                         goto out;
5526                 }
5527         }
5528
5529         /*
5530          * Writing any value to intr-mbox-0 clears PCI INTA# and
5531          * chip-internal interrupt pending events.
5532          * Writing non-zero to intr-mbox-0 additionally tells the
5533          * NIC to stop sending us irqs, engaging "in-intr-handler"
5534          * event coalescing.
5535          *
5536          * Flush the mailbox to de-assert the IRQ immediately to prevent
5537          * spurious interrupts.  The flush impacts performance but
5538          * excessive spurious interrupts can be worse in some cases.
5539          */
5540         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5541         if (tg3_irq_sync(tp))
5542                 goto out;
5543         sblk->status &= ~SD_STATUS_UPDATED;
5544         if (likely(tg3_has_work(tnapi))) {
5545                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5546                 napi_schedule(&tnapi->napi);
5547         } else {
5548                 /* No work, shared interrupt perhaps?  re-enable
5549                  * interrupts, and flush that PCI write
5550                  */
5551                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5552                                0x00000000);
5553         }
5554 out:
5555         return IRQ_RETVAL(handled);
5556 }
5557
5558 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5559 {
5560         struct tg3_napi *tnapi = dev_id;
5561         struct tg3 *tp = tnapi->tp;
5562         struct tg3_hw_status *sblk = tnapi->hw_status;
5563         unsigned int handled = 1;
5564
5565         /* In INTx mode, it is possible for the interrupt to arrive at
5566          * the CPU before the status block posted prior to it is visible.
5567          * Reading the PCI State register will confirm whether the
5568          * interrupt is ours and will flush the status block.
5569          */
5570         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5571                 if (tg3_flag(tp, CHIP_RESETTING) ||
5572                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5573                         handled = 0;
5574                         goto out;
5575                 }
5576         }
5577
5578         /*
5579          * Writing any value to intr-mbox-0 clears PCI INTA# and
5580          * chip-internal interrupt pending events.
5581          * Writing non-zero to intr-mbox-0 additionally tells the
5582          * NIC to stop sending us irqs, engaging "in-intr-handler"
5583          * event coalescing.
5584          *
5585          * Flush the mailbox to de-assert the IRQ immediately to prevent
5586          * spurious interrupts.  The flush impacts performance but
5587          * excessive spurious interrupts can be worse in some cases.
5588          */
5589         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5590
5591         /*
5592          * In a shared interrupt configuration, sometimes other devices'
5593          * interrupts will scream.  We record the current status tag here
5594          * so that the above check can report that the screaming interrupts
5595          * are unhandled.  Eventually they will be silenced.
5596          */
5597         tnapi->last_irq_tag = sblk->status_tag;
5598
5599         if (tg3_irq_sync(tp))
5600                 goto out;
5601
5602         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5603
5604         napi_schedule(&tnapi->napi);
5605
5606 out:
5607         return IRQ_RETVAL(handled);
5608 }
5609
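/* The tag handshake above, in brief: the chip bumps status_tag each
 * time it posts a new status block.  If the tag still equals
 * last_irq_tag, nothing new has been posted and a screaming shared
 * interrupt can be reported as unhandled.  Once we commit to
 * servicing the interrupt, the new tag is recorded here and later
 * acknowledged back to the chip when NAPI polling completes.
 */
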
5610 /* ISR for interrupt test */
5611 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5612 {
5613         struct tg3_napi *tnapi = dev_id;
5614         struct tg3 *tp = tnapi->tp;
5615         struct tg3_hw_status *sblk = tnapi->hw_status;
5616
5617         if ((sblk->status & SD_STATUS_UPDATED) ||
5618             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5619                 tg3_disable_ints(tp);
5620                 return IRQ_RETVAL(1);
5621         }
5622         return IRQ_RETVAL(0);
5623 }
5624
5625 static int tg3_init_hw(struct tg3 *, int);
5626 static int tg3_halt(struct tg3 *, int, int);
5627
5628 /* Restart hardware after configuration changes, self-test, etc.
5629  * Invoked with tp->lock held.
5630  */
5631 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5632         __releases(tp->lock)
5633         __acquires(tp->lock)
5634 {
5635         int err;
5636
5637         err = tg3_init_hw(tp, reset_phy);
5638         if (err) {
5639                 netdev_err(tp->dev,
5640                            "Failed to re-initialize device, aborting\n");
5641                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5642                 tg3_full_unlock(tp);
5643                 del_timer_sync(&tp->timer);
5644                 tp->irq_sync = 0;
5645                 tg3_napi_enable(tp);
5646                 dev_close(tp->dev);
5647                 tg3_full_lock(tp, 0);
5648         }
5649         return err;
5650 }
5651
5652 #ifdef CONFIG_NET_POLL_CONTROLLER
5653 static void tg3_poll_controller(struct net_device *dev)
5654 {
5655         int i;
5656         struct tg3 *tp = netdev_priv(dev);
5657
5658         for (i = 0; i < tp->irq_cnt; i++)
5659                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5660 }
5661 #endif
5662
5663 static void tg3_reset_task(struct work_struct *work)
5664 {
5665         struct tg3 *tp = container_of(work, struct tg3, reset_task);
5666         int err;
5667         unsigned int restart_timer;
5668
5669         tg3_full_lock(tp, 0);
5670
5671         if (!netif_running(tp->dev)) {
5672                 tg3_full_unlock(tp);
5673                 return;
5674         }
5675
5676         tg3_full_unlock(tp);
5677
5678         tg3_phy_stop(tp);
5679
5680         tg3_netif_stop(tp);
5681
5682         tg3_full_lock(tp, 1);
5683
5684         restart_timer = tg3_flag(tp, RESTART_TIMER);
5685         tg3_flag_clear(tp, RESTART_TIMER);
5686
5687         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
5688                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5689                 tp->write32_rx_mbox = tg3_write_flush_reg32;
5690                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5691                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
5692         }
5693
5694         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5695         err = tg3_init_hw(tp, 1);
5696         if (err)
5697                 goto out;
5698
5699         tg3_netif_start(tp);
5700
5701         if (restart_timer)
5702                 mod_timer(&tp->timer, jiffies + 1);
5703
5704 out:
5705         tg3_full_unlock(tp);
5706
5707         if (!err)
5708                 tg3_phy_start(tp);
5709 }
5710
5711 static void tg3_tx_timeout(struct net_device *dev)
5712 {
5713         struct tg3 *tp = netdev_priv(dev);
5714
5715         if (netif_msg_tx_err(tp)) {
5716                 netdev_err(dev, "transmit timed out, resetting\n");
5717                 tg3_dump_state(tp);
5718         }
5719
5720         schedule_work(&tp->reset_task);
5721 }
5722
5723 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
5724 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5725 {
5726         u32 base = (u32) mapping & 0xffffffff;
5727
5728         return (base > 0xffffdcc0) && (base + len + 8 < base);
5729 }
5730
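/* Worked example for the test above: a mapping of 0xfffff000 with
 * len = 0x2000 gives base = 0xfffff000 > 0xffffdcc0, and
 * base + len + 8 wraps around to 0x1008 < base, so the buffer
 * straddles a 4GB boundary and must be bounced.  The 0xffffdcc0
 * cutoff (9023 bytes below the boundary, roughly one max-sized
 * jumbo frame) is just a cheap pre-filter before the exact wrap
 * check.
 */
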
5731 /* Test for DMA addresses > 40-bit */
5732 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5733                                           int len)
5734 {
5735 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5736         if (tg3_flag(tp, 40BIT_DMA_BUG))
5737                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5738         return 0;
5739 #else
5740         return 0;
5741 #endif
5742 }
5743
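/* Worked example for the test above: with the 40BIT_DMA_BUG flag
 * set, a mapping of 0xffffff0000 with len = 0x20000 sums to
 * 0x10000010000, which exceeds DMA_BIT_MASK(40) = 0xffffffffff, so
 * such a buffer would have to be bounced below the 1TB line.
 */
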
5744 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5745                         dma_addr_t mapping, int len, u32 flags,
5746                         u32 mss_and_is_end)
5747 {
5748         struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5749         int is_end = (mss_and_is_end & 0x1);
5750         u32 mss = (mss_and_is_end >> 1);
5751         u32 vlan_tag = 0;
5752
5753         if (is_end)
5754                 flags |= TXD_FLAG_END;
5755         if (flags & TXD_FLAG_VLAN) {
5756                 vlan_tag = flags >> 16;
5757                 flags &= 0xffff;
5758         }
5759         vlan_tag |= (mss << TXD_MSS_SHIFT);
5760
5761         txd->addr_hi = ((u64) mapping >> 32);
5762         txd->addr_lo = ((u64) mapping & 0xffffffff);
5763         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5764         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
5765 }
5766
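/* The mss_and_is_end argument packs two values into one word: bit 0
 * is the "last descriptor of this packet" flag and bits 31:1 carry
 * the MSS.  A hypothetical helper (not part of the driver) showing
 * how the call sites build it:
 */
#if 0
static inline u32 tg3_pack_mss_end(u32 mss, int is_end)
{
        /* mirrors the open-coded "(i == last) | (mss << 1)" pattern */
        return (is_end ? 1 : 0) | (mss << 1);
}
#endif
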
5767 static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
5768                                 struct sk_buff *skb, int last)
5769 {
5770         int i;
5771         u32 entry = tnapi->tx_prod;
5772         struct ring_info *txb = &tnapi->tx_buffers[entry];
5773
5774         pci_unmap_single(tnapi->tp->pdev,
5775                          dma_unmap_addr(txb, mapping),
5776                          skb_headlen(skb),
5777                          PCI_DMA_TODEVICE);
5778         for (i = 0; i < last; i++) {
5779                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5780
5781                 entry = NEXT_TX(entry);
5782                 txb = &tnapi->tx_buffers[entry];
5783
5784                 pci_unmap_page(tnapi->tp->pdev,
5785                                dma_unmap_addr(txb, mapping),
5786                                frag->size, PCI_DMA_TODEVICE);
5787         }
5788 }
5789
5790 /* Workaround 4GB and 40-bit hardware DMA bugs. */
5791 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5792                                        struct sk_buff *skb,
5793                                        u32 base_flags, u32 mss)
5794 {
5795         struct tg3 *tp = tnapi->tp;
5796         struct sk_buff *new_skb;
5797         dma_addr_t new_addr = 0;
5798         u32 entry = tnapi->tx_prod;
5799         int ret = 0;
5800
5801         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5802                 new_skb = skb_copy(skb, GFP_ATOMIC);
5803         else {
5804                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5805
5806                 new_skb = skb_copy_expand(skb,
5807                                           skb_headroom(skb) + more_headroom,
5808                                           skb_tailroom(skb), GFP_ATOMIC);
5809         }
5810
5811         if (!new_skb) {
5812                 ret = -1;
5813         } else {
5814                 /* New SKB is guaranteed to be linear. */
5815                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5816                                           PCI_DMA_TODEVICE);
5817                 /* Make sure the mapping succeeded */
5818                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5819                         ret = -1;
5820                         dev_kfree_skb(new_skb);
5821
5822                 /* Make sure new skb does not cross any 4G boundaries.
5823                  * Drop the packet if it does.
5824                  */
5825                 } else if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
5826                         pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5827                                          PCI_DMA_TODEVICE);
5828                         ret = -1;
5829                         dev_kfree_skb(new_skb);
5830                 } else {
5831                         tnapi->tx_buffers[entry].skb = new_skb;
5832                         dma_unmap_addr_set(&tnapi->tx_buffers[entry],
5833                                            mapping, new_addr);
5834
5835                         tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5836                                     base_flags, 1 | (mss << 1));
5837                 }
5838         }
5839
5840         dev_kfree_skb(skb);
5841
5842         return ret;
5843 }
5844
5845 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
5846
5847 /* Use GSO to workaround a rare TSO bug that may be triggered when the
5848  * TSO header is greater than 80 bytes.
5849  */
5850 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5851 {
5852         struct sk_buff *segs, *nskb;
5853         /* Estimate the number of fragments in the worst case */
5854         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5855
5856         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5857                 netif_stop_queue(tp->dev);
5858
5859                 /* netif_tx_stop_queue() must be done before checking
5860                  * the tx index in tg3_tx_avail() below, because in
5861                  * tg3_tx(), we update tx index before checking for
5862                  * netif_tx_queue_stopped().
5863                  */
5864                 smp_mb();
5865                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5866                         return NETDEV_TX_BUSY;
5867
5868                 netif_wake_queue(tp->dev);
5869         }
5870
5871         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5872         if (IS_ERR(segs))
5873                 goto tg3_tso_bug_end;
5874
5875         do {
5876                 nskb = segs;
5877                 segs = segs->next;
5878                 nskb->next = NULL;
5879                 tg3_start_xmit(nskb, tp->dev);
5880         } while (segs);
5881
5882 tg3_tso_bug_end:
5883         dev_kfree_skb(skb);
5884
5885         return NETDEV_TX_OK;
5886 }
5887
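/* A note on frag_cnt_est above: each segment emitted by
 * skb_gso_segment() needs a descriptor for its linear header plus
 * descriptors for its payload, so gso_segs * 3 is a conservative
 * worst-case bound (our reading of the estimate, not a documented
 * hardware limit) that keeps the transmit loop above from
 * exhausting the tx ring mid-burst.
 */
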
5888 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5889  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
5890  */
5891 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
5892 {
5893         struct tg3 *tp = netdev_priv(dev);
5894         u32 len, entry, base_flags, mss;
5895         int i = -1, would_hit_hwbug;
5896         dma_addr_t mapping;
5897         struct tg3_napi *tnapi;
5898         struct netdev_queue *txq;
5899         unsigned int last;
5900
5901         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5902         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5903         if (tg3_flag(tp, ENABLE_TSS))
5904                 tnapi++;
5905
5906         /* We are running in BH disabled context with netif_tx_lock
5907          * and TX reclaim runs via tp->napi.poll inside of a software
5908          * interrupt.  Furthermore, IRQ processing runs lockless so we have
5909          * no IRQ context deadlocks to worry about either.  Rejoice!
5910          */
5911         if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5912                 if (!netif_tx_queue_stopped(txq)) {
5913                         netif_tx_stop_queue(txq);
5914
5915                         /* This is a hard error, log it. */
5916                         netdev_err(dev,
5917                                    "BUG! Tx Ring full when queue awake!\n");
5918                 }
5919                 return NETDEV_TX_BUSY;
5920         }
5921
5922         entry = tnapi->tx_prod;
5923         base_flags = 0;
5924         if (skb->ip_summed == CHECKSUM_PARTIAL)
5925                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5926
5927         mss = skb_shinfo(skb)->gso_size;
5928         if (mss) {
5929                 struct iphdr *iph;
5930                 u32 tcp_opt_len, hdr_len;
5931
5932                 if (skb_header_cloned(skb) &&
5933                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5934                         dev_kfree_skb(skb);
5935                         goto out_unlock;
5936                 }
5937
5938                 iph = ip_hdr(skb);
5939                 tcp_opt_len = tcp_optlen(skb);
5940
5941                 if (skb_is_gso_v6(skb)) {
5942                         hdr_len = skb_headlen(skb) - ETH_HLEN;
5943                 } else {
5944                         u32 ip_tcp_len;
5945
5946                         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5947                         hdr_len = ip_tcp_len + tcp_opt_len;
5948
5949                         iph->check = 0;
5950                         iph->tot_len = htons(mss + hdr_len);
5951                 }
5952
5953                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5954                     tg3_flag(tp, TSO_BUG))
5955                         return tg3_tso_bug(tp, skb);
5956
5957                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5958                                TXD_FLAG_CPU_POST_DMA);
5959
5960                 if (tg3_flag(tp, HW_TSO_1) ||
5961                     tg3_flag(tp, HW_TSO_2) ||
5962                     tg3_flag(tp, HW_TSO_3)) {
5963                         tcp_hdr(skb)->check = 0;
5964                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5965                 } else
5966                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5967                                                                  iph->daddr, 0,
5968                                                                  IPPROTO_TCP,
5969                                                                  0);
5970
5971                 if (tg3_flag(tp, HW_TSO_3)) {
5972                         mss |= (hdr_len & 0xc) << 12;
5973                         if (hdr_len & 0x10)
5974                                 base_flags |= 0x00000010;
5975                         base_flags |= (hdr_len & 0x3e0) << 5;
5976                 } else if (tg3_flag(tp, HW_TSO_2))
5977                         mss |= hdr_len << 9;
5978                 else if (tg3_flag(tp, HW_TSO_1) ||
5979                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5980                         if (tcp_opt_len || iph->ihl > 5) {
5981                                 int tsflags;
5982
5983                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5984                                 mss |= (tsflags << 11);
5985                         }
5986                 } else {
5987                         if (tcp_opt_len || iph->ihl > 5) {
5988                                 int tsflags;
5989
5990                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5991                                 base_flags |= tsflags << 12;
5992                         }
5993                 }
5994         }
5995
5996         if (vlan_tx_tag_present(skb))
5997                 base_flags |= (TXD_FLAG_VLAN |
5998                                (vlan_tx_tag_get(skb) << 16));
5999
6000         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6001             !mss && skb->len > VLAN_ETH_FRAME_LEN)
6002                 base_flags |= TXD_FLAG_JMB_PKT;
6003
6004         len = skb_headlen(skb);
6005
6006         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6007         if (pci_dma_mapping_error(tp->pdev, mapping)) {
6008                 dev_kfree_skb(skb);
6009                 goto out_unlock;
6010         }
6011
6012         tnapi->tx_buffers[entry].skb = skb;
6013         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6014
6015         would_hit_hwbug = 0;
6016
6017         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6018                 would_hit_hwbug = 1;
6019
6020         if (tg3_4g_overflow_test(mapping, len))
6021                 would_hit_hwbug = 1;
6022
6023         if (tg3_40bit_overflow_test(tp, mapping, len))
6024                 would_hit_hwbug = 1;
6025
6026         if (tg3_flag(tp, 5701_DMA_BUG))
6027                 would_hit_hwbug = 1;
6028
6029         tg3_set_txd(tnapi, entry, mapping, len, base_flags,
6030                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
6031
6032         entry = NEXT_TX(entry);
6033
6034         /* Now loop through additional data fragments, and queue them. */
6035         if (skb_shinfo(skb)->nr_frags > 0) {
6036                 last = skb_shinfo(skb)->nr_frags - 1;
6037                 for (i = 0; i <= last; i++) {
6038                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6039
6040                         len = frag->size;
6041                         mapping = pci_map_page(tp->pdev,
6042                                                frag->page,
6043                                                frag->page_offset,
6044                                                len, PCI_DMA_TODEVICE);
6045
6046                         tnapi->tx_buffers[entry].skb = NULL;
6047                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6048                                            mapping);
6049                         if (pci_dma_mapping_error(tp->pdev, mapping))
6050                                 goto dma_error;
6051
6052                         if (tg3_flag(tp, SHORT_DMA_BUG) &&
6053                             len <= 8)
6054                                 would_hit_hwbug = 1;
6055
6056                         if (tg3_4g_overflow_test(mapping, len))
6057                                 would_hit_hwbug = 1;
6058
6059                         if (tg3_40bit_overflow_test(tp, mapping, len))
6060                                 would_hit_hwbug = 1;
6061
6062                         if (tg3_flag(tp, HW_TSO_1) ||
6063                             tg3_flag(tp, HW_TSO_2) ||
6064                             tg3_flag(tp, HW_TSO_3))
6065                                 tg3_set_txd(tnapi, entry, mapping, len,
6066                                             base_flags, (i == last)|(mss << 1));
6067                         else
6068                                 tg3_set_txd(tnapi, entry, mapping, len,
6069                                             base_flags, (i == last));
6070
6071                         entry = NEXT_TX(entry);
6072                 }
6073         }
6074
6075         if (would_hit_hwbug) {
6076                 tg3_skb_error_unmap(tnapi, skb, i);
6077
6078                 /* If the workaround fails due to memory/mapping
6079                  * failure, silently drop this packet.
6080                  */
6081                 if (tigon3_dma_hwbug_workaround(tnapi, skb, base_flags, mss))
6082                         goto out_unlock;
6083
6084                 entry = NEXT_TX(tnapi->tx_prod);
6085         }
6086
6087         /* Packets are ready, update Tx producer idx locally and on the card. */
6088         tw32_tx_mbox(tnapi->prodmbox, entry);
6089
6090         skb_tx_timestamp(skb);
6091
6092         tnapi->tx_prod = entry;
6093         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6094                 netif_tx_stop_queue(txq);
6095
6096                 /* netif_tx_stop_queue() must be done before checking
6097                  * the tx index in tg3_tx_avail() below, because in
6098                  * tg3_tx(), we update tx index before checking for
6099                  * netif_tx_queue_stopped().
6100                  */
6101                 smp_mb();
6102                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6103                         netif_tx_wake_queue(txq);
6104         }
6105
6106 out_unlock:
6107         mmiowb();
6108
6109         return NETDEV_TX_OK;
6110
6111 dma_error:
6112         tg3_skb_error_unmap(tnapi, skb, i);
6113         dev_kfree_skb(skb);
6114         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6115         return NETDEV_TX_OK;
6116 }
6117
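/* Putting the pieces together: tg3_start_xmit() maps the linear head
 * and each fragment, and if any mapping trips one of the hardware
 * bug tests (short DMA, 4GB crossing, 40-bit overflow, 5701 DMA),
 * the whole packet is unwound via tg3_skb_error_unmap() and retried
 * through tigon3_dma_hwbug_workaround(), which copies it into a
 * freshly allocated (and re-checked) linear bounce skb.
 */
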
6118 static void tg3_set_loopback(struct net_device *dev, u32 features)
6119 {
6120         struct tg3 *tp = netdev_priv(dev);
6121
6122         if (features & NETIF_F_LOOPBACK) {
6123                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6124                         return;
6125
6126                 /*
6127                  * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
6128                  * loopback mode if Half-Duplex mode was negotiated earlier.
6129                  */
6130                 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
6131
6132                 /* Enable internal MAC loopback mode */
6133                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6134                 spin_lock_bh(&tp->lock);
6135                 tw32(MAC_MODE, tp->mac_mode);
6136                 netif_carrier_on(tp->dev);
6137                 spin_unlock_bh(&tp->lock);
6138                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6139         } else {
6140                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6141                         return;
6142
6143                 /* Disable internal MAC loopback mode */
6144                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6145                 spin_lock_bh(&tp->lock);
6146                 tw32(MAC_MODE, tp->mac_mode);
6147                 /* Force link status check */
6148                 tg3_setup_phy(tp, 1);
6149                 spin_unlock_bh(&tp->lock);
6150                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6151         }
6152 }
6153
6154 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6155 {
6156         struct tg3 *tp = netdev_priv(dev);
6157
6158         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6159                 features &= ~NETIF_F_ALL_TSO;
6160
6161         return features;
6162 }
6163
6164 static int tg3_set_features(struct net_device *dev, u32 features)
6165 {
6166         u32 changed = dev->features ^ features;
6167
6168         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6169                 tg3_set_loopback(dev, features);
6170
6171         return 0;
6172 }
6173
6174 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6175                                int new_mtu)
6176 {
6177         dev->mtu = new_mtu;
6178
6179         if (new_mtu > ETH_DATA_LEN) {
6180                 if (tg3_flag(tp, 5780_CLASS)) {
6181                         netdev_update_features(dev);
6182                         tg3_flag_clear(tp, TSO_CAPABLE);
6183                 } else {
6184                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
6185                 }
6186         } else {
6187                 if (tg3_flag(tp, 5780_CLASS)) {
6188                         tg3_flag_set(tp, TSO_CAPABLE);
6189                         netdev_update_features(dev);
6190                 }
6191                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
6192         }
6193 }
6194
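/* Why the ordering above matters: on 5780-class chips TSO and jumbo
 * frames are mutually exclusive (see tg3_fix_features()), so when
 * growing the MTU we let netdev_update_features() strip TSO before
 * clearing TSO_CAPABLE, and when shrinking it we restore the flag
 * before features are re-evaluated.
 */
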
6195 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6196 {
6197         struct tg3 *tp = netdev_priv(dev);
6198         int err;
6199
6200         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6201                 return -EINVAL;
6202
6203         if (!netif_running(dev)) {
6204                 /* We'll just catch it later when the
6205                  * device is brought up.
6206                  */
6207                 tg3_set_mtu(dev, tp, new_mtu);
6208                 return 0;
6209         }
6210
6211         tg3_phy_stop(tp);
6212
6213         tg3_netif_stop(tp);
6214
6215         tg3_full_lock(tp, 1);
6216
6217         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6218
6219         tg3_set_mtu(dev, tp, new_mtu);
6220
6221         err = tg3_restart_hw(tp, 0);
6222
6223         if (!err)
6224                 tg3_netif_start(tp);
6225
6226         tg3_full_unlock(tp);
6227
6228         if (!err)
6229                 tg3_phy_start(tp);
6230
6231         return err;
6232 }
6233
6234 static void tg3_rx_prodring_free(struct tg3 *tp,
6235                                  struct tg3_rx_prodring_set *tpr)
6236 {
6237         int i;
6238
6239         if (tpr != &tp->napi[0].prodring) {
6240                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6241                      i = (i + 1) & tp->rx_std_ring_mask)
6242                         tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6243                                         tp->rx_pkt_map_sz);
6244
6245                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
6246                         for (i = tpr->rx_jmb_cons_idx;
6247                              i != tpr->rx_jmb_prod_idx;
6248                              i = (i + 1) & tp->rx_jmb_ring_mask) {
6249                                 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6250                                                 TG3_RX_JMB_MAP_SZ);
6251                         }
6252                 }
6253
6254                 return;
6255         }
6256
6257         for (i = 0; i <= tp->rx_std_ring_mask; i++)
6258                 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6259                                 tp->rx_pkt_map_sz);
6260
6261         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6262                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6263                         tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6264                                         TG3_RX_JMB_MAP_SZ);
6265         }
6266 }
6267
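/* Note the asymmetry above: the per-vector rings can only hold live
 * buffers in the window between their consumer and producer indexes,
 * so only that span is walked; the default ring in tp->napi[0] owns
 * its buffers outright and is freed in full.
 */
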
6268 /* Initialize rx rings for packet processing.
6269  *
6270  * The chip has been shut down and the driver detached from
6271  * the networking stack, so no interrupts or new tx packets will
6272  * end up in the driver.  tp->{tx,}lock are held and thus
6273  * we may not sleep.
6274  */
6275 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6276                                  struct tg3_rx_prodring_set *tpr)
6277 {
6278         u32 i, rx_pkt_dma_sz;
6279
6280         tpr->rx_std_cons_idx = 0;
6281         tpr->rx_std_prod_idx = 0;
6282         tpr->rx_jmb_cons_idx = 0;
6283         tpr->rx_jmb_prod_idx = 0;
6284
6285         if (tpr != &tp->napi[0].prodring) {
6286                 memset(&tpr->rx_std_buffers[0], 0,
6287                        TG3_RX_STD_BUFF_RING_SIZE(tp));
6288                 if (tpr->rx_jmb_buffers)
6289                         memset(&tpr->rx_jmb_buffers[0], 0,
6290                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
6291                 goto done;
6292         }
6293
6294         /* Zero out all descriptors. */
6295         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6296
6297         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6298         if (tg3_flag(tp, 5780_CLASS) &&
6299             tp->dev->mtu > ETH_DATA_LEN)
6300                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6301         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6302
6303         /* Initialize invariants of the rings; we only set this
6304          * stuff once.  This works because the card does not
6305          * write into the rx buffer posting rings.
6306          */
6307         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6308                 struct tg3_rx_buffer_desc *rxd;
6309
6310                 rxd = &tpr->rx_std[i];
6311                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6312                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6313                 rxd->opaque = (RXD_OPAQUE_RING_STD |
6314                                (i << RXD_OPAQUE_INDEX_SHIFT));
6315         }
6316
6317         /* Now allocate fresh SKBs for each rx ring. */
6318         for (i = 0; i < tp->rx_pending; i++) {
6319                 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6320                         netdev_warn(tp->dev,
6321                                     "Using a smaller RX standard ring. Only "
6322                                     "%d out of %d buffers were allocated "
6323                                     "successfully\n", i, tp->rx_pending);
6324                         if (i == 0)
6325                                 goto initfail;
6326                         tp->rx_pending = i;
6327                         break;
6328                 }
6329         }
6330
6331         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
6332                 goto done;
6333
6334         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6335
6336         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
6337                 goto done;
6338
6339         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6340                 struct tg3_rx_buffer_desc *rxd;
6341
6342                 rxd = &tpr->rx_jmb[i].std;
6343                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6344                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6345                                   RXD_FLAG_JUMBO;
6346                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6347                        (i << RXD_OPAQUE_INDEX_SHIFT));
6348         }
6349
6350         for (i = 0; i < tp->rx_jumbo_pending; i++) {
6351                 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6352                         netdev_warn(tp->dev,
6353                                     "Using a smaller RX jumbo ring. Only %d "
6354                                     "out of %d buffers were allocated "
6355                                     "successfully\n", i, tp->rx_jumbo_pending);
6356                         if (i == 0)
6357                                 goto initfail;
6358                         tp->rx_jumbo_pending = i;
6359                         break;
6360                 }
6361         }
6362
6363 done:
6364         return 0;
6365
6366 initfail:
6367         tg3_rx_prodring_free(tp, tpr);
6368         return -ENOMEM;
6369 }
6370
6371 static void tg3_rx_prodring_fini(struct tg3 *tp,
6372                                  struct tg3_rx_prodring_set *tpr)
6373 {
6374         kfree(tpr->rx_std_buffers);
6375         tpr->rx_std_buffers = NULL;
6376         kfree(tpr->rx_jmb_buffers);
6377         tpr->rx_jmb_buffers = NULL;
6378         if (tpr->rx_std) {
6379                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6380                                   tpr->rx_std, tpr->rx_std_mapping);
6381                 tpr->rx_std = NULL;
6382         }
6383         if (tpr->rx_jmb) {
6384                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6385                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
6386                 tpr->rx_jmb = NULL;
6387         }
6388 }
6389
6390 static int tg3_rx_prodring_init(struct tg3 *tp,
6391                                 struct tg3_rx_prodring_set *tpr)
6392 {
6393         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6394                                       GFP_KERNEL);
6395         if (!tpr->rx_std_buffers)
6396                 return -ENOMEM;
6397
6398         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6399                                          TG3_RX_STD_RING_BYTES(tp),
6400                                          &tpr->rx_std_mapping,
6401                                          GFP_KERNEL);
6402         if (!tpr->rx_std)
6403                 goto err_out;
6404
6405         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6406                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6407                                               GFP_KERNEL);
6408                 if (!tpr->rx_jmb_buffers)
6409                         goto err_out;
6410
6411                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6412                                                  TG3_RX_JMB_RING_BYTES(tp),
6413                                                  &tpr->rx_jmb_mapping,
6414                                                  GFP_KERNEL);
6415                 if (!tpr->rx_jmb)
6416                         goto err_out;
6417         }
6418
6419         return 0;
6420
6421 err_out:
6422         tg3_rx_prodring_fini(tp, tpr);
6423         return -ENOMEM;
6424 }
6425
6426 /* Free up pending packets in all rx/tx rings.
6427  *
6428  * The chip has been shut down and the driver detached from
6429  * the networking stack, so no interrupts or new tx packets will
6430  * end up in the driver.  tp->{tx,}lock is not held and we are not
6431  * in an interrupt context and thus may sleep.
6432  */
6433 static void tg3_free_rings(struct tg3 *tp)
6434 {
6435         int i, j;
6436
6437         for (j = 0; j < tp->irq_cnt; j++) {
6438                 struct tg3_napi *tnapi = &tp->napi[j];
6439
6440                 tg3_rx_prodring_free(tp, &tnapi->prodring);
6441
6442                 if (!tnapi->tx_buffers)
6443                         continue;
6444
6445                 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6446                         struct ring_info *txp;
6447                         struct sk_buff *skb;
6448                         unsigned int k;
6449
6450                         txp = &tnapi->tx_buffers[i];
6451                         skb = txp->skb;
6452
6453                         if (skb == NULL) {
6454                                 i++;
6455                                 continue;
6456                         }
6457
6458                         pci_unmap_single(tp->pdev,
6459                                          dma_unmap_addr(txp, mapping),
6460                                          skb_headlen(skb),
6461                                          PCI_DMA_TODEVICE);
6462                         txp->skb = NULL;
6463
6464                         i++;
6465
6466                         for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6467                                 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6468                                 pci_unmap_page(tp->pdev,
6469                                                dma_unmap_addr(txp, mapping),
6470                                                skb_shinfo(skb)->frags[k].size,
6471                                                PCI_DMA_TODEVICE);
6472                                 i++;
6473                         }
6474
6475                         dev_kfree_skb_any(skb);
6476                 }
6477         }
6478 }
6479
6480 /* Initialize tx/rx rings for packet processing.
6481  *
6482  * The chip has been shut down and the driver detached from
6483  * the networking stack, so no interrupts or new tx packets will
6484  * end up in the driver.  tp->{tx,}lock are held and thus
6485  * we may not sleep.
6486  */
6487 static int tg3_init_rings(struct tg3 *tp)
6488 {
6489         int i;
6490
6491         /* Free up all the SKBs. */
6492         tg3_free_rings(tp);
6493
6494         for (i = 0; i < tp->irq_cnt; i++) {
6495                 struct tg3_napi *tnapi = &tp->napi[i];
6496
6497                 tnapi->last_tag = 0;
6498                 tnapi->last_irq_tag = 0;
6499                 tnapi->hw_status->status = 0;
6500                 tnapi->hw_status->status_tag = 0;
6501                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6502
6503                 tnapi->tx_prod = 0;
6504                 tnapi->tx_cons = 0;
6505                 if (tnapi->tx_ring)
6506                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6507
6508                 tnapi->rx_rcb_ptr = 0;
6509                 if (tnapi->rx_rcb)
6510                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6511
6512                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6513                         tg3_free_rings(tp);
6514                         return -ENOMEM;
6515                 }
6516         }
6517
6518         return 0;
6519 }
6520
6521 /*
6522  * Must not be invoked with interrupt sources disabled and
6523  * the hardware shut down.
6524  */
6525 static void tg3_free_consistent(struct tg3 *tp)
6526 {
6527         int i;
6528
6529         for (i = 0; i < tp->irq_cnt; i++) {
6530                 struct tg3_napi *tnapi = &tp->napi[i];
6531
6532                 if (tnapi->tx_ring) {
6533                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6534                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
6535                         tnapi->tx_ring = NULL;
6536                 }
6537
6538                 kfree(tnapi->tx_buffers);
6539                 tnapi->tx_buffers = NULL;
6540
6541                 if (tnapi->rx_rcb) {
6542                         dma_free_coherent(&tp->pdev->dev,
6543                                           TG3_RX_RCB_RING_BYTES(tp),
6544                                           tnapi->rx_rcb,
6545                                           tnapi->rx_rcb_mapping);
6546                         tnapi->rx_rcb = NULL;
6547                 }
6548
6549                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6550
6551                 if (tnapi->hw_status) {
6552                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6553                                           tnapi->hw_status,
6554                                           tnapi->status_mapping);
6555                         tnapi->hw_status = NULL;
6556                 }
6557         }
6558
6559         if (tp->hw_stats) {
6560                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6561                                   tp->hw_stats, tp->stats_mapping);
6562                 tp->hw_stats = NULL;
6563         }
6564 }
6565
6566 /*
6567  * Must not be invoked with interrupt sources disabled and
6568  * the hardware shut down.  Can sleep.
6569  */
6570 static int tg3_alloc_consistent(struct tg3 *tp)
6571 {
6572         int i;
6573
6574         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6575                                           sizeof(struct tg3_hw_stats),
6576                                           &tp->stats_mapping,
6577                                           GFP_KERNEL);
6578         if (!tp->hw_stats)
6579                 goto err_out;
6580
6581         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6582
6583         for (i = 0; i < tp->irq_cnt; i++) {
6584                 struct tg3_napi *tnapi = &tp->napi[i];
6585                 struct tg3_hw_status *sblk;
6586
6587                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6588                                                       TG3_HW_STATUS_SIZE,
6589                                                       &tnapi->status_mapping,
6590                                                       GFP_KERNEL);
6591                 if (!tnapi->hw_status)
6592                         goto err_out;
6593
6594                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6595                 sblk = tnapi->hw_status;
6596
6597                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6598                         goto err_out;
6599
6600                 /* If multivector TSS is enabled, vector 0 does not handle
6601                  * tx interrupts.  Don't allocate any resources for it.
6602                  */
6603                 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
6604                     (i && tg3_flag(tp, ENABLE_TSS))) {
6605                         tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6606                                                     TG3_TX_RING_SIZE,
6607                                                     GFP_KERNEL);
6608                         if (!tnapi->tx_buffers)
6609                                 goto err_out;
6610
6611                         tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6612                                                             TG3_TX_RING_BYTES,
6613                                                         &tnapi->tx_desc_mapping,
6614                                                             GFP_KERNEL);
6615                         if (!tnapi->tx_ring)
6616                                 goto err_out;
6617                 }
6618
6619                 /*
6620                  * When RSS is enabled, the status block format changes
6621                  * slightly.  The "rx_jumbo_consumer", "reserved",
6622                  * and "rx_mini_consumer" members get mapped to the
6623                  * other three rx return ring producer indexes.
6624                  */
6625                 switch (i) {
6626                 default:
6627                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6628                         break;
6629                 case 2:
6630                         tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6631                         break;
6632                 case 3:
6633                         tnapi->rx_rcb_prod_idx = &sblk->reserved;
6634                         break;
6635                 case 4:
6636                         tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6637                         break;
6638                 }
6639
6640                 /*
6641                  * If multivector RSS is enabled, vector 0 does not handle
6642                  * rx or tx interrupts.  Don't allocate any resources for it.
6643                  */
6644                 if (!i && tg3_flag(tp, ENABLE_RSS))
6645                         continue;
6646
6647                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6648                                                    TG3_RX_RCB_RING_BYTES(tp),
6649                                                    &tnapi->rx_rcb_mapping,
6650                                                    GFP_KERNEL);
6651                 if (!tnapi->rx_rcb)
6652                         goto err_out;
6653
6654                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6655         }
6656
6657         return 0;
6658
6659 err_out:
6660         tg3_free_consistent(tp);
6661         return -ENOMEM;
6662 }
6663
6664 #define MAX_WAIT_CNT 1000
6665
6666 /* To stop a block, clear the enable bit and poll till it
6667  * clears (at most MAX_WAIT_CNT * 100 usec = 100 ms).  tp->lock is held.
6668  */
6669 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6670 {
6671         unsigned int i;
6672         u32 val;
6673
6674         if (tg3_flag(tp, 5705_PLUS)) {
6675                 switch (ofs) {
6676                 case RCVLSC_MODE:
6677                 case DMAC_MODE:
6678                 case MBFREE_MODE:
6679                 case BUFMGR_MODE:
6680                 case MEMARB_MODE:
6681                         /* We can't enable/disable these bits of the
6682                          * 5705/5750, just say success.
6683                          */
6684                         return 0;
6685
6686                 default:
6687                         break;
6688                 }
6689         }
6690
6691         val = tr32(ofs);
6692         val &= ~enable_bit;
6693         tw32_f(ofs, val);
6694
6695         for (i = 0; i < MAX_WAIT_CNT; i++) {
6696                 udelay(100);
6697                 val = tr32(ofs);
6698                 if ((val & enable_bit) == 0)
6699                         break;
6700         }
6701
6702         if (i == MAX_WAIT_CNT && !silent) {
6703                 dev_err(&tp->pdev->dev,
6704                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6705                         ofs, enable_bit);
6706                 return -ENODEV;
6707         }
6708
6709         return 0;
6710 }
6711
6712 /* tp->lock is held. */
6713 static int tg3_abort_hw(struct tg3 *tp, int silent)
6714 {
6715         int i, err;
6716
6717         tg3_disable_ints(tp);
6718
6719         tp->rx_mode &= ~RX_MODE_ENABLE;
6720         tw32_f(MAC_RX_MODE, tp->rx_mode);
6721         udelay(10);
6722
6723         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6724         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6725         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6726         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6727         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6728         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6729
6730         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6731         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6732         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6733         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6734         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6735         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6736         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6737
6738         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6739         tw32_f(MAC_MODE, tp->mac_mode);
6740         udelay(40);
6741
6742         tp->tx_mode &= ~TX_MODE_ENABLE;
6743         tw32_f(MAC_TX_MODE, tp->tx_mode);
6744
6745         for (i = 0; i < MAX_WAIT_CNT; i++) {
6746                 udelay(100);
6747                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6748                         break;
6749         }
6750         if (i >= MAX_WAIT_CNT) {
6751                 dev_err(&tp->pdev->dev,
6752                         "%s timed out, TX_MODE_ENABLE will not clear "
6753                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6754                 err |= -ENODEV;
6755         }
6756
6757         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6758         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6759         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6760
6761         tw32(FTQ_RESET, 0xffffffff);
6762         tw32(FTQ_RESET, 0x00000000);
6763
6764         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6765         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6766
6767         for (i = 0; i < tp->irq_cnt; i++) {
6768                 struct tg3_napi *tnapi = &tp->napi[i];
6769                 if (tnapi->hw_status)
6770                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6771         }
6772         if (tp->hw_stats)
6773                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6774
6775         return err;
6776 }
6777
6778 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6779 {
6780         int i;
6781         u32 apedata;
6782
6783         /* NCSI does not support APE events */
6784         if (tg3_flag(tp, APE_HAS_NCSI))
6785                 return;
6786
6787         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6788         if (apedata != APE_SEG_SIG_MAGIC)
6789                 return;
6790
6791         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6792         if (!(apedata & APE_FW_STATUS_READY))
6793                 return;
6794
6795         /* Wait for up to 1 millisecond for APE to service previous event. */
6796         for (i = 0; i < 10; i++) {
6797                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6798                         return;
6799
6800                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6801
6802                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6803                         tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6804                                         event | APE_EVENT_STATUS_EVENT_PENDING);
6805
6806                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6807
6808                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6809                         break;
6810
6811                 udelay(100);
6812         }
6813
6814         if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6815                 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6816 }
6817
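/* The handshake above, in brief: under TG3_APE_LOCK_MEM the driver
 * stages the new event word (with EVENT_PENDING set) once any prior
 * event has been consumed, polling for at most 10 * 100 usec = 1 ms;
 * the final write of APE_EVENT_1 then rings the doorbell so the APE
 * firmware notices the staged event.
 */
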
6818 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6819 {
6820         u32 event;
6821         u32 apedata;
6822
6823         if (!tg3_flag(tp, ENABLE_APE))
6824                 return;
6825
6826         switch (kind) {
6827         case RESET_KIND_INIT:
6828                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6829                                 APE_HOST_SEG_SIG_MAGIC);
6830                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6831                                 APE_HOST_SEG_LEN_MAGIC);
6832                 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6833                 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6834                 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6835                         APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6836                 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6837                                 APE_HOST_BEHAV_NO_PHYLOCK);
6838                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
6839                                     TG3_APE_HOST_DRVR_STATE_START);
6840
6841                 event = APE_EVENT_STATUS_STATE_START;
6842                 break;
6843         case RESET_KIND_SHUTDOWN:
6844                 /* With the interface we are currently using,
6845                  * APE does not track driver state.  Wiping
6846                  * out the HOST SEGMENT SIGNATURE forces
6847                  * the APE to assume OS absent status.
6848                  */
6849                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6850
6851                 if (device_may_wakeup(&tp->pdev->dev) &&
6852                     tg3_flag(tp, WOL_ENABLE)) {
6853                         tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
6854                                             TG3_APE_HOST_WOL_SPEED_AUTO);
6855                         apedata = TG3_APE_HOST_DRVR_STATE_WOL;
6856                 } else
6857                         apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
6858
6859                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
6860
6861                 event = APE_EVENT_STATUS_STATE_UNLOAD;
6862                 break;
6863         case RESET_KIND_SUSPEND:
6864                 event = APE_EVENT_STATUS_STATE_SUSPEND;
6865                 break;
6866         default:
6867                 return;
6868         }
6869
6870         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
6871
6872         tg3_ape_send_event(tp, event);
6873 }
6874
6875 /* tp->lock is held. */
6876 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6877 {
6878         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6879                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6880
6881         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6882                 switch (kind) {
6883                 case RESET_KIND_INIT:
6884                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6885                                       DRV_STATE_START);
6886                         break;
6887
6888                 case RESET_KIND_SHUTDOWN:
6889                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6890                                       DRV_STATE_UNLOAD);
6891                         break;
6892
6893                 case RESET_KIND_SUSPEND:
6894                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6895                                       DRV_STATE_SUSPEND);
6896                         break;
6897
6898                 default:
6899                         break;
6900                 }
6901         }
6902
6903         if (kind == RESET_KIND_INIT ||
6904             kind == RESET_KIND_SUSPEND)
6905                 tg3_ape_driver_state_change(tp, kind);
6906 }
6907
6908 /* tp->lock is held. */
6909 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6910 {
6911         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6912                 switch (kind) {
6913                 case RESET_KIND_INIT:
6914                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6915                                       DRV_STATE_START_DONE);
6916                         break;
6917
6918                 case RESET_KIND_SHUTDOWN:
6919                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6920                                       DRV_STATE_UNLOAD_DONE);
6921                         break;
6922
6923                 default:
6924                         break;
6925                 }
6926         }
6927
6928         if (kind == RESET_KIND_SHUTDOWN)
6929                 tg3_ape_driver_state_change(tp, kind);
6930 }
6931
6932 /* tp->lock is held. */
6933 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6934 {
6935         if (tg3_flag(tp, ENABLE_ASF)) {
6936                 switch (kind) {
6937                 case RESET_KIND_INIT:
6938                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6939                                       DRV_STATE_START);
6940                         break;
6941
6942                 case RESET_KIND_SHUTDOWN:
6943                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6944                                       DRV_STATE_UNLOAD);
6945                         break;
6946
6947                 case RESET_KIND_SUSPEND:
6948                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6949                                       DRV_STATE_SUSPEND);
6950                         break;
6951
6952                 default:
6953                         break;
6954                 }
6955         }
6956 }
6957
6958 static int tg3_poll_fw(struct tg3 *tp)
6959 {
6960         int i;
6961         u32 val;
6962
6963         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6964                 /* Wait up to 20ms for init done. */
6965                 for (i = 0; i < 200; i++) {
6966                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
6967                                 return 0;
6968                         udelay(100);
6969                 }
6970                 return -ENODEV;
6971         }
6972
6973         /* Wait up to 1 second for firmware initialization to complete. */
6974         for (i = 0; i < 100000; i++) {
6975                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
6976                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
6977                         break;
6978                 udelay(10);
6979         }
6980
6981         /* Chip might not be fitted with firmware.  Some Sun onboard
6982          * parts are configured like that.  So don't signal the timeout
6983          * of the above loop as an error, but do report the lack of
6984          * running firmware once.
6985          */
6986         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
6987                 tg3_flag_set(tp, NO_FWARE_REPORTED);
6988
6989                 netdev_info(tp->dev, "No firmware running\n");
6990         }
6991
6992         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
6993                 /* The 57765 A0 needs a little more
6994                  * time to do some important work.
6995                  */
6996                 mdelay(10);
6997         }
6998
6999         return 0;
7000 }
7001
7002 /* Save PCI command register before chip reset */
7003 static void tg3_save_pci_state(struct tg3 *tp)
7004 {
7005         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7006 }
7007
7008 /* Restore PCI state after chip reset */
7009 static void tg3_restore_pci_state(struct tg3 *tp)
7010 {
7011         u32 val;
7012
7013         /* Re-enable indirect register accesses. */
7014         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7015                                tp->misc_host_ctrl);
7016
7017         /* Set MAX PCI retry to zero. */
7018         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7019         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7020             tg3_flag(tp, PCIX_MODE))
7021                 val |= PCISTATE_RETRY_SAME_DMA;
7022         /* Allow reads and writes to the APE register and memory space. */
7023         if (tg3_flag(tp, ENABLE_APE))
7024                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7025                        PCISTATE_ALLOW_APE_SHMEM_WR |
7026                        PCISTATE_ALLOW_APE_PSPACE_WR;
7027         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7028
7029         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7030
7031         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7032                 if (tg3_flag(tp, PCI_EXPRESS))
7033                         pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7034                 else {
7035                         pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7036                                               tp->pci_cacheline_sz);
7037                         pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7038                                               tp->pci_lat_timer);
7039                 }
7040         }
7041
7042         /* Make sure PCI-X relaxed ordering bit is clear. */
7043         if (tg3_flag(tp, PCIX_MODE)) {
7044                 u16 pcix_cmd;
7045
7046                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7047                                      &pcix_cmd);
7048                 pcix_cmd &= ~PCI_X_CMD_ERO;
7049                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7050                                       pcix_cmd);
7051         }
7052
7053         if (tg3_flag(tp, 5780_CLASS)) {
7055                 /* Chip reset on the 5780 clears the MSI enable bit,
7056                  * so we need to restore it.
7057                  */
7058                 if (tg3_flag(tp, USING_MSI)) {
7059                         u16 ctrl;
7060
7061                         pci_read_config_word(tp->pdev,
7062                                              tp->msi_cap + PCI_MSI_FLAGS,
7063                                              &ctrl);
7064                         pci_write_config_word(tp->pdev,
7065                                               tp->msi_cap + PCI_MSI_FLAGS,
7066                                               ctrl | PCI_MSI_FLAGS_ENABLE);
7067                         val = tr32(MSGINT_MODE);
7068                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7069                 }
7070         }
7071 }
7072
7073 static void tg3_stop_fw(struct tg3 *);
7074
7075 /* tp->lock is held. */
7076 static int tg3_chip_reset(struct tg3 *tp)
7077 {
7078         u32 val;
7079         void (*write_op)(struct tg3 *, u32, u32);
7080         int i, err;
7081
7082         tg3_nvram_lock(tp);
7083
7084         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7085
7086         /* No matching tg3_nvram_unlock() after this because
7087          * the chip reset below will undo the nvram lock.
7088          */
7089         tp->nvram_lock_cnt = 0;
7090
7091         /* GRC_MISC_CFG core clock reset will clear the memory
7092          * enable bit in PCI register 4 and the MSI enable bit
7093          * on some chips, so we save relevant registers here.
7094          */
7095         tg3_save_pci_state(tp);
7096
7097         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7098             tg3_flag(tp, 5755_PLUS))
7099                 tw32(GRC_FASTBOOT_PC, 0);
7100
7101         /*
7102          * We must avoid the readl() that normally takes place.
7103          * It can lock up machines, cause machine checks, and do
7104          * other fun things.  So, temporarily disable the 5701
7105          * hardware workaround while we do the reset.
7106          */
7107         write_op = tp->write32;
7108         if (write_op == tg3_write_flush_reg32)
7109                 tp->write32 = tg3_write32;
7110
7111         /* Prevent the irq handler from reading or writing PCI registers
7112          * during chip reset when the memory enable bit in the PCI command
7113          * register may be cleared.  The chip does not generate interrupts
7114          * at this time, but the irq handler may still be called due to irq
7115          * sharing or irqpoll.
7116          */
7117         tg3_flag_set(tp, CHIP_RESETTING);
7118         for (i = 0; i < tp->irq_cnt; i++) {
7119                 struct tg3_napi *tnapi = &tp->napi[i];
7120                 if (tnapi->hw_status) {
7121                         tnapi->hw_status->status = 0;
7122                         tnapi->hw_status->status_tag = 0;
7123                 }
7124                 tnapi->last_tag = 0;
7125                 tnapi->last_irq_tag = 0;
7126         }
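              /* Make the CHIP_RESETTING flag and the status block writes
               * above visible to all CPUs before we wait out any handlers
               * still in flight below.
               */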
7127         smp_mb();
7128
7129         for (i = 0; i < tp->irq_cnt; i++)
7130                 synchronize_irq(tp->napi[i].irq_vec);
7131
7132         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7133                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7134                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7135         }
7136
7137         /* do the reset */
7138         val = GRC_MISC_CFG_CORECLK_RESET;
7139
7140         if (tg3_flag(tp, PCI_EXPRESS)) {
7141                 /* Force PCIe 1.0a mode */
7142                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7143                     !tg3_flag(tp, 57765_PLUS) &&
7144                     tr32(TG3_PCIE_PHY_TSTCTL) ==
7145                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7146                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7147
7148                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7149                         tw32(GRC_MISC_CFG, (1 << 29));
7150                         val |= (1 << 29);
7151                 }
7152         }
7153
7154         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7155                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7156                 tw32(GRC_VCPU_EXT_CTRL,
7157                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7158         }
7159
7160         /* Manage gphy power for all CPMU-absent PCIe devices. */
7161         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7162                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7163
7164         tw32(GRC_MISC_CFG, val);
7165
7166         /* restore 5701 hardware bug workaround write method */
7167         tp->write32 = write_op;
7168
7169         /* Unfortunately, we have to delay before the PCI read back.
7170          * Some 575X chips will not even respond to a PCI cfg access
7171          * when the reset command is given to the chip.
7172          *
7173          * How do these hardware designers expect things to work
7174          * properly if the PCI write is posted for a long period
7175          * of time?  It is always necessary to have some method by
7176          * which a register read-back can occur to push out the
7177          * write that does the reset.
7178          *
7179          * For most tg3 variants the trick below has worked.
7180          * Ho hum...
7181          */
7182         udelay(120);
7183
7184         /* Flush PCI posted writes.  The normal MMIO registers
7185          * are inaccessible at this time, so this is the only
7186          * way to do this reliably (actually, this is no longer
7187          * the case; see above).  I tried to use indirect
7188          * register reads/writes but this upset some 5701 variants.
7189          */
7190         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7191
7192         udelay(120);
7193
7194         if (tg3_flag(tp, PCI_EXPRESS) && tp->pcie_cap) {
7195                 u16 val16;
7196
7197                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7198                         int i;
7199                         u32 cfg_val;
7200
7201                         /* Wait for link training to complete.  */
7202                         for (i = 0; i < 5000; i++)
7203                                 udelay(100);
7204
7205                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7206                         pci_write_config_dword(tp->pdev, 0xc4,
7207                                                cfg_val | (1 << 15));
7208                 }
7209
7210                 /* Clear the "no snoop" and "relaxed ordering" bits. */
7211                 pci_read_config_word(tp->pdev,
7212                                      tp->pcie_cap + PCI_EXP_DEVCTL,
7213                                      &val16);
7214                 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7215                            PCI_EXP_DEVCTL_NOSNOOP_EN);
7216                 /*
7217                  * Older PCIe devices only support the 128-byte
7218                  * MPS setting.  Enforce the restriction.
7219                  */
7220                 if (!tg3_flag(tp, CPMU_PRESENT))
7221                         val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7222                 pci_write_config_word(tp->pdev,
7223                                       tp->pcie_cap + PCI_EXP_DEVCTL,
7224                                       val16);
7225
7226                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7227
7228                 /* Clear error status */
7229                 pci_write_config_word(tp->pdev,
7230                                       tp->pcie_cap + PCI_EXP_DEVSTA,
7231                                       PCI_EXP_DEVSTA_CED |
7232                                       PCI_EXP_DEVSTA_NFED |
7233                                       PCI_EXP_DEVSTA_FED |
7234                                       PCI_EXP_DEVSTA_URD);
7235         }
7236
7237         tg3_restore_pci_state(tp);
7238
7239         tg3_flag_clear(tp, CHIP_RESETTING);
7240         tg3_flag_clear(tp, ERROR_PROCESSED);
7241
7242         val = 0;
7243         if (tg3_flag(tp, 5780_CLASS))
7244                 val = tr32(MEMARB_MODE);
7245         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7246
7247         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7248                 tg3_stop_fw(tp);
7249                 tw32(0x5000, 0x400);
7250         }
7251
7252         tw32(GRC_MODE, tp->grc_mode);
7253
7254         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7255                 val = tr32(0xc4);
7256
7257                 tw32(0xc4, val | (1 << 15));
7258         }
7259
7260         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7261             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7262                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7263                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7264                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7265                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7266         }
7267
7268         if (tg3_flag(tp, ENABLE_APE))
7269                 tp->mac_mode = MAC_MODE_APE_TX_EN |
7270                                MAC_MODE_APE_RX_EN |
7271                                MAC_MODE_TDE_ENABLE;
7272
7273         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7274                 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
7275                 val = tp->mac_mode;
7276         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7277                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7278                 val = tp->mac_mode;
7279         } else
7280                 val = 0;
7281
7282         tw32_f(MAC_MODE, val);
7283         udelay(40);
7284
7285         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7286
7287         err = tg3_poll_fw(tp);
7288         if (err)
7289                 return err;
7290
7291         tg3_mdio_start(tp);
7292
7293         if (tg3_flag(tp, PCI_EXPRESS) &&
7294             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7295             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7296             !tg3_flag(tp, 57765_PLUS)) {
7297                 val = tr32(0x7c00);
7298
7299                 tw32(0x7c00, val | (1 << 25));
7300         }
7301
7302         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7303                 val = tr32(TG3_CPMU_CLCK_ORIDE);
7304                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7305         }
7306
7307         /* Reprobe ASF enable state.  */
7308         tg3_flag_clear(tp, ENABLE_ASF);
7309         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7310         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7311         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7312                 u32 nic_cfg;
7313
7314                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7315                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7316                         tg3_flag_set(tp, ENABLE_ASF);
7317                         tp->last_event_jiffies = jiffies;
7318                         if (tg3_flag(tp, 5750_PLUS))
7319                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7320                 }
7321         }
7322
7323         return 0;
7324 }
7325
7326 /* tp->lock is held. */
7327 static void tg3_stop_fw(struct tg3 *tp)
7328 {
7329         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
7330                 /* Wait for RX cpu to ACK the previous event. */
7331                 tg3_wait_for_event_ack(tp);
7332
7333                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7334
7335                 tg3_generate_fw_event(tp);
7336
7337                 /* Wait for RX cpu to ACK this event. */
7338                 tg3_wait_for_event_ack(tp);
7339         }
7340 }
7341
7342 /* tp->lock is held. */
7343 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7344 {
7345         int err;
7346
7347         tg3_stop_fw(tp);
7348
7349         tg3_write_sig_pre_reset(tp, kind);
7350
7351         tg3_abort_hw(tp, silent);
7352         err = tg3_chip_reset(tp);
7353
7354         __tg3_set_mac_addr(tp, 0);
7355
7356         tg3_write_sig_legacy(tp, kind);
7357         tg3_write_sig_post_reset(tp, kind);
7358
7359         if (err)
7360                 return err;
7361
7362         return 0;
7363 }
7364
7365 #define RX_CPU_SCRATCH_BASE     0x30000
7366 #define RX_CPU_SCRATCH_SIZE     0x04000
7367 #define TX_CPU_SCRATCH_BASE     0x34000
7368 #define TX_CPU_SCRATCH_SIZE     0x04000
7369
7370 /* tp->lock is held. */
7371 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7372 {
7373         int i;
7374
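              /* The driver never runs firmware on a TX CPU for 5705-class
               * and newer parts (the 5705's TSO firmware runs on the RX CPU,
               * and newer chips do TSO in hardware -- see
               * tg3_load_tso_firmware()), so halting one here is a driver bug.
               */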
7375         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
7376
7377         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7378                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7379
7380                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7381                 return 0;
7382         }
7383         if (offset == RX_CPU_BASE) {
7384                 for (i = 0; i < 10000; i++) {
7385                         tw32(offset + CPU_STATE, 0xffffffff);
7386                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
7387                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7388                                 break;
7389                 }
7390
7391                 tw32(offset + CPU_STATE, 0xffffffff);
7392                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
7393                 udelay(10);
7394         } else {
7395                 for (i = 0; i < 10000; i++) {
7396                         tw32(offset + CPU_STATE, 0xffffffff);
7397                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
7398                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7399                                 break;
7400                 }
7401         }
7402
7403         if (i >= 10000) {
7404                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7405                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7406                 return -ENODEV;
7407         }
7408
7409         /* Clear firmware's nvram arbitration. */
7410         if (tg3_flag(tp, NVRAM))
7411                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7412         return 0;
7413 }
7414
7415 struct fw_info {
7416         unsigned int fw_base;
7417         unsigned int fw_len;
7418         const __be32 *fw_data;
7419 };
7420
7421 /* tp->lock is held. */
7422 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7423                                  int cpu_scratch_size, struct fw_info *info)
7424 {
7425         int err, lock_err, i;
7426         void (*write_op)(struct tg3 *, u32, u32);
7427
7428         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
7429                 netdev_err(tp->dev,
7430                            "%s: Trying to load TX cpu firmware which is 5705\n",
7431                            __func__);
7432                 return -EINVAL;
7433         }
7434
7435         if (tg3_flag(tp, 5705_PLUS))
7436                 write_op = tg3_write_mem;
7437         else
7438                 write_op = tg3_write_indirect_reg32;
7439
7440         /* It is possible that bootcode is still loading at this point.
7441          * Get the nvram lock before halting the cpu.
7442          */
7443         lock_err = tg3_nvram_lock(tp);
7444         err = tg3_halt_cpu(tp, cpu_base);
7445         if (!lock_err)
7446                 tg3_nvram_unlock(tp);
7447         if (err)
7448                 goto out;
7449
7450         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7451                 write_op(tp, cpu_scratch_base + i, 0);
7452         tw32(cpu_base + CPU_STATE, 0xffffffff);
7453         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
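              /* Copy the image into the scratch window; the low 16 bits of
               * fw_base give its load offset within that window.
               */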
7454         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7455                 write_op(tp, (cpu_scratch_base +
7456                               (info->fw_base & 0xffff) +
7457                               (i * sizeof(u32))),
7458                               be32_to_cpu(info->fw_data[i]));
7459
7460         err = 0;
7461
7462 out:
7463         return err;
7464 }
7465
7466 /* tp->lock is held. */
7467 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7468 {
7469         struct fw_info info;
7470         const __be32 *fw_data;
7471         int err, i;
7472
7473         fw_data = (void *)tp->fw->data;
7474
7475         /* The firmware blob starts with version numbers, followed by
7476          * the start address and length.  We set the complete length here:
7477          * length = end_address_of_bss - start_address_of_text.
7478          * The remainder is the blob, loaded contiguously from the
7479          * start address. */
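              /* A sketch of that layout, assuming the usual three-word header:
               *   fw_data[0]: firmware version
               *   fw_data[1]: start address (becomes info.fw_base)
               *   fw_data[2]: stated length (not read here; fw_len is derived
               *               from the file size instead)
               *   fw_data[3]: first word of the loadable image
               */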
7480
7481         info.fw_base = be32_to_cpu(fw_data[1]);
7482         info.fw_len = tp->fw->size - 12;
7483         info.fw_data = &fw_data[3];
7484
7485         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7486                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7487                                     &info);
7488         if (err)
7489                 return err;
7490
7491         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7492                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7493                                     &info);
7494         if (err)
7495                 return err;
7496
7497         /* Now start up only the RX cpu. */
7498         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7499         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7500
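              /* Give the CPU up to five attempts, roughly 1ms apart, to latch
               * the new program counter while it is held in the halted state.
               */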
7501         for (i = 0; i < 5; i++) {
7502                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7503                         break;
7504                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7505                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
7506                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7507                 udelay(1000);
7508         }
7509         if (i >= 5) {
7510                 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
7511                            "should be %08x\n", __func__,
7512                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7513                 return -ENODEV;
7514         }
7515         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7516         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
7517
7518         return 0;
7519 }
7520
7521 /* tp->lock is held. */
7522 static int tg3_load_tso_firmware(struct tg3 *tp)
7523 {
7524         struct fw_info info;
7525         const __be32 *fw_data;
7526         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7527         int err, i;
7528
7529         if (tg3_flag(tp, HW_TSO_1) ||
7530             tg3_flag(tp, HW_TSO_2) ||
7531             tg3_flag(tp, HW_TSO_3))
7532                 return 0;
7533
7534         fw_data = (void *)tp->fw->data;
7535
7536         /* The firmware blob starts with version numbers, followed by
7537          * the start address and length.  We set the complete length here:
7538          * length = end_address_of_bss - start_address_of_text.
7539          * The remainder is the blob, loaded contiguously from the
7540          * start address. */
7541
7542         info.fw_base = be32_to_cpu(fw_data[1]);
7543         cpu_scratch_size = tp->fw_len;
7544         info.fw_len = tp->fw->size - 12;
7545         info.fw_data = &fw_data[3];
7546
7547         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7548                 cpu_base = RX_CPU_BASE;
7549                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7550         } else {
7551                 cpu_base = TX_CPU_BASE;
7552                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7553                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7554         }
7555
7556         err = tg3_load_firmware_cpu(tp, cpu_base,
7557                                     cpu_scratch_base, cpu_scratch_size,
7558                                     &info);
7559         if (err)
7560                 return err;
7561
7562         /* Now start up the cpu. */
7563         tw32(cpu_base + CPU_STATE, 0xffffffff);
7564         tw32_f(cpu_base + CPU_PC, info.fw_base);
7565
7566         for (i = 0; i < 5; i++) {
7567                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7568                         break;
7569                 tw32(cpu_base + CPU_STATE, 0xffffffff);
7570                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
7571                 tw32_f(cpu_base + CPU_PC, info.fw_base);
7572                 udelay(1000);
7573         }
7574         if (i >= 5) {
7575                 netdev_err(tp->dev,
7576                            "%s fails to set CPU PC, is %08x should be %08x\n",
7577                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7578                 return -ENODEV;
7579         }
7580         tw32(cpu_base + CPU_STATE, 0xffffffff);
7581         tw32_f(cpu_base + CPU_MODE,  0x00000000);
7582         return 0;
7583 }
7584
7585
7586 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7587 {
7588         struct tg3 *tp = netdev_priv(dev);
7589         struct sockaddr *addr = p;
7590         int err = 0, skip_mac_1 = 0;
7591
7592         if (!is_valid_ether_addr(addr->sa_data))
7593                 return -EINVAL;
7594
7595         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7596
7597         if (!netif_running(dev))
7598                 return 0;
7599
7600         if (tg3_flag(tp, ENABLE_ASF)) {
7601                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7602
7603                 addr0_high = tr32(MAC_ADDR_0_HIGH);
7604                 addr0_low = tr32(MAC_ADDR_0_LOW);
7605                 addr1_high = tr32(MAC_ADDR_1_HIGH);
7606                 addr1_low = tr32(MAC_ADDR_1_LOW);
7607
7608                 /* Skip MAC addr 1 if ASF is using it (non-zero and != addr 0). */
7609                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7610                     !(addr1_high == 0 && addr1_low == 0))
7611                         skip_mac_1 = 1;
7612         }
7613         spin_lock_bh(&tp->lock);
7614         __tg3_set_mac_addr(tp, skip_mac_1);
7615         spin_unlock_bh(&tp->lock);
7616
7617         return err;
7618 }
7619
7620 /* tp->lock is held. */
7621 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7622                            dma_addr_t mapping, u32 maxlen_flags,
7623                            u32 nic_addr)
7624 {
7625         tg3_write_mem(tp,
7626                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7627                       ((u64) mapping >> 32));
7628         tg3_write_mem(tp,
7629                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7630                       ((u64) mapping & 0xffffffff));
7631         tg3_write_mem(tp,
7632                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7633                        maxlen_flags);
7634
7635         if (!tg3_flag(tp, 5705_PLUS))
7636                 tg3_write_mem(tp,
7637                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7638                               nic_addr);
7639 }
7640
7641 static void __tg3_set_rx_mode(struct net_device *);
7642 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7643 {
7644         int i;
7645
7646         if (!tg3_flag(tp, ENABLE_TSS)) {
7647                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7648                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7649                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7650         } else {
7651                 tw32(HOSTCC_TXCOL_TICKS, 0);
7652                 tw32(HOSTCC_TXMAX_FRAMES, 0);
7653                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7654         }
7655
7656         if (!tg3_flag(tp, ENABLE_RSS)) {
7657                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7658                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7659                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7660         } else {
7661                 tw32(HOSTCC_RXCOL_TICKS, 0);
7662                 tw32(HOSTCC_RXMAX_FRAMES, 0);
7663                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7664         }
7665
7666         if (!tg3_flag(tp, 5705_PLUS)) {
7667                 u32 val = ec->stats_block_coalesce_usecs;
7668
7669                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7670                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7671
7672                 if (!netif_carrier_ok(tp->dev))
7673                         val = 0;
7674
7675                 tw32(HOSTCC_STAT_COAL_TICKS, val);
7676         }
7677
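              /* The per-vector coalescing registers are laid out in blocks
               * of 0x18 bytes, starting at the VEC1 offsets.
               */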
7678         for (i = 0; i < tp->irq_cnt - 1; i++) {
7679                 u32 reg;
7680
7681                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7682                 tw32(reg, ec->rx_coalesce_usecs);
7683                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7684                 tw32(reg, ec->rx_max_coalesced_frames);
7685                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7686                 tw32(reg, ec->rx_max_coalesced_frames_irq);
7687
7688                 if (tg3_flag(tp, ENABLE_TSS)) {
7689                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7690                         tw32(reg, ec->tx_coalesce_usecs);
7691                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7692                         tw32(reg, ec->tx_max_coalesced_frames);
7693                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7694                         tw32(reg, ec->tx_max_coalesced_frames_irq);
7695                 }
7696         }
7697
7698         for (; i < tp->irq_max - 1; i++) {
7699                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7700                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7701                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7702
7703                 if (tg3_flag(tp, ENABLE_TSS)) {
7704                         tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7705                         tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7706                         tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7707                 }
7708         }
7709 }
7710
7711 /* tp->lock is held. */
7712 static void tg3_rings_reset(struct tg3 *tp)
7713 {
7714         int i;
7715         u32 stblk, txrcb, rxrcb, limit;
7716         struct tg3_napi *tnapi = &tp->napi[0];
7717
7718         /* Disable all transmit rings but the first. */
7719         if (!tg3_flag(tp, 5705_PLUS))
7720                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7721         else if (tg3_flag(tp, 5717_PLUS))
7722                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7723         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7724                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7725         else
7726                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7727
7728         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7729              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7730                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7731                               BDINFO_FLAGS_DISABLED);
7732
7733
7734         /* Disable all receive return rings but the first. */
7735         if (tg3_flag(tp, 5717_PLUS))
7736                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7737         else if (!tg3_flag(tp, 5705_PLUS))
7738                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7739         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7740                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7741                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7742         else
7743                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7744
7745         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7746              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7747                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7748                               BDINFO_FLAGS_DISABLED);
7749
7750         /* Disable interrupts */
7751         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7752         tp->napi[0].chk_msi_cnt = 0;
7753         tp->napi[0].last_rx_cons = 0;
7754         tp->napi[0].last_tx_cons = 0;
7755
7756         /* Zero mailbox registers. */
7757         if (tg3_flag(tp, SUPPORT_MSIX)) {
7758                 for (i = 1; i < tp->irq_max; i++) {
7759                         tp->napi[i].tx_prod = 0;
7760                         tp->napi[i].tx_cons = 0;
7761                         if (tg3_flag(tp, ENABLE_TSS))
7762                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
7763                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
7764                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7765                         tp->napi[i].chk_msi_cnt = 0;
7766                         tp->napi[i].last_rx_cons = 0;
7767                         tp->napi[i].last_tx_cons = 0;
7768                 }
7769                 if (!tg3_flag(tp, ENABLE_TSS))
7770                         tw32_mailbox(tp->napi[0].prodmbox, 0);
7771         } else {
7772                 tp->napi[0].tx_prod = 0;
7773                 tp->napi[0].tx_cons = 0;
7774                 tw32_mailbox(tp->napi[0].prodmbox, 0);
7775                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7776         }
7777
7778         /* Make sure the NIC-based send BD rings are disabled. */
7779         if (!tg3_flag(tp, 5705_PLUS)) {
7780                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7781                 for (i = 0; i < 16; i++)
7782                         tw32_tx_mbox(mbox + i * 8, 0);
7783         }
7784
7785         txrcb = NIC_SRAM_SEND_RCB;
7786         rxrcb = NIC_SRAM_RCV_RET_RCB;
7787
7788         /* Clear status block in ram. */
7789         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7790
7791         /* Set status block DMA address */
7792         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7793              ((u64) tnapi->status_mapping >> 32));
7794         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7795              ((u64) tnapi->status_mapping & 0xffffffff));
7796
7797         if (tnapi->tx_ring) {
7798                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7799                                (TG3_TX_RING_SIZE <<
7800                                 BDINFO_FLAGS_MAXLEN_SHIFT),
7801                                NIC_SRAM_TX_BUFFER_DESC);
7802                 txrcb += TG3_BDINFO_SIZE;
7803         }
7804
7805         if (tnapi->rx_rcb) {
7806                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7807                                (tp->rx_ret_ring_mask + 1) <<
7808                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
7809                 rxrcb += TG3_BDINFO_SIZE;
7810         }
7811
7812         stblk = HOSTCC_STATBLCK_RING1;
7813
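              /* Status blocks for the remaining vectors live in consecutive
               * register pairs spaced 8 bytes apart, starting at
               * HOSTCC_STATBLCK_RING1.
               */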
7814         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7815                 u64 mapping = (u64)tnapi->status_mapping;
7816                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7817                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7818
7819                 /* Clear status block in ram. */
7820                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7821
7822                 if (tnapi->tx_ring) {
7823                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7824                                        (TG3_TX_RING_SIZE <<
7825                                         BDINFO_FLAGS_MAXLEN_SHIFT),
7826                                        NIC_SRAM_TX_BUFFER_DESC);
7827                         txrcb += TG3_BDINFO_SIZE;
7828                 }
7829
7830                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7831                                ((tp->rx_ret_ring_mask + 1) <<
7832                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7833
7834                 stblk += 8;
7835                 rxrcb += TG3_BDINFO_SIZE;
7836         }
7837 }
7838
7839 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
7840 {
7841         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
7842
7843         if (!tg3_flag(tp, 5750_PLUS) ||
7844             tg3_flag(tp, 5780_CLASS) ||
7845             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7846             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7847                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
7848         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7849                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
7850                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
7851         else
7852                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
7853
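              /* The standard ring replenish threshold is the lower of two
               * limits: half the NIC's BD cache (capped at rx_std_max_post)
               * and an eighth of the host ring (but at least one BD).
               */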
7854         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
7855         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
7856
7857         val = min(nic_rep_thresh, host_rep_thresh);
7858         tw32(RCVBDI_STD_THRESH, val);
7859
7860         if (tg3_flag(tp, 57765_PLUS))
7861                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
7862
7863         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7864                 return;
7865
7866         if (!tg3_flag(tp, 5705_PLUS))
7867                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
7868         else
7869                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
7870
7871         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
7872
7873         val = min(bdcache_maxcnt / 2, host_rep_thresh);
7874         tw32(RCVBDI_JUMBO_THRESH, val);
7875
7876         if (tg3_flag(tp, 57765_PLUS))
7877                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
7878 }
7879
7880 /* tp->lock is held. */
7881 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7882 {
7883         u32 val, rdmac_mode;
7884         int i, err, limit;
7885         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
7886
7887         tg3_disable_ints(tp);
7888
7889         tg3_stop_fw(tp);
7890
7891         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7892
7893         if (tg3_flag(tp, INIT_COMPLETE))
7894                 tg3_abort_hw(tp, 1);
7895
7896         /* Enable MAC control of LPI */
7897         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
7898                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
7899                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
7900                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
7901
7902                 tw32_f(TG3_CPMU_EEE_CTRL,
7903                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
7904
7905                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
7906                       TG3_CPMU_EEEMD_LPI_IN_TX |
7907                       TG3_CPMU_EEEMD_LPI_IN_RX |
7908                       TG3_CPMU_EEEMD_EEE_ENABLE;
7909
7910                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
7911                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
7912
7913                 if (tg3_flag(tp, ENABLE_APE))
7914                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
7915
7916                 tw32_f(TG3_CPMU_EEE_MODE, val);
7917
7918                 tw32_f(TG3_CPMU_EEE_DBTMR1,
7919                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
7920                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
7921
7922                 tw32_f(TG3_CPMU_EEE_DBTMR2,
7923                        TG3_CPMU_DBTMR2_APE_TX_2047US |
7924                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
7925         }
7926
7927         if (reset_phy)
7928                 tg3_phy_reset(tp);
7929
7930         err = tg3_chip_reset(tp);
7931         if (err)
7932                 return err;
7933
7934         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7935
7936         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7937                 val = tr32(TG3_CPMU_CTRL);
7938                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7939                 tw32(TG3_CPMU_CTRL, val);
7940
7941                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7942                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7943                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7944                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7945
7946                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7947                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7948                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7949                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7950
7951                 val = tr32(TG3_CPMU_HST_ACC);
7952                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7953                 val |= CPMU_HST_ACC_MACCLK_6_25;
7954                 tw32(TG3_CPMU_HST_ACC, val);
7955         }
7956
7957         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7958                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
7959                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
7960                        PCIE_PWR_MGMT_L1_THRESH_4MS;
7961                 tw32(PCIE_PWR_MGMT_THRESH, val);
7962
7963                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
7964                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
7965
7966                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
7967
7968                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7969                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7970         }
7971
7972         if (tg3_flag(tp, L1PLLPD_EN)) {
7973                 u32 grc_mode = tr32(GRC_MODE);
7974
7975                 /* Access the lower 1K of PL PCIE block registers. */
7976                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7977                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7978
7979                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
7980                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
7981                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
7982
7983                 tw32(GRC_MODE, grc_mode);
7984         }
7985
7986         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
7987                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7988                         u32 grc_mode = tr32(GRC_MODE);
7989
7990                         /* Access the lower 1K of PL PCIE block registers. */
7991                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7992                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7993
7994                         val = tr32(TG3_PCIE_TLDLPL_PORT +
7995                                    TG3_PCIE_PL_LO_PHYCTL5);
7996                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
7997                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
7998
7999                         tw32(GRC_MODE, grc_mode);
8000                 }
8001
8002                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8003                         u32 grc_mode = tr32(GRC_MODE);
8004
8005                         /* Access the lower 1K of DL PCIE block registers. */
8006                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8007                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8008
8009                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8010                                    TG3_PCIE_DL_LO_FTSMAX);
8011                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8012                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8013                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8014
8015                         tw32(GRC_MODE, grc_mode);
8016                 }
8017
8018                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8019                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8020                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8021                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8022         }
8023
8024         /* This works around an issue with Athlon chipsets on
8025          * B3 tigon3 silicon.  This bit has no effect on any
8026          * other revision.  But do not set this on PCI Express
8027          * chips and don't even touch the clocks if the CPMU is present.
8028          */
8029         if (!tg3_flag(tp, CPMU_PRESENT)) {
8030                 if (!tg3_flag(tp, PCI_EXPRESS))
8031                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8032                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8033         }
8034
8035         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8036             tg3_flag(tp, PCIX_MODE)) {
8037                 val = tr32(TG3PCI_PCISTATE);
8038                 val |= PCISTATE_RETRY_SAME_DMA;
8039                 tw32(TG3PCI_PCISTATE, val);
8040         }
8041
8042         if (tg3_flag(tp, ENABLE_APE)) {
8043                 /* Allow reads and writes to the
8044                  * APE register and memory space.
8045                  */
8046                 val = tr32(TG3PCI_PCISTATE);
8047                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8048                        PCISTATE_ALLOW_APE_SHMEM_WR |
8049                        PCISTATE_ALLOW_APE_PSPACE_WR;
8050                 tw32(TG3PCI_PCISTATE, val);
8051         }
8052
8053         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8054                 /* Enable some hw fixes.  */
8055                 val = tr32(TG3PCI_MSI_DATA);
8056                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8057                 tw32(TG3PCI_MSI_DATA, val);
8058         }
8059
8060         /* Descriptor ring init may make accesses to the
8061          * NIC SRAM area to set up the TX descriptors, so we
8062          * can only do this after the hardware has been
8063          * successfully reset.
8064          */
8065         err = tg3_init_rings(tp);
8066         if (err)
8067                 return err;
8068
8069         if (tg3_flag(tp, 57765_PLUS)) {
8070                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8071                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8072                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8073                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8074                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8075                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8076                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8077                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8078         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8079                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8080                 /* This value is determined during the probe-time DMA
8081                  * engine test, tg3_test_dma.
8082                  */
8083                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8084         }
8085
8086         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8087                           GRC_MODE_4X_NIC_SEND_RINGS |
8088                           GRC_MODE_NO_TX_PHDR_CSUM |
8089                           GRC_MODE_NO_RX_PHDR_CSUM);
8090         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8091
8092         /* Pseudo-header checksum is done by hardware logic and not
8093          * the offload processors, so make the chip do the pseudo-
8094          * header checksums on receive.  For transmit it is more
8095          * convenient to do the pseudo-header checksum in software
8096          * as Linux does that on transmit for us in all cases.
8097          */
8098         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8099
8100         tw32(GRC_MODE,
8101              tp->grc_mode |
8102              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8103
8104         /* Set up the timer prescaler register.  The clock is always 66MHz. */
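              /* With a prescaler value of 65, the tick presumably works out
               * to 1 usec (66 MHz / (65 + 1) = 1 MHz).
               */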
8105         val = tr32(GRC_MISC_CFG);
8106         val &= ~0xff;
8107         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8108         tw32(GRC_MISC_CFG, val);
8109
8110         /* Initialize MBUF/DESC pool. */
8111         if (tg3_flag(tp, 5750_PLUS)) {
8112                 /* Do nothing.  */
8113         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8114                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8115                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8116                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8117                 else
8118                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8119                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8120                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8121         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8122                 int fw_len;
8123
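                      /* The TSO firmware image occupies the bottom of the
                       * 5705 MBUF pool; round its length up to a 128-byte
                       * boundary and carve it out of the pool so the two
                       * cannot overlap.
                       */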
8124                 fw_len = tp->fw_len;
8125                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8126                 tw32(BUFMGR_MB_POOL_ADDR,
8127                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8128                 tw32(BUFMGR_MB_POOL_SIZE,
8129                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8130         }
8131
8132         if (tp->dev->mtu <= ETH_DATA_LEN) {
8133                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8134                      tp->bufmgr_config.mbuf_read_dma_low_water);
8135                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8136                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8137                 tw32(BUFMGR_MB_HIGH_WATER,
8138                      tp->bufmgr_config.mbuf_high_water);
8139         } else {
8140                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8141                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8142                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8143                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8144                 tw32(BUFMGR_MB_HIGH_WATER,
8145                      tp->bufmgr_config.mbuf_high_water_jumbo);
8146         }
8147         tw32(BUFMGR_DMA_LOW_WATER,
8148              tp->bufmgr_config.dma_low_water);
8149         tw32(BUFMGR_DMA_HIGH_WATER,
8150              tp->bufmgr_config.dma_high_water);
8151
8152         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8153         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8154                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8155         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8156             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8157             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8158                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8159         tw32(BUFMGR_MODE, val);
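              /* Poll up to 20ms (2000 * 10us) for the buffer manager to
               * report itself enabled.
               */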
8160         for (i = 0; i < 2000; i++) {
8161                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8162                         break;
8163                 udelay(10);
8164         }
8165         if (i >= 2000) {
8166                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8167                 return -ENODEV;
8168         }
8169
8170         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8171                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8172
8173         tg3_setup_rxbd_thresholds(tp);
8174
8175         /* Initialize TG3_BDINFO's at:
8176          *  RCVDBDI_STD_BD:     standard eth size rx ring
8177          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8178          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8179          *
8180          * like so:
8181          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8182          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8183          *                              ring attribute flags
8184          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8185          *
8186          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8187          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8188          *
8189          * The size of each ring is fixed in the firmware, but the location is
8190          * configurable.
8191          */
8192         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8193              ((u64) tpr->rx_std_mapping >> 32));
8194         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8195              ((u64) tpr->rx_std_mapping & 0xffffffff));
8196         if (!tg3_flag(tp, 5717_PLUS))
8197                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8198                      NIC_SRAM_RX_BUFFER_DESC);
8199
8200         /* Disable the mini ring */
8201         if (!tg3_flag(tp, 5705_PLUS))
8202                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8203                      BDINFO_FLAGS_DISABLED);
8204
8205         /* Program the jumbo buffer descriptor ring control
8206          * blocks on those devices that have them.
8207          */
8208         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8209             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8210
8211                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8212                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8213                              ((u64) tpr->rx_jmb_mapping >> 32));
8214                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8215                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8216                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8217                               BDINFO_FLAGS_MAXLEN_SHIFT;
8218                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8219                              val | BDINFO_FLAGS_USE_EXT_RECV);
8220                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8221                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8222                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8223                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8224                 } else {
8225                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8226                              BDINFO_FLAGS_DISABLED);
8227                 }
8228
8229                 if (tg3_flag(tp, 57765_PLUS)) {
8230                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8231                                 val = TG3_RX_STD_MAX_SIZE_5700;
8232                         else
8233                                 val = TG3_RX_STD_MAX_SIZE_5717;
8234                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8235                         val |= (TG3_RX_STD_DMA_SZ << 2);
8236                 } else
8237                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8238         } else
8239                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8240
8241         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8242
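              /* Post the initial producer indices; the rings themselves were
               * populated by tg3_init_rings() above.
               */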
8243         tpr->rx_std_prod_idx = tp->rx_pending;
8244         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8245
8246         tpr->rx_jmb_prod_idx =
8247                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8248         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8249
8250         tg3_rings_reset(tp);
8251
8252         /* Initialize MAC address and backoff seed. */
8253         __tg3_set_mac_addr(tp, 0);
8254
8255         /* MTU + ethernet header + FCS + optional VLAN tag */
8256         tw32(MAC_RX_MTU_SIZE,
8257              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8258
8259         /* The slot time is changed by tg3_setup_phy if we
8260          * run at gigabit with half duplex.
8261          */
8262         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8263               (6 << TX_LENGTHS_IPG_SHIFT) |
8264               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8265
8266         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8267                 val |= tr32(MAC_TX_LENGTHS) &
8268                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8269                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8270
8271         tw32(MAC_TX_LENGTHS, val);
8272
8273         /* Receive rules. */
8274         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8275         tw32(RCVLPC_CONFIG, 0x0181);
8276
8277         /* Calculate RDMAC_MODE setting early; we need it to determine
8278          * the RCVLPC_STATE_ENABLE mask.
8279          */
8280         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8281                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8282                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8283                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8284                       RDMAC_MODE_LNGREAD_ENAB);
8285
8286         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8287                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8288
8289         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8290             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8291             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8292                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8293                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8294                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8295
8296         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8297             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8298                 if (tg3_flag(tp, TSO_CAPABLE) &&
8299                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8300                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8301                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8302                            !tg3_flag(tp, IS_5788)) {
8303                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8304                 }
8305         }
8306
8307         if (tg3_flag(tp, PCI_EXPRESS))
8308                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8309
8310         if (tg3_flag(tp, HW_TSO_1) ||
8311             tg3_flag(tp, HW_TSO_2) ||
8312             tg3_flag(tp, HW_TSO_3))
8313                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8314
8315         if (tg3_flag(tp, 57765_PLUS) ||
8316             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8317             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8318                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8319
8320         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8321                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8322
8323         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8324             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8325             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8326             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8327             tg3_flag(tp, 57765_PLUS)) {
8328                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8329                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8330                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8331                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8332                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8333                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8334                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8335                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8336                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8337                 }
8338                 tw32(TG3_RDMA_RSRVCTRL_REG,
8339                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8340         }
8341
8342         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8343             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8344                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8345                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8346                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8347                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8348         }
8349
8350         /* Receive/send statistics. */
8351         if (tg3_flag(tp, 5750_PLUS)) {
8352                 val = tr32(RCVLPC_STATS_ENABLE);
8353                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8354                 tw32(RCVLPC_STATS_ENABLE, val);
8355         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8356                    tg3_flag(tp, TSO_CAPABLE)) {
8357                 val = tr32(RCVLPC_STATS_ENABLE);
8358                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8359                 tw32(RCVLPC_STATS_ENABLE, val);
8360         } else {
8361                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8362         }
8363         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8364         tw32(SNDDATAI_STATSENAB, 0xffffff);
8365         tw32(SNDDATAI_STATSCTRL,
8366              (SNDDATAI_SCTRL_ENABLE |
8367               SNDDATAI_SCTRL_FASTUPD));
8368
8369         /* Set up the host coalescing engine. */
8370         tw32(HOSTCC_MODE, 0);
8371         for (i = 0; i < 2000; i++) {
8372                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8373                         break;
8374                 udelay(10);
8375         }
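        /* A bounded wait: 2000 iterations x 10 usec gives the coalescing
         * engine up to 20 msec to report itself disabled before it is
         * reprogrammed below.
         */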
8376
8377         __tg3_set_coalesce(tp, &tp->coal);
8378
8379         if (!tg3_flag(tp, 5705_PLUS)) {
8380                 /* Status/statistics block address.  See tg3_timer,
8381                  * the tg3_periodic_fetch_stats call there, and
8382                  * tg3_get_stats to see how this works for 5705/5750 chips.
8383                  */
8384                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8385                      ((u64) tp->stats_mapping >> 32));
8386                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8387                      ((u64) tp->stats_mapping & 0xffffffff));
8388                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8389
8390                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8391
8392                 /* Clear statistics and status block memory areas */
8393                 for (i = NIC_SRAM_STATS_BLK;
8394                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8395                      i += sizeof(u32)) {
8396                         tg3_write_mem(tp, i, 0);
8397                         udelay(40);
8398                 }
8399         }
8400
8401         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8402
8403         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8404         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8405         if (!tg3_flag(tp, 5705_PLUS))
8406                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8407
8408         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8409                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8410                 /* reset to prevent losing 1st rx packet intermittently */
8411                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8412                 udelay(10);
8413         }
8414
8415         if (tg3_flag(tp, ENABLE_APE))
8416                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8417         else
8418                 tp->mac_mode = 0;
8419         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8420                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
8421         if (!tg3_flag(tp, 5705_PLUS) &&
8422             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8423             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8424                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8425         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8426         udelay(40);
8427
8428         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8429          * If TG3_FLAG_IS_NIC is zero, we should read the
8430          * register to preserve the GPIO settings for LOMs. The GPIOs,
8431          * whether used as inputs or outputs, are set by boot code after
8432          * reset.
8433          */
8434         if (!tg3_flag(tp, IS_NIC)) {
8435                 u32 gpio_mask;
8436
8437                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8438                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8439                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8440
8441                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8442                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8443                                      GRC_LCLCTRL_GPIO_OUTPUT3;
8444
8445                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8446                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8447
8448                 tp->grc_local_ctrl &= ~gpio_mask;
8449                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8450
8451                 /* GPIO1 must be driven high for eeprom write protect */
8452                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8453                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8454                                                GRC_LCLCTRL_GPIO_OUTPUT1);
8455         }
8456         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8457         udelay(100);
8458
8459         if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8460                 val = tr32(MSGINT_MODE);
8461                 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8462                 tw32(MSGINT_MODE, val);
8463         }
8464
8465         if (!tg3_flag(tp, 5705_PLUS)) {
8466                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8467                 udelay(40);
8468         }
8469
8470         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8471                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8472                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8473                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8474                WDMAC_MODE_LNGREAD_ENAB);
8475
8476         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8477             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8478                 if (tg3_flag(tp, TSO_CAPABLE) &&
8479                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8480                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8481                         /* nothing */
8482                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8483                            !tg3_flag(tp, IS_5788)) {
8484                         val |= WDMAC_MODE_RX_ACCEL;
8485                 }
8486         }
8487
8488         /* Enable host coalescing bug fix */
8489         if (tg3_flag(tp, 5755_PLUS))
8490                 val |= WDMAC_MODE_STATUS_TAG_FIX;
8491
8492         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8493                 val |= WDMAC_MODE_BURST_ALL_DATA;
8494
8495         tw32_f(WDMAC_MODE, val);
8496         udelay(40);
8497
8498         if (tg3_flag(tp, PCIX_MODE)) {
8499                 u16 pcix_cmd;
8500
8501                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8502                                      &pcix_cmd);
8503                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8504                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8505                         pcix_cmd |= PCI_X_CMD_READ_2K;
8506                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8507                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8508                         pcix_cmd |= PCI_X_CMD_READ_2K;
8509                 }
8510                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8511                                       pcix_cmd);
8512         }
8513
8514         tw32_f(RDMAC_MODE, rdmac_mode);
8515         udelay(40);
8516
8517         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8518         if (!tg3_flag(tp, 5705_PLUS))
8519                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8520
8521         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8522                 tw32(SNDDATAC_MODE,
8523                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8524         else
8525                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8526
8527         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8528         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8529         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8530         if (tg3_flag(tp, LRG_PROD_RING_CAP))
8531                 val |= RCVDBDI_MODE_LRG_RING_SZ;
8532         tw32(RCVDBDI_MODE, val);
8533         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8534         if (tg3_flag(tp, HW_TSO_1) ||
8535             tg3_flag(tp, HW_TSO_2) ||
8536             tg3_flag(tp, HW_TSO_3))
8537                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8538         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8539         if (tg3_flag(tp, ENABLE_TSS))
8540                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8541         tw32(SNDBDI_MODE, val);
8542         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8543
8544         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8545                 err = tg3_load_5701_a0_firmware_fix(tp);
8546                 if (err)
8547                         return err;
8548         }
8549
8550         if (tg3_flag(tp, TSO_CAPABLE)) {
8551                 err = tg3_load_tso_firmware(tp);
8552                 if (err)
8553                         return err;
8554         }
8555
8556         tp->tx_mode = TX_MODE_ENABLE;
8557
8558         if (tg3_flag(tp, 5755_PLUS) ||
8559             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8560                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8561
8562         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8563                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8564                 tp->tx_mode &= ~val;
8565                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8566         }
8567
8568         tw32_f(MAC_TX_MODE, tp->tx_mode);
8569         udelay(100);
8570
8571         if (tg3_flag(tp, ENABLE_RSS)) {
8572                 u32 reg = MAC_RSS_INDIR_TBL_0;
8573                 u8 *ent = (u8 *)&val;
8574
8575                 /* Set up the indirection table. */
8576                 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8577                         int idx = i % sizeof(val);
8578
8579                         ent[idx] = i % (tp->irq_cnt - 1);
8580                         if (idx == sizeof(val) - 1) {
8581                                 tw32(reg, val);
8582                                 reg += 4;
8583                         }
8584                 }
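                /* Each byte written above selects an rx return ring for one
                 * hash bucket.  With, say, tp->irq_cnt == 5 (one link vector
                 * plus four rx vectors), the table entries cycle 0, 1, 2, 3,
                 * spreading the buckets evenly across the four rings.
                 */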
8585
8586                 /* Setup the "secret" hash key. */
8587                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8588                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8589                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8590                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8591                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8592                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8593                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8594                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8595                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8596                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8597         }
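        /* The ten 32-bit writes above supply a 40-byte hash key, the key
         * length used by standard Toeplitz-style RSS implementations.
         */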
8598
8599         tp->rx_mode = RX_MODE_ENABLE;
8600         if (tg3_flag(tp, 5755_PLUS))
8601                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8602
8603         if (tg3_flag(tp, ENABLE_RSS))
8604                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8605                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
8606                                RX_MODE_RSS_IPV6_HASH_EN |
8607                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
8608                                RX_MODE_RSS_IPV4_HASH_EN |
8609                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
8610
8611         tw32_f(MAC_RX_MODE, tp->rx_mode);
8612         udelay(10);
8613
8614         tw32(MAC_LED_CTRL, tp->led_ctrl);
8615
8616         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8617         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8618                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8619                 udelay(10);
8620         }
8621         tw32_f(MAC_RX_MODE, tp->rx_mode);
8622         udelay(10);
8623
8624         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8625                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8626                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8627                         /* Set the drive transmission level to 1.2V only
8628                          * if the signal pre-emphasis bit is not set. */
8629                         val = tr32(MAC_SERDES_CFG);
8630                         val &= 0xfffff000;
8631                         val |= 0x880;
8632                         tw32(MAC_SERDES_CFG, val);
8633                 }
8634                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8635                         tw32(MAC_SERDES_CFG, 0x616000);
8636         }
8637
8638         /* Prevent the chip from dropping frames when flow control
8639          * is enabled.
8640          */
8641         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8642                 val = 1;
8643         else
8644                 val = 2;
8645         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8646
8647         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8648             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8649                 /* Use hardware link auto-negotiation */
8650                 tg3_flag_set(tp, HW_AUTONEG);
8651         }
8652
8653         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8654             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
8655                 u32 tmp;
8656
8657                 tmp = tr32(SERDES_RX_CTRL);
8658                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8659                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8660                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8661                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8662         }
8663
8664         if (!tg3_flag(tp, USE_PHYLIB)) {
8665                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8666                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8667                         tp->link_config.speed = tp->link_config.orig_speed;
8668                         tp->link_config.duplex = tp->link_config.orig_duplex;
8669                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
8670                 }
8671
8672                 err = tg3_setup_phy(tp, 0);
8673                 if (err)
8674                         return err;
8675
8676                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8677                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8678                         u32 tmp;
8679
8680                         /* Clear CRC stats. */
8681                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8682                                 tg3_writephy(tp, MII_TG3_TEST1,
8683                                              tmp | MII_TG3_TEST1_CRC_EN);
8684                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8685                         }
8686                 }
8687         }
8688
8689         __tg3_set_rx_mode(tp->dev);
8690
8691         /* Initialize receive rules. */
8692         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
8693         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8694         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
8695         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8696
8697         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
8698                 limit = 8;
8699         else
8700                 limit = 16;
8701         if (tg3_flag(tp, ENABLE_ASF))
8702                 limit -= 4;
8703         switch (limit) {
8704         case 16:
8705                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
8706         case 15:
8707                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
8708         case 14:
8709                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
8710         case 13:
8711                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
8712         case 12:
8713                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
8714         case 11:
8715                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
8716         case 10:
8717                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
8718         case 9:
8719                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
8720         case 8:
8721                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
8722         case 7:
8723                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
8724         case 6:
8725                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
8726         case 5:
8727                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
8728         case 4:
8729                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
8730         case 3:
8731                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
8732         case 2:
8733         case 1:
8734
8735         default:
8736                 break;
8737         }
8738
8739         if (tg3_flag(tp, ENABLE_APE))
8740                 /* Write our heartbeat update interval to APE. */
8741                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8742                                 APE_HOST_HEARTBEAT_INT_DISABLE);
8743
8744         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8745
8746         return 0;
8747 }
8748
8749 /* Called at device open time to get the chip ready for
8750  * packet processing.  Invoked with tp->lock held.
8751  */
8752 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8753 {
8754         tg3_switch_clocks(tp);
8755
8756         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8757
8758         return tg3_reset_hw(tp, reset_phy);
8759 }
8760
8761 #define TG3_STAT_ADD32(PSTAT, REG) \
8762 do {    u32 __val = tr32(REG); \
8763         (PSTAT)->low += __val; \
8764         if ((PSTAT)->low < __val) \
8765                 (PSTAT)->high += 1; \
8766 } while (0)
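/* The macro above widens the chip's wrapping 32-bit counters into 64-bit
 * software counters.  After the add, (PSTAT)->low < __val can only hold if
 * the addition wrapped past 2^32, in which case the carry is propagated
 * into the high word.  For example, low = 0xfffffff0 plus __val = 0x20
 * leaves low = 0x10, which is less than __val, so high is incremented.
 */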
8767
8768 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8769 {
8770         struct tg3_hw_stats *sp = tp->hw_stats;
8771
8772         if (!netif_carrier_ok(tp->dev))
8773                 return;
8774
8775         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8776         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8777         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8778         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8779         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8780         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8781         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8782         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8783         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8784         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8785         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8786         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8787         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8788
8789         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8790         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8791         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8792         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8793         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8794         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8795         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8796         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8797         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8798         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8799         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8800         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8801         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8802         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8803
8804         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
8805         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
8806             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
8807             tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
8808                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8809         } else {
8810                 u32 val = tr32(HOSTCC_FLOW_ATTN);
8811                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
8812                 if (val) {
8813                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
8814                         sp->rx_discards.low += val;
8815                         if (sp->rx_discards.low < val)
8816                                 sp->rx_discards.high += 1;
8817                 }
8818                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
8819         }
8820         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
8821 }
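/* The periodic fetch matters because the hardware counters are only 32 bits
 * wide: at gigabit line rate the octet counters can wrap in roughly
 * 2^32 bytes / 125 MB/s, i.e. about 34 seconds, so the once-per-second poll
 * from tg3_timer comfortably outpaces the fastest-wrapping counter.
 */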
8822
8823 static void tg3_chk_missed_msi(struct tg3 *tp)
8824 {
8825         u32 i;
8826
8827         for (i = 0; i < tp->irq_cnt; i++) {
8828                 struct tg3_napi *tnapi = &tp->napi[i];
8829
8830                 if (tg3_has_work(tnapi)) {
8831                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
8832                             tnapi->last_tx_cons == tnapi->tx_cons) {
8833                                 if (tnapi->chk_msi_cnt < 1) {
8834                                         tnapi->chk_msi_cnt++;
8835                                         return;
8836                                 }
8837                                 tw32_mailbox(tnapi->int_mbox,
8838                                              tnapi->last_tag << 24);
8839                         }
8840                 }
8841                 tnapi->chk_msi_cnt = 0;
8842                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
8843                 tnapi->last_tx_cons = tnapi->tx_cons;
8844         }
8845 }
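/* The heuristic above: if a vector has had work pending across two
 * consecutive timer ticks with neither consumer index moving, the MSI that
 * should have serviced it was presumably lost, so the interrupt mailbox is
 * rewritten with the last status tag to re-arm the vector.
 */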
8846
8847 static void tg3_timer(unsigned long __opaque)
8848 {
8849         struct tg3 *tp = (struct tg3 *) __opaque;
8850
8851         if (tp->irq_sync)
8852                 goto restart_timer;
8853
8854         spin_lock(&tp->lock);
8855
8856         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8857             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8858                 tg3_chk_missed_msi(tp);
8859
8860         if (!tg3_flag(tp, TAGGED_STATUS)) {
8861                 /* All of this garbage exists because, with non-tagged
8862                  * IRQ status, the mailbox/status_block protocol the chip
8863                  * uses with the CPU is race prone.
8864                  */
8865                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
8866                         tw32(GRC_LOCAL_CTRL,
8867                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
8868                 } else {
8869                         tw32(HOSTCC_MODE, tp->coalesce_mode |
8870                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
8871                 }
8872
8873                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8874                         tg3_flag_set(tp, RESTART_TIMER);
8875                         spin_unlock(&tp->lock);
8876                         schedule_work(&tp->reset_task);
8877                         return;
8878                 }
8879         }
8880
8881         /* This part only runs once per second. */
8882         if (!--tp->timer_counter) {
8883                 if (tg3_flag(tp, 5705_PLUS))
8884                         tg3_periodic_fetch_stats(tp);
8885
8886                 if (tp->setlpicnt && !--tp->setlpicnt)
8887                         tg3_phy_eee_enable(tp);
8888
8889                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
8890                         u32 mac_stat;
8891                         int phy_event;
8892
8893                         mac_stat = tr32(MAC_STATUS);
8894
8895                         phy_event = 0;
8896                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
8897                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
8898                                         phy_event = 1;
8899                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
8900                                 phy_event = 1;
8901
8902                         if (phy_event)
8903                                 tg3_setup_phy(tp, 0);
8904                 } else if (tg3_flag(tp, POLL_SERDES)) {
8905                         u32 mac_stat = tr32(MAC_STATUS);
8906                         int need_setup = 0;
8907
8908                         if (netif_carrier_ok(tp->dev) &&
8909                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
8910                                 need_setup = 1;
8911                         }
8912                         if (!netif_carrier_ok(tp->dev) &&
8913                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
8914                                          MAC_STATUS_SIGNAL_DET))) {
8915                                 need_setup = 1;
8916                         }
8917                         if (need_setup) {
8918                                 if (!tp->serdes_counter) {
8919                                         tw32_f(MAC_MODE,
8920                                              (tp->mac_mode &
8921                                               ~MAC_MODE_PORT_MODE_MASK));
8922                                         udelay(40);
8923                                         tw32_f(MAC_MODE, tp->mac_mode);
8924                                         udelay(40);
8925                                 }
8926                                 tg3_setup_phy(tp, 0);
8927                         }
8928                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8929                            tg3_flag(tp, 5780_CLASS)) {
8930                         tg3_serdes_parallel_detect(tp);
8931                 }
8932
8933                 tp->timer_counter = tp->timer_multiplier;
8934         }
8935
8936         /* Heartbeat is only sent once every 2 seconds.
8937          *
8938          * The heartbeat is to tell the ASF firmware that the host
8939          * driver is still alive.  In the event that the OS crashes,
8940          * ASF needs to reset the hardware to free up the FIFO space
8941          * that may be filled with rx packets destined for the host.
8942          * If the FIFO is full, ASF will no longer function properly.
8943          *
8944          * Unintended resets have been reported on real time kernels
8945          * where the timer doesn't run on time.  Netpoll will also have
8946          * the same problem.
8947          *
8948          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
8949          * to check the ring condition when the heartbeat is expiring
8950          * before doing the reset.  This will prevent most unintended
8951          * resets.
8952          */
8953         if (!--tp->asf_counter) {
8954                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
8955                         tg3_wait_for_event_ack(tp);
8956
8957                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
8958                                       FWCMD_NICDRV_ALIVE3);
8959                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
8960                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
8961                                       TG3_FW_UPDATE_TIMEOUT_SEC);
8962
8963                         tg3_generate_fw_event(tp);
8964                 }
8965                 tp->asf_counter = tp->asf_multiplier;
8966         }
8967
8968         spin_unlock(&tp->lock);
8969
8970 restart_timer:
8971         tp->timer.expires = jiffies + tp->timer_offset;
8972         add_timer(&tp->timer);
8973 }
8974
8975 static int tg3_request_irq(struct tg3 *tp, int irq_num)
8976 {
8977         irq_handler_t fn;
8978         unsigned long flags;
8979         char *name;
8980         struct tg3_napi *tnapi = &tp->napi[irq_num];
8981
8982         if (tp->irq_cnt == 1)
8983                 name = tp->dev->name;
8984         else {
8985                 name = &tnapi->irq_lbl[0];
8986                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
8987                 name[IFNAMSIZ-1] = 0;
8988         }
8989
8990         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
8991                 fn = tg3_msi;
8992                 if (tg3_flag(tp, 1SHOT_MSI))
8993                         fn = tg3_msi_1shot;
8994                 flags = 0;
8995         } else {
8996                 fn = tg3_interrupt;
8997                 if (tg3_flag(tp, TAGGED_STATUS))
8998                         fn = tg3_interrupt_tagged;
8999                 flags = IRQF_SHARED;
9000         }
9001
9002         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9003 }
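/* Illustration (assuming a device named "eth0" using five vectors): the
 * labels registered above appear in /proc/interrupts as "eth0-0" through
 * "eth0-4", while a single-vector setup keeps the bare "eth0" name.
 */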
9004
9005 static int tg3_test_interrupt(struct tg3 *tp)
9006 {
9007         struct tg3_napi *tnapi = &tp->napi[0];
9008         struct net_device *dev = tp->dev;
9009         int err, i, intr_ok = 0;
9010         u32 val;
9011
9012         if (!netif_running(dev))
9013                 return -ENODEV;
9014
9015         tg3_disable_ints(tp);
9016
9017         free_irq(tnapi->irq_vec, tnapi);
9018
9019         /*
9020          * Turn off MSI one-shot mode.  Otherwise this test has no
9021          * way of observing whether the interrupt was delivered.
9022          */
9023         if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9024                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9025                 tw32(MSGINT_MODE, val);
9026         }
9027
9028         err = request_irq(tnapi->irq_vec, tg3_test_isr,
9029                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9030         if (err)
9031                 return err;
9032
9033         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9034         tg3_enable_ints(tp);
9035
9036         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9037                tnapi->coal_now);
9038
9039         for (i = 0; i < 5; i++) {
9040                 u32 int_mbox, misc_host_ctrl;
9041
9042                 int_mbox = tr32_mailbox(tnapi->int_mbox);
9043                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9044
9045                 if ((int_mbox != 0) ||
9046                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9047                         intr_ok = 1;
9048                         break;
9049                 }
9050
9051                 msleep(10);
9052         }
9053
9054         tg3_disable_ints(tp);
9055
9056         free_irq(tnapi->irq_vec, tnapi);
9057
9058         err = tg3_request_irq(tp, 0);
9059
9060         if (err)
9061                 return err;
9062
9063         if (intr_ok) {
9064                 /* Re-enable MSI one-shot mode. */
9065                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9066                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9067                         tw32(MSGINT_MODE, val);
9068                 }
9069                 return 0;
9070         }
9071
9072         return -EIO;
9073 }
9074
9075 /* Returns 0 if the MSI test succeeds, or if the test fails but INTx
9076  * mode is successfully restored.
9077  */
9078 static int tg3_test_msi(struct tg3 *tp)
9079 {
9080         int err;
9081         u16 pci_cmd;
9082
9083         if (!tg3_flag(tp, USING_MSI))
9084                 return 0;
9085
9086         /* Turn off SERR reporting in case MSI terminates with Master
9087          * Abort.
9088          */
9089         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9090         pci_write_config_word(tp->pdev, PCI_COMMAND,
9091                               pci_cmd & ~PCI_COMMAND_SERR);
9092
9093         err = tg3_test_interrupt(tp);
9094
9095         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9096
9097         if (!err)
9098                 return 0;
9099
9100         /* other failures */
9101         if (err != -EIO)
9102                 return err;
9103
9104         /* MSI test failed, go back to INTx mode */
9105         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9106                     "to INTx mode. Please report this failure to the PCI "
9107                     "maintainer and include system chipset information\n");
9108
9109         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9110
9111         pci_disable_msi(tp->pdev);
9112
9113         tg3_flag_clear(tp, USING_MSI);
9114         tp->napi[0].irq_vec = tp->pdev->irq;
9115
9116         err = tg3_request_irq(tp, 0);
9117         if (err)
9118                 return err;
9119
9120         /* Need to reset the chip because the MSI cycle may have terminated
9121          * with Master Abort.
9122          */
9123         tg3_full_lock(tp, 1);
9124
9125         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9126         err = tg3_init_hw(tp, 1);
9127
9128         tg3_full_unlock(tp);
9129
9130         if (err)
9131                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9132
9133         return err;
9134 }
9135
9136 static int tg3_request_firmware(struct tg3 *tp)
9137 {
9138         const __be32 *fw_data;
9139
9140         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9141                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9142                            tp->fw_needed);
9143                 return -ENOENT;
9144         }
9145
9146         fw_data = (void *)tp->fw->data;
9147
9148         /* The firmware blob starts with version numbers, followed by
9149          * the start address and the _full_ length including BSS sections
9150          * (which must be longer than the actual data, of course).
9151          */
9152
9153         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
9154         if (tp->fw_len < (tp->fw->size - 12)) {
9155                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9156                            tp->fw_len, tp->fw_needed);
9157                 release_firmware(tp->fw);
9158                 tp->fw = NULL;
9159                 return -EINVAL;
9160         }
9161
9162         /* We no longer need firmware; we have it. */
9163         tp->fw_needed = NULL;
9164         return 0;
9165 }
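/* Header layout assumed above, as three big-endian 32-bit words at the
 * start of the blob: fw_data[0] is the version, fw_data[1] the load/start
 * address, and fw_data[2] the full image length including BSS.  The code
 * proper follows these 12 bytes, hence the tp->fw->size - 12 comparison.
 */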
9166
9167 static bool tg3_enable_msix(struct tg3 *tp)
9168 {
9169         int i, rc, cpus = num_online_cpus();
9170         struct msix_entry msix_ent[tp->irq_max];
9171
9172         if (cpus == 1)
9173                 /* Just fall back to the simpler MSI mode. */
9174                 return false;
9175
9176         /*
9177          * We want as many rx rings enabled as there are cpus.
9178          * The first MSIX vector only deals with link interrupts, etc,
9179          * so we add one to the number of vectors we are requesting.
9180          */
9181         tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
9182
9183         for (i = 0; i < tp->irq_max; i++) {
9184                 msix_ent[i].entry  = i;
9185                 msix_ent[i].vector = 0;
9186         }
9187
9188         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9189         if (rc < 0) {
9190                 return false;
9191         } else if (rc != 0) {
9192                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9193                         return false;
9194                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9195                               tp->irq_cnt, rc);
9196                 tp->irq_cnt = rc;
9197         }
9198
9199         for (i = 0; i < tp->irq_max; i++)
9200                 tp->napi[i].irq_vec = msix_ent[i].vector;
9201
9202         netif_set_real_num_tx_queues(tp->dev, 1);
9203         rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9204         if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9205                 pci_disable_msix(tp->pdev);
9206                 return false;
9207         }
9208
9209         if (tp->irq_cnt > 1) {
9210                 tg3_flag_set(tp, ENABLE_RSS);
9211
9212                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9213                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9214                         tg3_flag_set(tp, ENABLE_TSS);
9215                         netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9216                 }
9217         }
9218
9219         return true;
9220 }
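/* Example of the resulting layout, assuming four online CPUs and an
 * irq_max of at least five: tp->irq_cnt becomes 5, vector 0 handles link
 * and other non-ring events, and vectors 1-4 each service one rx return
 * ring (and, on 5719/5720 parts with TSS, one tx queue apiece).
 */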
9221
9222 static void tg3_ints_init(struct tg3 *tp)
9223 {
9224         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9225             !tg3_flag(tp, TAGGED_STATUS)) {
9226                 /* All MSI-supporting chips should support tagged
9227                  * status.  Assert that this is the case.
9228                  */
9229                 netdev_warn(tp->dev,
9230                             "MSI without TAGGED_STATUS? Not using MSI\n");
9231                 goto defcfg;
9232         }
9233
9234         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9235                 tg3_flag_set(tp, USING_MSIX);
9236         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9237                 tg3_flag_set(tp, USING_MSI);
9238
9239         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9240                 u32 msi_mode = tr32(MSGINT_MODE);
9241                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9242                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9243                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9244         }
9245 defcfg:
9246         if (!tg3_flag(tp, USING_MSIX)) {
9247                 tp->irq_cnt = 1;
9248                 tp->napi[0].irq_vec = tp->pdev->irq;
9249                 netif_set_real_num_tx_queues(tp->dev, 1);
9250                 netif_set_real_num_rx_queues(tp->dev, 1);
9251         }
9252 }
9253
9254 static void tg3_ints_fini(struct tg3 *tp)
9255 {
9256         if (tg3_flag(tp, USING_MSIX))
9257                 pci_disable_msix(tp->pdev);
9258         else if (tg3_flag(tp, USING_MSI))
9259                 pci_disable_msi(tp->pdev);
9260         tg3_flag_clear(tp, USING_MSI);
9261         tg3_flag_clear(tp, USING_MSIX);
9262         tg3_flag_clear(tp, ENABLE_RSS);
9263         tg3_flag_clear(tp, ENABLE_TSS);
9264 }
9265
9266 static int tg3_open(struct net_device *dev)
9267 {
9268         struct tg3 *tp = netdev_priv(dev);
9269         int i, err;
9270
9271         if (tp->fw_needed) {
9272                 err = tg3_request_firmware(tp);
9273                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9274                         if (err)
9275                                 return err;
9276                 } else if (err) {
9277                         netdev_warn(tp->dev, "TSO capability disabled\n");
9278                         tg3_flag_clear(tp, TSO_CAPABLE);
9279                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9280                         netdev_notice(tp->dev, "TSO capability restored\n");
9281                         tg3_flag_set(tp, TSO_CAPABLE);
9282                 }
9283         }
9284
9285         netif_carrier_off(tp->dev);
9286
9287         err = tg3_power_up(tp);
9288         if (err)
9289                 return err;
9290
9291         tg3_full_lock(tp, 0);
9292
9293         tg3_disable_ints(tp);
9294         tg3_flag_clear(tp, INIT_COMPLETE);
9295
9296         tg3_full_unlock(tp);
9297
9298         /*
9299          * Set up interrupts first so we know how
9300          * many NAPI resources to allocate.
9301          */
9302         tg3_ints_init(tp);
9303
9304         /* The placement of this call is tied
9305          * to the setup and use of Host TX descriptors.
9306          */
9307         err = tg3_alloc_consistent(tp);
9308         if (err)
9309                 goto err_out1;
9310
9311         tg3_napi_init(tp);
9312
9313         tg3_napi_enable(tp);
9314
9315         for (i = 0; i < tp->irq_cnt; i++) {
9316                 struct tg3_napi *tnapi = &tp->napi[i];
9317                 err = tg3_request_irq(tp, i);
9318                 if (err) {
9319                         for (i--; i >= 0; i--)
9320                                 free_irq(tp->napi[i].irq_vec, &tp->napi[i]);
9321                         break;
9322                 }
9323         }
9324
9325         if (err)
9326                 goto err_out2;
9327
9328         tg3_full_lock(tp, 0);
9329
9330         err = tg3_init_hw(tp, 1);
9331         if (err) {
9332                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9333                 tg3_free_rings(tp);
9334         } else {
9335                 if (tg3_flag(tp, TAGGED_STATUS) &&
9336                         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9337                         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
9338                         tp->timer_offset = HZ;
9339                 else
9340                         tp->timer_offset = HZ / 10;
9341
9342                 BUG_ON(tp->timer_offset > HZ);
9343                 tp->timer_counter = tp->timer_multiplier =
9344                         (HZ / tp->timer_offset);
9345                 tp->asf_counter = tp->asf_multiplier =
9346                         ((HZ / tp->timer_offset) * 2);
9347
9348                 init_timer(&tp->timer);
9349                 tp->timer.expires = jiffies + tp->timer_offset;
9350                 tp->timer.data = (unsigned long) tp;
9351                 tp->timer.function = tg3_timer;
9352         }
9353
9354         tg3_full_unlock(tp);
9355
9356         if (err)
9357                 goto err_out3;
9358
9359         if (tg3_flag(tp, USING_MSI)) {
9360                 err = tg3_test_msi(tp);
9361
9362                 if (err) {
9363                         tg3_full_lock(tp, 0);
9364                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9365                         tg3_free_rings(tp);
9366                         tg3_full_unlock(tp);
9367
9368                         goto err_out2;
9369                 }
9370
9371                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9372                         u32 val = tr32(PCIE_TRANSACTION_CFG);
9373
9374                         tw32(PCIE_TRANSACTION_CFG,
9375                              val | PCIE_TRANS_CFG_1SHOT_MSI);
9376                 }
9377         }
9378
9379         tg3_phy_start(tp);
9380
9381         tg3_full_lock(tp, 0);
9382
9383         add_timer(&tp->timer);
9384         tg3_flag_set(tp, INIT_COMPLETE);
9385         tg3_enable_ints(tp);
9386
9387         tg3_full_unlock(tp);
9388
9389         netif_tx_start_all_queues(dev);
9390
9391         /*
9392          * If the loopback feature was turned on while the device was down,
9393          * make sure that it is reinstalled properly now.
9394          */
9395         if (dev->features & NETIF_F_LOOPBACK)
9396                 tg3_set_loopback(dev, dev->features);
9397
9398         return 0;
9399
9400 err_out3:
9401         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9402                 struct tg3_napi *tnapi = &tp->napi[i];
9403                 free_irq(tnapi->irq_vec, tnapi);
9404         }
9405
9406 err_out2:
9407         tg3_napi_disable(tp);
9408         tg3_napi_fini(tp);
9409         tg3_free_consistent(tp);
9410
9411 err_out1:
9412         tg3_ints_fini(tp);
9413         return err;
9414 }
9415
9416 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9417                                                  struct rtnl_link_stats64 *);
9418 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9419
9420 static int tg3_close(struct net_device *dev)
9421 {
9422         int i;
9423         struct tg3 *tp = netdev_priv(dev);
9424
9425         tg3_napi_disable(tp);
9426         cancel_work_sync(&tp->reset_task);
9427
9428         netif_tx_stop_all_queues(dev);
9429
9430         del_timer_sync(&tp->timer);
9431
9432         tg3_phy_stop(tp);
9433
9434         tg3_full_lock(tp, 1);
9435
9436         tg3_disable_ints(tp);
9437
9438         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9439         tg3_free_rings(tp);
9440         tg3_flag_clear(tp, INIT_COMPLETE);
9441
9442         tg3_full_unlock(tp);
9443
9444         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9445                 struct tg3_napi *tnapi = &tp->napi[i];
9446                 free_irq(tnapi->irq_vec, tnapi);
9447         }
9448
9449         tg3_ints_fini(tp);
9450
9451         tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9452
9453         memcpy(&tp->estats_prev, tg3_get_estats(tp),
9454                sizeof(tp->estats_prev));
9455
9456         tg3_napi_fini(tp);
9457
9458         tg3_free_consistent(tp);
9459
9460         tg3_power_down(tp);
9461
9462         netif_carrier_off(tp->dev);
9463
9464         return 0;
9465 }
9466
9467 static inline u64 get_stat64(tg3_stat64_t *val)
9468 {
9469         return ((u64)val->high << 32) | ((u64)val->low);
9470 }
9471
9472 static u64 calc_crc_errors(struct tg3 *tp)
9473 {
9474         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9475
9476         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9477             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9478              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9479                 u32 val;
9480
9481                 spin_lock_bh(&tp->lock);
9482                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9483                         tg3_writephy(tp, MII_TG3_TEST1,
9484                                      val | MII_TG3_TEST1_CRC_EN);
9485                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9486                 } else
9487                         val = 0;
9488                 spin_unlock_bh(&tp->lock);
9489
9490                 tp->phy_crc_errors += val;
9491
9492                 return tp->phy_crc_errors;
9493         }
9494
9495         return get_stat64(&hw_stats->rx_fcs_errors);
9496 }
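/* tp->phy_crc_errors accumulates across calls because the PHY's RXR error
 * counter is evidently clear-on-read: each MII_TG3_RXR_COUNTERS read
 * returns only the errors seen since the previous read.
 */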
9497
9498 #define ESTAT_ADD(member) \
9499         estats->member =        old_estats->member + \
9500                                 get_stat64(&hw_stats->member)
9501
9502 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9503 {
9504         struct tg3_ethtool_stats *estats = &tp->estats;
9505         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9506         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9507
9508         if (!hw_stats)
9509                 return old_estats;
9510
9511         ESTAT_ADD(rx_octets);
9512         ESTAT_ADD(rx_fragments);
9513         ESTAT_ADD(rx_ucast_packets);
9514         ESTAT_ADD(rx_mcast_packets);
9515         ESTAT_ADD(rx_bcast_packets);
9516         ESTAT_ADD(rx_fcs_errors);
9517         ESTAT_ADD(rx_align_errors);
9518         ESTAT_ADD(rx_xon_pause_rcvd);
9519         ESTAT_ADD(rx_xoff_pause_rcvd);
9520         ESTAT_ADD(rx_mac_ctrl_rcvd);
9521         ESTAT_ADD(rx_xoff_entered);
9522         ESTAT_ADD(rx_frame_too_long_errors);
9523         ESTAT_ADD(rx_jabbers);
9524         ESTAT_ADD(rx_undersize_packets);
9525         ESTAT_ADD(rx_in_length_errors);
9526         ESTAT_ADD(rx_out_length_errors);
9527         ESTAT_ADD(rx_64_or_less_octet_packets);
9528         ESTAT_ADD(rx_65_to_127_octet_packets);
9529         ESTAT_ADD(rx_128_to_255_octet_packets);
9530         ESTAT_ADD(rx_256_to_511_octet_packets);
9531         ESTAT_ADD(rx_512_to_1023_octet_packets);
9532         ESTAT_ADD(rx_1024_to_1522_octet_packets);
9533         ESTAT_ADD(rx_1523_to_2047_octet_packets);
9534         ESTAT_ADD(rx_2048_to_4095_octet_packets);
9535         ESTAT_ADD(rx_4096_to_8191_octet_packets);
9536         ESTAT_ADD(rx_8192_to_9022_octet_packets);
9537
9538         ESTAT_ADD(tx_octets);
9539         ESTAT_ADD(tx_collisions);
9540         ESTAT_ADD(tx_xon_sent);
9541         ESTAT_ADD(tx_xoff_sent);
9542         ESTAT_ADD(tx_flow_control);
9543         ESTAT_ADD(tx_mac_errors);
9544         ESTAT_ADD(tx_single_collisions);
9545         ESTAT_ADD(tx_mult_collisions);
9546         ESTAT_ADD(tx_deferred);
9547         ESTAT_ADD(tx_excessive_collisions);
9548         ESTAT_ADD(tx_late_collisions);
9549         ESTAT_ADD(tx_collide_2times);
9550         ESTAT_ADD(tx_collide_3times);
9551         ESTAT_ADD(tx_collide_4times);
9552         ESTAT_ADD(tx_collide_5times);
9553         ESTAT_ADD(tx_collide_6times);
9554         ESTAT_ADD(tx_collide_7times);
9555         ESTAT_ADD(tx_collide_8times);
9556         ESTAT_ADD(tx_collide_9times);
9557         ESTAT_ADD(tx_collide_10times);
9558         ESTAT_ADD(tx_collide_11times);
9559         ESTAT_ADD(tx_collide_12times);
9560         ESTAT_ADD(tx_collide_13times);
9561         ESTAT_ADD(tx_collide_14times);
9562         ESTAT_ADD(tx_collide_15times);
9563         ESTAT_ADD(tx_ucast_packets);
9564         ESTAT_ADD(tx_mcast_packets);
9565         ESTAT_ADD(tx_bcast_packets);
9566         ESTAT_ADD(tx_carrier_sense_errors);
9567         ESTAT_ADD(tx_discards);
9568         ESTAT_ADD(tx_errors);
9569
9570         ESTAT_ADD(dma_writeq_full);
9571         ESTAT_ADD(dma_write_prioq_full);
9572         ESTAT_ADD(rxbds_empty);
9573         ESTAT_ADD(rx_discards);
9574         ESTAT_ADD(rx_errors);
9575         ESTAT_ADD(rx_threshold_hit);
9576
9577         ESTAT_ADD(dma_readq_full);
9578         ESTAT_ADD(dma_read_prioq_full);
9579         ESTAT_ADD(tx_comp_queue_full);
9580
9581         ESTAT_ADD(ring_set_send_prod_index);
9582         ESTAT_ADD(ring_status_update);
9583         ESTAT_ADD(nic_irqs);
9584         ESTAT_ADD(nic_avoided_irqs);
9585         ESTAT_ADD(nic_tx_threshold_hit);
9586
9587         ESTAT_ADD(mbuf_lwm_thresh_hit);
9588
9589         return estats;
9590 }
9591
9592 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9593                                                  struct rtnl_link_stats64 *stats)
9594 {
9595         struct tg3 *tp = netdev_priv(dev);
9596         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9597         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9598
9599         if (!hw_stats)
9600                 return old_stats;
9601
9602         stats->rx_packets = old_stats->rx_packets +
9603                 get_stat64(&hw_stats->rx_ucast_packets) +
9604                 get_stat64(&hw_stats->rx_mcast_packets) +
9605                 get_stat64(&hw_stats->rx_bcast_packets);
9606
9607         stats->tx_packets = old_stats->tx_packets +
9608                 get_stat64(&hw_stats->tx_ucast_packets) +
9609                 get_stat64(&hw_stats->tx_mcast_packets) +
9610                 get_stat64(&hw_stats->tx_bcast_packets);
9611
9612         stats->rx_bytes = old_stats->rx_bytes +
9613                 get_stat64(&hw_stats->rx_octets);
9614         stats->tx_bytes = old_stats->tx_bytes +
9615                 get_stat64(&hw_stats->tx_octets);
9616
9617         stats->rx_errors = old_stats->rx_errors +
9618                 get_stat64(&hw_stats->rx_errors);
9619         stats->tx_errors = old_stats->tx_errors +
9620                 get_stat64(&hw_stats->tx_errors) +
9621                 get_stat64(&hw_stats->tx_mac_errors) +
9622                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9623                 get_stat64(&hw_stats->tx_discards);
9624
9625         stats->multicast = old_stats->multicast +
9626                 get_stat64(&hw_stats->rx_mcast_packets);
9627         stats->collisions = old_stats->collisions +
9628                 get_stat64(&hw_stats->tx_collisions);
9629
9630         stats->rx_length_errors = old_stats->rx_length_errors +
9631                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9632                 get_stat64(&hw_stats->rx_undersize_packets);
9633
9634         stats->rx_over_errors = old_stats->rx_over_errors +
9635                 get_stat64(&hw_stats->rxbds_empty);
9636         stats->rx_frame_errors = old_stats->rx_frame_errors +
9637                 get_stat64(&hw_stats->rx_align_errors);
9638         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9639                 get_stat64(&hw_stats->tx_discards);
9640         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9641                 get_stat64(&hw_stats->tx_carrier_sense_errors);
9642
9643         stats->rx_crc_errors = old_stats->rx_crc_errors +
9644                 calc_crc_errors(tp);
9645
9646         stats->rx_missed_errors = old_stats->rx_missed_errors +
9647                 get_stat64(&hw_stats->rx_discards);
9648
9649         stats->rx_dropped = tp->rx_dropped;
9650
9651         return stats;
9652 }
9653
9654 static inline u32 calc_crc(unsigned char *buf, int len)
9655 {
9656         u32 reg;
9657         u32 tmp;
9658         int j, k;
9659
9660         reg = 0xffffffff;
9661
9662         for (j = 0; j < len; j++) {
9663                 reg ^= buf[j];
9664
9665                 for (k = 0; k < 8; k++) {
9666                         tmp = reg & 0x01;
9667
9668                         reg >>= 1;
9669
9670                         if (tmp)
9671                                 reg ^= 0xedb88320;
9672                 }
9673         }
9674
9675         return ~reg;
9676 }
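/* calc_crc() is the standard bit-reflected Ethernet CRC-32 (polynomial
 * 0xedb88320, initial value 0xffffffff, final complement).  As a sanity
 * check, the well-known test vector "123456789" hashes to 0xcbf43926.
 */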
9677
9678 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9679 {
9680         /* accept or reject all multicast frames */
9681         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9682         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9683         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9684         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9685 }
9686
9687 static void __tg3_set_rx_mode(struct net_device *dev)
9688 {
9689         struct tg3 *tp = netdev_priv(dev);
9690         u32 rx_mode;
9691
9692         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9693                                   RX_MODE_KEEP_VLAN_TAG);
9694
9695 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9696         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9697          * flag clear.
9698          */
9699         if (!tg3_flag(tp, ENABLE_ASF))
9700                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9701 #endif
9702
9703         if (dev->flags & IFF_PROMISC) {
9704                 /* Promiscuous mode. */
9705                 rx_mode |= RX_MODE_PROMISC;
9706         } else if (dev->flags & IFF_ALLMULTI) {
9707                 /* Accept all multicast. */
9708                 tg3_set_multi(tp, 1);
9709         } else if (netdev_mc_empty(dev)) {
9710                 /* Reject all multicast. */
9711                 tg3_set_multi(tp, 0);
9712         } else {
9713                 /* Accept one or more multicast(s). */
9714                 struct netdev_hw_addr *ha;
9715                 u32 mc_filter[4] = { 0, };
9716                 u32 regidx;
9717                 u32 bit;
9718                 u32 crc;
9719
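                /* The low 7 bits of the inverted CRC form the hash:
                 * bits 6:5 pick one of the four 32-bit MAC_HASH
                 * registers, bits 4:0 pick the bit within it.
                 */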
9720                 netdev_for_each_mc_addr(ha, dev) {
9721                         crc = calc_crc(ha->addr, ETH_ALEN);
9722                         bit = ~crc & 0x7f;
9723                         regidx = (bit & 0x60) >> 5;
9724                         bit &= 0x1f;
9725                         mc_filter[regidx] |= (1 << bit);
9726                 }
9727
9728                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9729                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9730                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9731                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9732         }
9733
9734         if (rx_mode != tp->rx_mode) {
9735                 tp->rx_mode = rx_mode;
9736                 tw32_f(MAC_RX_MODE, rx_mode);
9737                 udelay(10);
9738         }
9739 }
9740
9741 static void tg3_set_rx_mode(struct net_device *dev)
9742 {
9743         struct tg3 *tp = netdev_priv(dev);
9744
9745         if (!netif_running(dev))
9746                 return;
9747
9748         tg3_full_lock(tp, 0);
9749         __tg3_set_rx_mode(dev);
9750         tg3_full_unlock(tp);
9751 }
9752
9753 static int tg3_get_regs_len(struct net_device *dev)
9754 {
9755         return TG3_REG_BLK_SIZE;
9756 }
9757
9758 static void tg3_get_regs(struct net_device *dev,
9759                 struct ethtool_regs *regs, void *_p)
9760 {
9761         struct tg3 *tp = netdev_priv(dev);
9762
9763         regs->version = 0;
9764
9765         memset(_p, 0, TG3_REG_BLK_SIZE);
9766
9767         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9768                 return;
9769
9770         tg3_full_lock(tp, 0);
9771
9772         tg3_dump_legacy_regs(tp, (u32 *)_p);
9773
9774         tg3_full_unlock(tp);
9775 }
9776
9777 static int tg3_get_eeprom_len(struct net_device *dev)
9778 {
9779         struct tg3 *tp = netdev_priv(dev);
9780
9781         return tp->nvram_size;
9782 }
9783
9784 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9785 {
9786         struct tg3 *tp = netdev_priv(dev);
9787         int ret;
9788         u8  *pd;
9789         u32 i, offset, len, b_offset, b_count;
9790         __be32 val;
9791
9792         if (tg3_flag(tp, NO_NVRAM))
9793                 return -EINVAL;
9794
9795         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9796                 return -EAGAIN;
9797
9798         offset = eeprom->offset;
9799         len = eeprom->len;
9800         eeprom->len = 0;
9801
9802         eeprom->magic = TG3_EEPROM_MAGIC;
9803
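        /* NVRAM is read in 4-byte words, so an unaligned start is
         * handled by reading the containing word and copying out only
         * the requested tail bytes.
         */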
9804         if (offset & 3) {
9805                 /* adjustments to start on required 4 byte boundary */
9806                 b_offset = offset & 3;
9807                 b_count = 4 - b_offset;
9808                 if (b_count > len) {
9809                         /* i.e. offset=1 len=2 */
9810                         b_count = len;
9811                 }
9812                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9813                 if (ret)
9814                         return ret;
9815                 memcpy(data, ((char *)&val) + b_offset, b_count);
9816                 len -= b_count;
9817                 offset += b_count;
9818                 eeprom->len += b_count;
9819         }
9820
9821         /* read bytes up to the last 4 byte boundary */
9822         pd = &data[eeprom->len];
9823         for (i = 0; i < (len - (len & 3)); i += 4) {
9824                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9825                 if (ret) {
9826                         eeprom->len += i;
9827                         return ret;
9828                 }
9829                 memcpy(pd + i, &val, 4);
9830         }
9831         eeprom->len += i;
9832
9833         if (len & 3) {
9834                 /* read last bytes not ending on 4 byte boundary */
9835                 pd = &data[eeprom->len];
9836                 b_count = len & 3;
9837                 b_offset = offset + len - b_count;
9838                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9839                 if (ret)
9840                         return ret;
9841                 memcpy(pd, &val, b_count);
9842                 eeprom->len += b_count;
9843         }
9844         return 0;
9845 }
9846
9847 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9848
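/* ethtool EEPROM writes are widened to 4-byte NVRAM word boundaries:
 * the bordering words are read first and the user data is merged into
 * a bounce buffer before programming.  For example, a 3-byte write at
 * offset 2 becomes an aligned 8-byte write covering offsets 0-7.
 */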
9849 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9850 {
9851         struct tg3 *tp = netdev_priv(dev);
9852         int ret;
9853         u32 offset, len, b_offset, odd_len;
9854         u8 *buf;
9855         __be32 start, end;
9856
9857         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9858                 return -EAGAIN;
9859
9860         if (tg3_flag(tp, NO_NVRAM) ||
9861             eeprom->magic != TG3_EEPROM_MAGIC)
9862                 return -EINVAL;
9863
9864         offset = eeprom->offset;
9865         len = eeprom->len;
9866
9867         if ((b_offset = (offset & 3))) {
9868                 /* adjustments to start on required 4 byte boundary */
9869                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
9870                 if (ret)
9871                         return ret;
9872                 len += b_offset;
9873                 offset &= ~3;
9874                 if (len < 4)
9875                         len = 4;
9876         }
9877
9878         odd_len = 0;
9879         if (len & 3) {
9880                 /* adjustments to end on required 4 byte boundary */
9881                 odd_len = 1;
9882                 len = (len + 3) & ~3;
9883                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
9884                 if (ret)
9885                         return ret;
9886         }
9887
9888         buf = data;
9889         if (b_offset || odd_len) {
9890                 buf = kmalloc(len, GFP_KERNEL);
9891                 if (!buf)
9892                         return -ENOMEM;
9893                 if (b_offset)
9894                         memcpy(buf, &start, 4);
9895                 if (odd_len)
9896                         memcpy(buf+len-4, &end, 4);
9897                 memcpy(buf + b_offset, data, eeprom->len);
9898         }
9899
9900         ret = tg3_nvram_write_block(tp, offset, len, buf);
9901
9902         if (buf != data)
9903                 kfree(buf);
9904
9905         return ret;
9906 }
9907
9908 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9909 {
9910         struct tg3 *tp = netdev_priv(dev);
9911
9912         if (tg3_flag(tp, USE_PHYLIB)) {
9913                 struct phy_device *phydev;
9914                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9915                         return -EAGAIN;
9916                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9917                 return phy_ethtool_gset(phydev, cmd);
9918         }
9919
9920         cmd->supported = (SUPPORTED_Autoneg);
9921
9922         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9923                 cmd->supported |= (SUPPORTED_1000baseT_Half |
9924                                    SUPPORTED_1000baseT_Full);
9925
9926         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
9927                 cmd->supported |= (SUPPORTED_100baseT_Half |
9928                                   SUPPORTED_100baseT_Full |
9929                                   SUPPORTED_10baseT_Half |
9930                                   SUPPORTED_10baseT_Full |
9931                                   SUPPORTED_TP);
9932                 cmd->port = PORT_TP;
9933         } else {
9934                 cmd->supported |= SUPPORTED_FIBRE;
9935                 cmd->port = PORT_FIBRE;
9936         }
9937
9938         cmd->advertising = tp->link_config.advertising;
9939         if (netif_running(dev)) {
9940                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
9941                 cmd->duplex = tp->link_config.active_duplex;
9942         } else {
9943                 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
9944                 cmd->duplex = DUPLEX_INVALID;
9945         }
9946         cmd->phy_address = tp->phy_addr;
9947         cmd->transceiver = XCVR_INTERNAL;
9948         cmd->autoneg = tp->link_config.autoneg;
9949         cmd->maxtxpkt = 0;
9950         cmd->maxrxpkt = 0;
9951         return 0;
9952 }
9953
9954 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9955 {
9956         struct tg3 *tp = netdev_priv(dev);
9957         u32 speed = ethtool_cmd_speed(cmd);
9958
9959         if (tg3_flag(tp, USE_PHYLIB)) {
9960                 struct phy_device *phydev;
9961                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9962                         return -EAGAIN;
9963                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9964                 return phy_ethtool_sset(phydev, cmd);
9965         }
9966
9967         if (cmd->autoneg != AUTONEG_ENABLE &&
9968             cmd->autoneg != AUTONEG_DISABLE)
9969                 return -EINVAL;
9970
9971         if (cmd->autoneg == AUTONEG_DISABLE &&
9972             cmd->duplex != DUPLEX_FULL &&
9973             cmd->duplex != DUPLEX_HALF)
9974                 return -EINVAL;
9975
9976         if (cmd->autoneg == AUTONEG_ENABLE) {
9977                 u32 mask = ADVERTISED_Autoneg |
9978                            ADVERTISED_Pause |
9979                            ADVERTISED_Asym_Pause;
9980
9981                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9982                         mask |= ADVERTISED_1000baseT_Half |
9983                                 ADVERTISED_1000baseT_Full;
9984
9985                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
9986                         mask |= ADVERTISED_100baseT_Half |
9987                                 ADVERTISED_100baseT_Full |
9988                                 ADVERTISED_10baseT_Half |
9989                                 ADVERTISED_10baseT_Full |
9990                                 ADVERTISED_TP;
9991                 else
9992                         mask |= ADVERTISED_FIBRE;
9993
9994                 if (cmd->advertising & ~mask)
9995                         return -EINVAL;
9996
9997                 mask &= (ADVERTISED_1000baseT_Half |
9998                          ADVERTISED_1000baseT_Full |
9999                          ADVERTISED_100baseT_Half |
10000                          ADVERTISED_100baseT_Full |
10001                          ADVERTISED_10baseT_Half |
10002                          ADVERTISED_10baseT_Full);
10003
10004                 cmd->advertising &= mask;
10005         } else {
10006                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10007                         if (speed != SPEED_1000)
10008                                 return -EINVAL;
10009
10010                         if (cmd->duplex != DUPLEX_FULL)
10011                                 return -EINVAL;
10012                 } else {
10013                         if (speed != SPEED_100 &&
10014                             speed != SPEED_10)
10015                                 return -EINVAL;
10016                 }
10017         }
10018
10019         tg3_full_lock(tp, 0);
10020
10021         tp->link_config.autoneg = cmd->autoneg;
10022         if (cmd->autoneg == AUTONEG_ENABLE) {
10023                 tp->link_config.advertising = (cmd->advertising |
10024                                               ADVERTISED_Autoneg);
10025                 tp->link_config.speed = SPEED_INVALID;
10026                 tp->link_config.duplex = DUPLEX_INVALID;
10027         } else {
10028                 tp->link_config.advertising = 0;
10029                 tp->link_config.speed = speed;
10030                 tp->link_config.duplex = cmd->duplex;
10031         }
10032
10033         tp->link_config.orig_speed = tp->link_config.speed;
10034         tp->link_config.orig_duplex = tp->link_config.duplex;
10035         tp->link_config.orig_autoneg = tp->link_config.autoneg;
10036
10037         if (netif_running(dev))
10038                 tg3_setup_phy(tp, 1);
10039
10040         tg3_full_unlock(tp);
10041
10042         return 0;
10043 }
10044
10045 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10046 {
10047         struct tg3 *tp = netdev_priv(dev);
10048
10049         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10050         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10051         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10052         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
10053 }
10054
10055 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10056 {
10057         struct tg3 *tp = netdev_priv(dev);
10058
10059         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10060                 wol->supported = WAKE_MAGIC;
10061         else
10062                 wol->supported = 0;
10063         wol->wolopts = 0;
10064         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10065                 wol->wolopts = WAKE_MAGIC;
10066         memset(&wol->sopass, 0, sizeof(wol->sopass));
10067 }
10068
10069 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10070 {
10071         struct tg3 *tp = netdev_priv(dev);
10072         struct device *dp = &tp->pdev->dev;
10073
10074         if (wol->wolopts & ~WAKE_MAGIC)
10075                 return -EINVAL;
10076         if ((wol->wolopts & WAKE_MAGIC) &&
10077             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10078                 return -EINVAL;
10079
10080         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10081
10082         spin_lock_bh(&tp->lock);
10083         if (device_may_wakeup(dp))
10084                 tg3_flag_set(tp, WOL_ENABLE);
10085         else
10086                 tg3_flag_clear(tp, WOL_ENABLE);
10087         spin_unlock_bh(&tp->lock);
10088
10089         return 0;
10090 }
10091
10092 static u32 tg3_get_msglevel(struct net_device *dev)
10093 {
10094         struct tg3 *tp = netdev_priv(dev);
10095         return tp->msg_enable;
10096 }
10097
10098 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10099 {
10100         struct tg3 *tp = netdev_priv(dev);
10101         tp->msg_enable = value;
10102 }
10103
10104 static int tg3_nway_reset(struct net_device *dev)
10105 {
10106         struct tg3 *tp = netdev_priv(dev);
10107         int r;
10108
10109         if (!netif_running(dev))
10110                 return -EAGAIN;
10111
10112         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10113                 return -EINVAL;
10114
10115         if (tg3_flag(tp, USE_PHYLIB)) {
10116                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10117                         return -EAGAIN;
10118                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10119         } else {
10120                 u32 bmcr;
10121
10122                 spin_lock_bh(&tp->lock);
10123                 r = -EINVAL;
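                /* BMCR is read twice and only the second value is
                 * used; the first read appears to serve as a dummy
                 * access for quirky PHYs.
                 */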
10124                 tg3_readphy(tp, MII_BMCR, &bmcr);
10125                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10126                     ((bmcr & BMCR_ANENABLE) ||
10127                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10128                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10129                                                    BMCR_ANENABLE);
10130                         r = 0;
10131                 }
10132                 spin_unlock_bh(&tp->lock);
10133         }
10134
10135         return r;
10136 }
10137
10138 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10139 {
10140         struct tg3 *tp = netdev_priv(dev);
10141
10142         ering->rx_max_pending = tp->rx_std_ring_mask;
10143         ering->rx_mini_max_pending = 0;
10144         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10145                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10146         else
10147                 ering->rx_jumbo_max_pending = 0;
10148
10149         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10150
10151         ering->rx_pending = tp->rx_pending;
10152         ering->rx_mini_pending = 0;
10153         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10154                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10155         else
10156                 ering->rx_jumbo_pending = 0;
10157
10158         ering->tx_pending = tp->napi[0].tx_pending;
10159 }
10160
10161 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10162 {
10163         struct tg3 *tp = netdev_priv(dev);
10164         int i, irq_sync = 0, err = 0;
10165
10166         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10167             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10168             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10169             (ering->tx_pending <= MAX_SKB_FRAGS) ||
10170             (tg3_flag(tp, TSO_BUG) &&
10171              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10172                 return -EINVAL;
10173
10174         if (netif_running(dev)) {
10175                 tg3_phy_stop(tp);
10176                 tg3_netif_stop(tp);
10177                 irq_sync = 1;
10178         }
10179
10180         tg3_full_lock(tp, irq_sync);
10181
10182         tp->rx_pending = ering->rx_pending;
10183
10184         if (tg3_flag(tp, MAX_RXPEND_64) &&
10185             tp->rx_pending > 63)
10186                 tp->rx_pending = 63;
10187         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10188
10189         for (i = 0; i < tp->irq_max; i++)
10190                 tp->napi[i].tx_pending = ering->tx_pending;
10191
10192         if (netif_running(dev)) {
10193                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10194                 err = tg3_restart_hw(tp, 1);
10195                 if (!err)
10196                         tg3_netif_start(tp);
10197         }
10198
10199         tg3_full_unlock(tp);
10200
10201         if (irq_sync && !err)
10202                 tg3_phy_start(tp);
10203
10204         return err;
10205 }
10206
10207 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10208 {
10209         struct tg3 *tp = netdev_priv(dev);
10210
10211         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10212
10213         if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10214                 epause->rx_pause = 1;
10215         else
10216                 epause->rx_pause = 0;
10217
10218         if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10219                 epause->tx_pause = 1;
10220         else
10221                 epause->tx_pause = 0;
10222 }
10223
10224 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10225 {
10226         struct tg3 *tp = netdev_priv(dev);
10227         int err = 0;
10228
10229         if (tg3_flag(tp, USE_PHYLIB)) {
10230                 u32 newadv;
10231                 struct phy_device *phydev;
10232
10233                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10234
10235                 if (!(phydev->supported & SUPPORTED_Pause) ||
10236                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10237                      (epause->rx_pause != epause->tx_pause)))
10238                         return -EINVAL;
10239
10240                 tp->link_config.flowctrl = 0;
10241                 if (epause->rx_pause) {
10242                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10243
10244                         if (epause->tx_pause) {
10245                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10246                                 newadv = ADVERTISED_Pause;
10247                         } else
10248                                 newadv = ADVERTISED_Pause |
10249                                          ADVERTISED_Asym_Pause;
10250                 } else if (epause->tx_pause) {
10251                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10252                         newadv = ADVERTISED_Asym_Pause;
10253                 } else
10254                         newadv = 0;
10255
10256                 if (epause->autoneg)
10257                         tg3_flag_set(tp, PAUSE_AUTONEG);
10258                 else
10259                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10260
10261                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10262                         u32 oldadv = phydev->advertising &
10263                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10264                         if (oldadv != newadv) {
10265                                 phydev->advertising &=
10266                                         ~(ADVERTISED_Pause |
10267                                           ADVERTISED_Asym_Pause);
10268                                 phydev->advertising |= newadv;
10269                                 if (phydev->autoneg) {
10270                                         /*
10271                                          * Always renegotiate the link to
10272                                          * inform our link partner of our
10273                                          * flow control settings, even if the
10274                                          * flow control is forced.  Let
10275                                          * tg3_adjust_link() do the final
10276                                          * flow control setup.
10277                                          */
10278                                         return phy_start_aneg(phydev);
10279                                 }
10280                         }
10281
10282                         if (!epause->autoneg)
10283                                 tg3_setup_flow_control(tp, 0, 0);
10284                 } else {
10285                         tp->link_config.orig_advertising &=
10286                                         ~(ADVERTISED_Pause |
10287                                           ADVERTISED_Asym_Pause);
10288                         tp->link_config.orig_advertising |= newadv;
10289                 }
10290         } else {
10291                 int irq_sync = 0;
10292
10293                 if (netif_running(dev)) {
10294                         tg3_netif_stop(tp);
10295                         irq_sync = 1;
10296                 }
10297
10298                 tg3_full_lock(tp, irq_sync);
10299
10300                 if (epause->autoneg)
10301                         tg3_flag_set(tp, PAUSE_AUTONEG);
10302                 else
10303                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10304                 if (epause->rx_pause)
10305                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10306                 else
10307                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10308                 if (epause->tx_pause)
10309                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10310                 else
10311                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10312
10313                 if (netif_running(dev)) {
10314                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10315                         err = tg3_restart_hw(tp, 1);
10316                         if (!err)
10317                                 tg3_netif_start(tp);
10318                 }
10319
10320                 tg3_full_unlock(tp);
10321         }
10322
10323         return err;
10324 }
10325
10326 static int tg3_get_sset_count(struct net_device *dev, int sset)
10327 {
10328         switch (sset) {
10329         case ETH_SS_TEST:
10330                 return TG3_NUM_TEST;
10331         case ETH_SS_STATS:
10332                 return TG3_NUM_STATS;
10333         default:
10334                 return -EOPNOTSUPP;
10335         }
10336 }
10337
10338 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10339 {
10340         switch (stringset) {
10341         case ETH_SS_STATS:
10342                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10343                 break;
10344         case ETH_SS_TEST:
10345                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10346                 break;
10347         default:
10348                 WARN_ON(1);     /* unknown stringset; should never be reached */
10349                 break;
10350         }
10351 }
10352
10353 static int tg3_set_phys_id(struct net_device *dev,
10354                             enum ethtool_phys_id_state state)
10355 {
10356         struct tg3 *tp = netdev_priv(dev);
10357
10358         if (!netif_running(tp->dev))
10359                 return -EAGAIN;
10360
10361         switch (state) {
10362         case ETHTOOL_ID_ACTIVE:
10363                 return 1;       /* cycle on/off once per second */
10364
10365         case ETHTOOL_ID_ON:
10366                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10367                      LED_CTRL_1000MBPS_ON |
10368                      LED_CTRL_100MBPS_ON |
10369                      LED_CTRL_10MBPS_ON |
10370                      LED_CTRL_TRAFFIC_OVERRIDE |
10371                      LED_CTRL_TRAFFIC_BLINK |
10372                      LED_CTRL_TRAFFIC_LED);
10373                 break;
10374
10375         case ETHTOOL_ID_OFF:
10376                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10377                      LED_CTRL_TRAFFIC_OVERRIDE);
10378                 break;
10379
10380         case ETHTOOL_ID_INACTIVE:
10381                 tw32(MAC_LED_CTRL, tp->led_ctrl);
10382                 break;
10383         }
10384
10385         return 0;
10386 }
10387
10388 static void tg3_get_ethtool_stats(struct net_device *dev,
10389                                    struct ethtool_stats *estats, u64 *tmp_stats)
10390 {
10391         struct tg3 *tp = netdev_priv(dev);
10392         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10393 }
10394
10395 static __be32 *tg3_vpd_readblock(struct tg3 *tp)
10396 {
10397         int i;
10398         __be32 *buf;
10399         u32 offset = 0, len = 0;
10400         u32 magic, val;
10401
10402         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10403                 return NULL;
10404
10405         if (magic == TG3_EEPROM_MAGIC) {
10406                 for (offset = TG3_NVM_DIR_START;
10407                      offset < TG3_NVM_DIR_END;
10408                      offset += TG3_NVM_DIRENT_SIZE) {
10409                         if (tg3_nvram_read(tp, offset, &val))
10410                                 return NULL;
10411
10412                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10413                             TG3_NVM_DIRTYPE_EXTVPD)
10414                                 break;
10415                 }
10416
10417                 if (offset != TG3_NVM_DIR_END) {
10418                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10419                         if (tg3_nvram_read(tp, offset + 4, &offset))
10420                                 return NULL;
10421
10422                         offset = tg3_nvram_logical_addr(tp, offset);
10423                 }
10424         }
10425
10426         if (!offset || !len) {
10427                 offset = TG3_NVM_VPD_OFF;
10428                 len = TG3_NVM_VPD_LEN;
10429         }
10430
10431         buf = kmalloc(len, GFP_KERNEL);
10432         if (buf == NULL)
10433                 return NULL;
10434
10435         if (magic == TG3_EEPROM_MAGIC) {
10436                 for (i = 0; i < len; i += 4) {
10437                         /* The data is in little-endian format in NVRAM.
10438                          * Use the big-endian read routines to preserve
10439                          * the byte order as it exists in NVRAM.
10440                          */
10441                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10442                                 goto error;
10443                 }
10444         } else {
10445                 u8 *ptr;
10446                 ssize_t cnt;
10447                 unsigned int pos = 0;
10448
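                /* Pull the VPD through PCI config space in up to three
                 * passes, treating timeouts and signals as transient
                 * (the pass is retried at the same offset).
                 */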
10449                 ptr = (u8 *)&buf[0];
10450                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10451                         cnt = pci_read_vpd(tp->pdev, pos,
10452                                            len - pos, ptr);
10453                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
10454                                 cnt = 0;
10455                         else if (cnt < 0)
10456                                 goto error;
10457                 }
10458                 if (pos != len)
10459                         goto error;
10460         }
10461
10462         return buf;
10463
10464 error:
10465         kfree(buf);
10466         return NULL;
10467 }
10468
10469 #define NVRAM_TEST_SIZE 0x100
10470 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
10471 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
10472 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
10473 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10474 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10475
10476 static int tg3_test_nvram(struct tg3 *tp)
10477 {
10478         u32 csum, magic;
10479         __be32 *buf;
10480         int i, j, k, err = 0, size;
10481
10482         if (tg3_flag(tp, NO_NVRAM))
10483                 return 0;
10484
10485         if (tg3_nvram_read(tp, 0, &magic) != 0)
10486                 return -EIO;
10487
10488         if (magic == TG3_EEPROM_MAGIC)
10489                 size = NVRAM_TEST_SIZE;
10490         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10491                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10492                     TG3_EEPROM_SB_FORMAT_1) {
10493                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10494                         case TG3_EEPROM_SB_REVISION_0:
10495                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10496                                 break;
10497                         case TG3_EEPROM_SB_REVISION_2:
10498                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10499                                 break;
10500                         case TG3_EEPROM_SB_REVISION_3:
10501                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10502                                 break;
10503                         default:
10504                                 return 0;
10505                         }
10506                 } else
10507                         return 0;
10508         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10509                 size = NVRAM_SELFBOOT_HW_SIZE;
10510         else
10511                 return -EIO;
10512
10513         buf = kmalloc(size, GFP_KERNEL);
10514         if (buf == NULL)
10515                 return -ENOMEM;
10516
10517         err = -EIO;
10518         for (i = 0, j = 0; i < size; i += 4, j++) {
10519                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10520                 if (err)
10521                         break;
10522         }
10523         if (i < size)
10524                 goto out;
10525
10526         /* Selfboot format */
10527         magic = be32_to_cpu(buf[0]);
10528         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10529             TG3_EEPROM_MAGIC_FW) {
10530                 u8 *buf8 = (u8 *) buf, csum8 = 0;
10531
10532                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10533                     TG3_EEPROM_SB_REVISION_2) {
10534                         /* For rev 2, the csum doesn't include the MBA. */
10535                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10536                                 csum8 += buf8[i];
10537                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10538                                 csum8 += buf8[i];
10539                 } else {
10540                         for (i = 0; i < size; i++)
10541                                 csum8 += buf8[i];
10542                 }
10543
10544                 if (csum8 == 0) {
10545                         err = 0;
10546                         goto out;
10547                 }
10548
10549                 err = -EIO;
10550                 goto out;
10551         }
10552
10553         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10554             TG3_EEPROM_MAGIC_HW) {
10555                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10556                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10557                 u8 *buf8 = (u8 *) buf;
10558
10559                 /* Separate the parity bits and the data bytes.  */
10560                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10561                         if ((i == 0) || (i == 8)) {
10562                                 int l;
10563                                 u8 msk;
10564
10565                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10566                                         parity[k++] = buf8[i] & msk;
10567                                 i++;
10568                         } else if (i == 16) {
10569                                 int l;
10570                                 u8 msk;
10571
10572                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10573                                         parity[k++] = buf8[i] & msk;
10574                                 i++;
10575
10576                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10577                                         parity[k++] = buf8[i] & msk;
10578                                 i++;
10579                         }
10580                         data[j++] = buf8[i];
10581                 }
10582
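                /* Verify odd parity: each data byte combined with its
                 * parity bit must have an odd number of set bits.
                 */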
10583                 err = -EIO;
10584                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10585                         u8 hw8 = hweight8(data[i]);
10586
10587                         if ((hw8 & 0x1) && parity[i])
10588                                 goto out;
10589                         else if (!(hw8 & 0x1) && !parity[i])
10590                                 goto out;
10591                 }
10592                 err = 0;
10593                 goto out;
10594         }
10595
10596         err = -EIO;
10597
10598         /* Bootstrap checksum at offset 0x10 */
10599         csum = calc_crc((unsigned char *) buf, 0x10);
10600         if (csum != le32_to_cpu(buf[0x10/4]))
10601                 goto out;
10602
10603         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10604         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10605         if (csum != le32_to_cpu(buf[0xfc/4]))
10606                 goto out;
10607
10608         kfree(buf);
10609
10610         buf = tg3_vpd_readblock(tp);
10611         if (!buf)
10612                 return -ENOMEM;
10613
10614         i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
10615                              PCI_VPD_LRDT_RO_DATA);
10616         if (i > 0) {
10617                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
10618                 if (j < 0)
10619                         goto out;
10620
10621                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN)
10622                         goto out;
10623
10624                 i += PCI_VPD_LRDT_TAG_SIZE;
10625                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
10626                                               PCI_VPD_RO_KEYWORD_CHKSUM);
10627                 if (j > 0) {
10628                         u8 csum8 = 0;
10629
10630                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
10631
10632                         for (i = 0; i <= j; i++)
10633                                 csum8 += ((u8 *)buf)[i];
10634
10635                         if (csum8)
10636                                 goto out;
10637                 }
10638         }
10639
10640         err = 0;
10641
10642 out:
10643         kfree(buf);
10644         return err;
10645 }
10646
10647 #define TG3_SERDES_TIMEOUT_SEC  2
10648 #define TG3_COPPER_TIMEOUT_SEC  6
10649
10650 static int tg3_test_link(struct tg3 *tp)
10651 {
10652         int i, max;
10653
10654         if (!netif_running(tp->dev))
10655                 return -ENODEV;
10656
10657         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10658                 max = TG3_SERDES_TIMEOUT_SEC;
10659         else
10660                 max = TG3_COPPER_TIMEOUT_SEC;
10661
10662         for (i = 0; i < max; i++) {
10663                 if (netif_carrier_ok(tp->dev))
10664                         return 0;
10665
10666                 if (msleep_interruptible(1000))
10667                         break;
10668         }
10669
10670         return -EIO;
10671 }
10672
10673 /* Only test the commonly used registers */
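/* In reg_tbl below, read_mask selects read-only bits whose value must
 * survive writes unchanged, while write_mask selects read/write bits
 * that must accept both the all-zeros and all-ones test patterns.
 */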
10674 static int tg3_test_registers(struct tg3 *tp)
10675 {
10676         int i, is_5705, is_5750;
10677         u32 offset, read_mask, write_mask, val, save_val, read_val;
10678         static struct {
10679                 u16 offset;
10680                 u16 flags;
10681 #define TG3_FL_5705     0x1
10682 #define TG3_FL_NOT_5705 0x2
10683 #define TG3_FL_NOT_5788 0x4
10684 #define TG3_FL_NOT_5750 0x8
10685                 u32 read_mask;
10686                 u32 write_mask;
10687         } reg_tbl[] = {
10688                 /* MAC Control Registers */
10689                 { MAC_MODE, TG3_FL_NOT_5705,
10690                         0x00000000, 0x00ef6f8c },
10691                 { MAC_MODE, TG3_FL_5705,
10692                         0x00000000, 0x01ef6b8c },
10693                 { MAC_STATUS, TG3_FL_NOT_5705,
10694                         0x03800107, 0x00000000 },
10695                 { MAC_STATUS, TG3_FL_5705,
10696                         0x03800100, 0x00000000 },
10697                 { MAC_ADDR_0_HIGH, 0x0000,
10698                         0x00000000, 0x0000ffff },
10699                 { MAC_ADDR_0_LOW, 0x0000,
10700                         0x00000000, 0xffffffff },
10701                 { MAC_RX_MTU_SIZE, 0x0000,
10702                         0x00000000, 0x0000ffff },
10703                 { MAC_TX_MODE, 0x0000,
10704                         0x00000000, 0x00000070 },
10705                 { MAC_TX_LENGTHS, 0x0000,
10706                         0x00000000, 0x00003fff },
10707                 { MAC_RX_MODE, TG3_FL_NOT_5705,
10708                         0x00000000, 0x000007fc },
10709                 { MAC_RX_MODE, TG3_FL_5705,
10710                         0x00000000, 0x000007dc },
10711                 { MAC_HASH_REG_0, 0x0000,
10712                         0x00000000, 0xffffffff },
10713                 { MAC_HASH_REG_1, 0x0000,
10714                         0x00000000, 0xffffffff },
10715                 { MAC_HASH_REG_2, 0x0000,
10716                         0x00000000, 0xffffffff },
10717                 { MAC_HASH_REG_3, 0x0000,
10718                         0x00000000, 0xffffffff },
10719
10720                 /* Receive Data and Receive BD Initiator Control Registers. */
10721                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10722                         0x00000000, 0xffffffff },
10723                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10724                         0x00000000, 0xffffffff },
10725                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10726                         0x00000000, 0x00000003 },
10727                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10728                         0x00000000, 0xffffffff },
10729                 { RCVDBDI_STD_BD+0, 0x0000,
10730                         0x00000000, 0xffffffff },
10731                 { RCVDBDI_STD_BD+4, 0x0000,
10732                         0x00000000, 0xffffffff },
10733                 { RCVDBDI_STD_BD+8, 0x0000,
10734                         0x00000000, 0xffff0002 },
10735                 { RCVDBDI_STD_BD+0xc, 0x0000,
10736                         0x00000000, 0xffffffff },
10737
10738                 /* Receive BD Initiator Control Registers. */
10739                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10740                         0x00000000, 0xffffffff },
10741                 { RCVBDI_STD_THRESH, TG3_FL_5705,
10742                         0x00000000, 0x000003ff },
10743                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10744                         0x00000000, 0xffffffff },
10745
10746                 /* Host Coalescing Control Registers. */
10747                 { HOSTCC_MODE, TG3_FL_NOT_5705,
10748                         0x00000000, 0x00000004 },
10749                 { HOSTCC_MODE, TG3_FL_5705,
10750                         0x00000000, 0x000000f6 },
10751                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10752                         0x00000000, 0xffffffff },
10753                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10754                         0x00000000, 0x000003ff },
10755                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10756                         0x00000000, 0xffffffff },
10757                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10758                         0x00000000, 0x000003ff },
10759                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10760                         0x00000000, 0xffffffff },
10761                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10762                         0x00000000, 0x000000ff },
10763                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10764                         0x00000000, 0xffffffff },
10765                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10766                         0x00000000, 0x000000ff },
10767                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10768                         0x00000000, 0xffffffff },
10769                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10770                         0x00000000, 0xffffffff },
10771                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10772                         0x00000000, 0xffffffff },
10773                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10774                         0x00000000, 0x000000ff },
10775                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10776                         0x00000000, 0xffffffff },
10777                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10778                         0x00000000, 0x000000ff },
10779                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10780                         0x00000000, 0xffffffff },
10781                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10782                         0x00000000, 0xffffffff },
10783                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10784                         0x00000000, 0xffffffff },
10785                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10786                         0x00000000, 0xffffffff },
10787                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10788                         0x00000000, 0xffffffff },
10789                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10790                         0xffffffff, 0x00000000 },
10791                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10792                         0xffffffff, 0x00000000 },
10793
10794                 /* Buffer Manager Control Registers. */
10795                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10796                         0x00000000, 0x007fff80 },
10797                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10798                         0x00000000, 0x007fffff },
10799                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10800                         0x00000000, 0x0000003f },
10801                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10802                         0x00000000, 0x000001ff },
10803                 { BUFMGR_MB_HIGH_WATER, 0x0000,
10804                         0x00000000, 0x000001ff },
10805                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10806                         0xffffffff, 0x00000000 },
10807                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10808                         0xffffffff, 0x00000000 },
10809
10810                 /* Mailbox Registers */
10811                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10812                         0x00000000, 0x000001ff },
10813                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10814                         0x00000000, 0x000001ff },
10815                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10816                         0x00000000, 0x000007ff },
10817                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10818                         0x00000000, 0x000001ff },
10819
10820                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10821         };
10822
10823         is_5705 = is_5750 = 0;
10824         if (tg3_flag(tp, 5705_PLUS)) {
10825                 is_5705 = 1;
10826                 if (tg3_flag(tp, 5750_PLUS))
10827                         is_5750 = 1;
10828         }
10829
10830         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
10831                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
10832                         continue;
10833
10834                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
10835                         continue;
10836
10837                 if (tg3_flag(tp, IS_5788) &&
10838                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
10839                         continue;
10840
10841                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
10842                         continue;
10843
10844                 offset = (u32) reg_tbl[i].offset;
10845                 read_mask = reg_tbl[i].read_mask;
10846                 write_mask = reg_tbl[i].write_mask;
10847
10848                 /* Save the original register content */
10849                 save_val = tr32(offset);
10850
10851                 /* Determine the read-only value. */
10852                 read_val = save_val & read_mask;
10853
10854                 /* Write zero to the register, then make sure the read-only bits
10855                  * are not changed and the read/write bits are all zeros.
10856                  */
10857                 tw32(offset, 0);
10858
10859                 val = tr32(offset);
10860
10861                 /* Test the read-only and read/write bits. */
10862                 if (((val & read_mask) != read_val) || (val & write_mask))
10863                         goto out;
10864
10865                 /* Write ones to all the bits defined by RdMask and WrMask, then
10866                  * make sure the read-only bits are not changed and the
10867                  * read/write bits are all ones.
10868                  */
10869                 tw32(offset, read_mask | write_mask);
10870
10871                 val = tr32(offset);
10872
10873                 /* Test the read-only bits. */
10874                 if ((val & read_mask) != read_val)
10875                         goto out;
10876
10877                 /* Test the read/write bits. */
10878                 if ((val & write_mask) != write_mask)
10879                         goto out;
10880
10881                 tw32(offset, save_val);
10882         }
10883
10884         return 0;
10885
10886 out:
10887         if (netif_msg_hw(tp))
10888                 netdev_err(tp->dev,
10889                            "Register test failed at offset %x\n", offset);
10890         tw32(offset, save_val);
10891         return -EIO;
10892 }
10893
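/* Write each test pattern to every 4-byte word of the given on-chip
 * memory region and read it back; any mismatch fails the test.
 */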
10894 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10895 {
10896         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
10897         int i;
10898         u32 j;
10899
10900         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10901                 for (j = 0; j < len; j += 4) {
10902                         u32 val;
10903
10904                         tg3_write_mem(tp, offset + j, test_pattern[i]);
10905                         tg3_read_mem(tp, offset + j, &val);
10906                         if (val != test_pattern[i])
10907                                 return -EIO;
10908                 }
10909         }
10910         return 0;
10911 }
10912
10913 static int tg3_test_memory(struct tg3 *tp)
10914 {
10915         static struct mem_entry {
10916                 u32 offset;
10917                 u32 len;
10918         } mem_tbl_570x[] = {
10919                 { 0x00000000, 0x00b50},
10920                 { 0x00002000, 0x1c000},
10921                 { 0xffffffff, 0x00000}
10922         }, mem_tbl_5705[] = {
10923                 { 0x00000100, 0x0000c},
10924                 { 0x00000200, 0x00008},
10925                 { 0x00004000, 0x00800},
10926                 { 0x00006000, 0x01000},
10927                 { 0x00008000, 0x02000},
10928                 { 0x00010000, 0x0e000},
10929                 { 0xffffffff, 0x00000}
10930         }, mem_tbl_5755[] = {
10931                 { 0x00000200, 0x00008},
10932                 { 0x00004000, 0x00800},
10933                 { 0x00006000, 0x00800},
10934                 { 0x00008000, 0x02000},
10935                 { 0x00010000, 0x0c000},
10936                 { 0xffffffff, 0x00000}
10937         }, mem_tbl_5906[] = {
10938                 { 0x00000200, 0x00008},
10939                 { 0x00004000, 0x00400},
10940                 { 0x00006000, 0x00400},
10941                 { 0x00008000, 0x01000},
10942                 { 0x00010000, 0x01000},
10943                 { 0xffffffff, 0x00000}
10944         }, mem_tbl_5717[] = {
10945                 { 0x00000200, 0x00008},
10946                 { 0x00010000, 0x0a000},
10947                 { 0x00020000, 0x13c00},
10948                 { 0xffffffff, 0x00000}
10949         }, mem_tbl_57765[] = {
10950                 { 0x00000200, 0x00008},
10951                 { 0x00004000, 0x00800},
10952                 { 0x00006000, 0x09800},
10953                 { 0x00010000, 0x0a000},
10954                 { 0xffffffff, 0x00000}
10955         };
10956         struct mem_entry *mem_tbl;
10957         int err = 0;
10958         int i;
10959
10960         if (tg3_flag(tp, 5717_PLUS))
10961                 mem_tbl = mem_tbl_5717;
10962         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
10963                 mem_tbl = mem_tbl_57765;
10964         else if (tg3_flag(tp, 5755_PLUS))
10965                 mem_tbl = mem_tbl_5755;
10966         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10967                 mem_tbl = mem_tbl_5906;
10968         else if (tg3_flag(tp, 5705_PLUS))
10969                 mem_tbl = mem_tbl_5705;
10970         else
10971                 mem_tbl = mem_tbl_570x;
10972
10973         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10974                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
10975                 if (err)
10976                         break;
10977         }
10978
10979         return err;
10980 }
10981
10982 #define TG3_MAC_LOOPBACK        0
10983 #define TG3_PHY_LOOPBACK        1
10984 #define TG3_TSO_LOOPBACK        2
10985
10986 #define TG3_TSO_MSS             500
10987
10988 #define TG3_TSO_IP_HDR_LEN      20
10989 #define TG3_TSO_TCP_HDR_LEN     20
10990 #define TG3_TSO_TCP_OPT_LEN     12
10991
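/* Canned header for the TSO loopback test: EtherType 0x0800, a 20-byte
 * IPv4 header (10.0.0.1 -> 10.0.0.2, protocol TCP, tot_len filled in
 * at run time) and a 32-byte TCP header (data offset 8, ACK set)
 * carrying a timestamp option.
 */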
10992 static const u8 tg3_tso_header[] = {
10993 0x08, 0x00,
10994 0x45, 0x00, 0x00, 0x00,
10995 0x00, 0x00, 0x40, 0x00,
10996 0x40, 0x06, 0x00, 0x00,
10997 0x0a, 0x00, 0x00, 0x01,
10998 0x0a, 0x00, 0x00, 0x02,
10999 0x0d, 0x00, 0xe0, 0x00,
11000 0x00, 0x00, 0x01, 0x00,
11001 0x00, 0x00, 0x02, 0x00,
11002 0x80, 0x10, 0x10, 0x00,
11003 0x14, 0x09, 0x00, 0x00,
11004 0x01, 0x01, 0x08, 0x0a,
11005 0x11, 0x11, 0x11, 0x11,
11006 0x11, 0x11, 0x11, 0x11,
11007 };
11008
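/* Build a single test frame (or a TSO super-frame), transmit it with
 * the MAC or PHY looped back, and verify what the receive ring returns.
 */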
11009 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
11010 {
11011         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
11012         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11013         struct sk_buff *skb, *rx_skb;
11014         u8 *tx_data;
11015         dma_addr_t map;
11016         int num_pkts, tx_len, rx_len, i, err;
11017         struct tg3_rx_buffer_desc *desc;
11018         struct tg3_napi *tnapi, *rnapi;
11019         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11020
11021         tnapi = &tp->napi[0];
11022         rnapi = &tp->napi[0];
11023         if (tp->irq_cnt > 1) {
11024                 if (tg3_flag(tp, ENABLE_RSS))
11025                         rnapi = &tp->napi[1];
11026                 if (tg3_flag(tp, ENABLE_TSS))
11027                         tnapi = &tp->napi[1];
11028         }
11029         coal_now = tnapi->coal_now | rnapi->coal_now;
11030
11031         if (loopback_mode == TG3_MAC_LOOPBACK) {
11032                 /* HW errata - mac loopback fails in some cases on 5780.
11033                  * Normal traffic and PHY loopback are not affected by
11034                  * errata.  Also, the MAC loopback test is deprecated for
11035                  * all newer ASIC revisions.
11036                  */
11037                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11038                     tg3_flag(tp, CPMU_PRESENT))
11039                         return 0;
11040
11041                 mac_mode = tp->mac_mode &
11042                            ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11043                 mac_mode |= MAC_MODE_PORT_INT_LPBACK;
11044                 if (!tg3_flag(tp, 5705_PLUS))
11045                         mac_mode |= MAC_MODE_LINK_POLARITY;
11046                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
11047                         mac_mode |= MAC_MODE_PORT_MODE_MII;
11048                 else
11049                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
11050                 tw32(MAC_MODE, mac_mode);
11051         } else {
11052                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11053                         tg3_phy_fet_toggle_apd(tp, false);
11054                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
11055                 } else
11056                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
11057
11058                 tg3_phy_toggle_automdix(tp, 0);
11059
11060                 tg3_writephy(tp, MII_BMCR, val);
11061                 udelay(40);
11062
11063                 mac_mode = tp->mac_mode &
11064                            ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11065                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11066                         tg3_writephy(tp, MII_TG3_FET_PTEST,
11067                                      MII_TG3_FET_PTEST_FRC_TX_LINK |
11068                                      MII_TG3_FET_PTEST_FRC_TX_LOCK);
11069                         /* The write needs to be flushed for the AC131 */
11070                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11071                                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
11072                         mac_mode |= MAC_MODE_PORT_MODE_MII;
11073                 } else
11074                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
11075
11076                 /* reset to prevent losing 1st rx packet intermittently */
11077                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
11078                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
11079                         udelay(10);
11080                         tw32_f(MAC_RX_MODE, tp->rx_mode);
11081                 }
11082                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
11083                         u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
11084                         if (masked_phy_id == TG3_PHY_ID_BCM5401)
11085                                 mac_mode &= ~MAC_MODE_LINK_POLARITY;
11086                         else if (masked_phy_id == TG3_PHY_ID_BCM5411)
11087                                 mac_mode |= MAC_MODE_LINK_POLARITY;
11088                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
11089                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
11090                 }
11091                 tw32(MAC_MODE, mac_mode);
11092
11093                 /* Wait for link */
11094                 for (i = 0; i < 100; i++) {
11095                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11096                                 break;
11097                         mdelay(1);
11098                 }
11099         }
11100
11101         err = -EIO;
11102
11103         tx_len = pktsz;
11104         skb = netdev_alloc_skb(tp->dev, tx_len);
11105         if (!skb)
11106                 return -ENOMEM;
11107
11108         tx_data = skb_put(skb, tx_len);
11109         memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
11110         memset(tx_data + ETH_ALEN, 0x0, 8);
11111
11112         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11113
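              /* For the TSO test, prepend the canned tg3_tso_header
               * IP/TCP header and let the hardware segment the payload
               * into MSS-sized frames.
               */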
11114         if (loopback_mode == TG3_TSO_LOOPBACK) {
11115                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11116
11117                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11118                               TG3_TSO_TCP_OPT_LEN;
11119
11120                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11121                        sizeof(tg3_tso_header));
11122                 mss = TG3_TSO_MSS;
11123
11124                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11125                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11126
11127                 /* Set the total length field in the IP header */
11128                 iph->tot_len = htons((u16)(mss + hdr_len));
11129
11130                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11131                               TXD_FLAG_CPU_POST_DMA);
11132
11133                 if (tg3_flag(tp, HW_TSO_1) ||
11134                     tg3_flag(tp, HW_TSO_2) ||
11135                     tg3_flag(tp, HW_TSO_3)) {
11136                         struct tcphdr *th;
11137                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11138                         th = (struct tcphdr *)&tx_data[val];
11139                         th->check = 0;
11140                 } else {
11141                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
                      }
11142
11143                 if (tg3_flag(tp, HW_TSO_3)) {
11144                         mss |= (hdr_len & 0xc) << 12;
11145                         if (hdr_len & 0x10)
11146                                 base_flags |= 0x00000010;
11147                         base_flags |= (hdr_len & 0x3e0) << 5;
11148                 } else if (tg3_flag(tp, HW_TSO_2)) {
11149                         mss |= hdr_len << 9;
11150                 } else if (tg3_flag(tp, HW_TSO_1) ||
11151                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11152                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11153                 } else {
11154                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11155                 }
11156
11157                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11158         } else {
11159                 num_pkts = 1;
11160                 data_off = ETH_HLEN;
11161         }
11162
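              /* Fill the payload with an incrementing byte pattern so
               * the receive side can verify the data byte for byte.
               */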
11163         for (i = data_off; i < tx_len; i++)
11164                 tx_data[i] = (u8) (i & 0xff);
11165
11166         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11167         if (pci_dma_mapping_error(tp->pdev, map)) {
11168                 dev_kfree_skb(skb);
11169                 return -EIO;
11170         }
11171
11172         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11173                rnapi->coal_now);
11174
11175         udelay(10);
11176
11177         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11178
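              /* Queue the single test frame and ring the TX doorbell. */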
11179         tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len,
11180                     base_flags, (mss << 1) | 1);
11181
11182         tnapi->tx_prod++;
11183
11184         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11185         tr32_mailbox(tnapi->prodmbox);
11186
11187         udelay(10);
11188
11189         /* Poll up to 350 usec; some 10/100 Mbps devices need that long. */
11190         for (i = 0; i < 35; i++) {
11191                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11192                        coal_now);
11193
11194                 udelay(10);
11195
11196                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11197                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11198                 if ((tx_idx == tnapi->tx_prod) &&
11199                     (rx_idx == (rx_start_idx + num_pkts)))
11200                         break;
11201         }
11202
11203         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
11204         dev_kfree_skb(skb);
11205
11206         if (tx_idx != tnapi->tx_prod)
11207                 goto out;
11208
11209         if (rx_idx != rx_start_idx + num_pkts)
11210                 goto out;
11211
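              /* Walk the RX return ring and validate each descriptor
               * and payload the test produced.
               */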
11212         val = data_off;
11213         while (rx_idx != rx_start_idx) {
11214                 desc = &rnapi->rx_rcb[rx_start_idx++];
11215                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11216                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11217
11218                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11219                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11220                         goto out;
11221
11222                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11223                          - ETH_FCS_LEN;
11224
11225                 if (loopback_mode != TG3_TSO_LOOPBACK) {
11226                         if (rx_len != tx_len)
11227                                 goto out;
11228
11229                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11230                                 if (opaque_key != RXD_OPAQUE_RING_STD)
11231                                         goto out;
11232                         } else {
11233                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11234                                         goto out;
11235                         }
11236                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11237                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11238                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
11239                         goto out;
11240                 }
11241
11242                 if (opaque_key == RXD_OPAQUE_RING_STD) {
11243                         rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11244                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11245                                              mapping);
11246                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11247                         rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11248                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11249                                              mapping);
11250                 } else {
11251                         goto out;
                      }
11252
11253                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11254                                             PCI_DMA_FROMDEVICE);
11255
11256                 for (i = data_off; i < rx_len; i++, val++) {
11257                         if (*(rx_skb->data + i) != (u8) (val & 0xff))
11258                                 goto out;
11259                 }
11260         }
11261
11262         err = 0;
11263
11264         /* tg3_free_rings will unmap and free the rx_skb */
11265 out:
11266         return err;
11267 }
11268
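/* Loopback results are reported as a bitmap: the standard, jumbo and TSO
 * failure bits are shifted by 0 for MAC-mode loopback and by 4 for
 * PHY-mode loopback, hence 0x77 when every test has failed.
 */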
11269 #define TG3_STD_LOOPBACK_FAILED         1
11270 #define TG3_JMB_LOOPBACK_FAILED         2
11271 #define TG3_TSO_LOOPBACK_FAILED         4
11272
11273 #define TG3_MAC_LOOPBACK_SHIFT          0
11274 #define TG3_PHY_LOOPBACK_SHIFT          4
11275 #define TG3_LOOPBACK_FAILED             0x00000077
11276
11277 static int tg3_test_loopback(struct tg3 *tp)
11278 {
11279         int err = 0;
11280         u32 eee_cap, cpmuctrl = 0;
11281
11282         if (!netif_running(tp->dev))
11283                 return TG3_LOOPBACK_FAILED;
11284
11285         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11286         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11287
11288         err = tg3_reset_hw(tp, 1);
11289         if (err) {
11290                 err = TG3_LOOPBACK_FAILED;
11291                 goto done;
11292         }
11293
11294         if (tg3_flag(tp, ENABLE_RSS)) {
11295                 int i;
11296
11297                 /* Reroute all rx packets to the 1st queue */
11298                 for (i = MAC_RSS_INDIR_TBL_0;
11299                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11300                         tw32(i, 0x0);
11301         }
11302
11303         /* Turn off gphy autopowerdown. */
11304         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11305                 tg3_phy_toggle_apd(tp, false);
11306
11307         if (tg3_flag(tp, CPMU_PRESENT)) {
11308                 int i;
11309                 u32 status;
11310
11311                 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
11312
11313                 /* Wait for up to 40 microseconds to acquire lock. */
11314                 for (i = 0; i < 4; i++) {
11315                         status = tr32(TG3_CPMU_MUTEX_GNT);
11316                         if (status == CPMU_MUTEX_GNT_DRIVER)
11317                                 break;
11318                         udelay(10);
11319                 }
11320
11321                 if (status != CPMU_MUTEX_GNT_DRIVER) {
11322                         err = TG3_LOOPBACK_FAILED;
11323                         goto done;
11324                 }
11325
11326                 /* Turn off link-based power management. */
11327                 cpmuctrl = tr32(TG3_CPMU_CTRL);
11328                 tw32(TG3_CPMU_CTRL,
11329                      cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
11330                                   CPMU_CTRL_LINK_AWARE_MODE));
11331         }
11332
11333         if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
11334                 err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11335
11336         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11337             tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
11338                 err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11339
11340         if (tg3_flag(tp, CPMU_PRESENT)) {
11341                 tw32(TG3_CPMU_CTRL, cpmuctrl);
11342
11343                 /* Release the mutex */
11344                 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
11345         }
11346
11347         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11348             !tg3_flag(tp, USE_PHYLIB)) {
11349                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
11350                         err |= TG3_STD_LOOPBACK_FAILED <<
11351                                TG3_PHY_LOOPBACK_SHIFT;
11352                 if (tg3_flag(tp, TSO_CAPABLE) &&
11353                     tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
11354                         err |= TG3_TSO_LOOPBACK_FAILED <<
11355                                TG3_PHY_LOOPBACK_SHIFT;
11356                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11357                     tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
11358                         err |= TG3_JMB_LOOPBACK_FAILED <<
11359                                TG3_PHY_LOOPBACK_SHIFT;
11360         }
11361
11362         /* Re-enable gphy autopowerdown. */
11363         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11364                 tg3_phy_toggle_apd(tp, true);
11365
11366 done:
11367         tp->phy_flags |= eee_cap;
11368
11369         return err;
11370 }
11371
11372 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11373                           u64 *data)
11374 {
11375         struct tg3 *tp = netdev_priv(dev);
11376
11377         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11378                 tg3_power_up(tp);
11379
11380         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11381
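              /* Each data[] slot holds one test result, in the same
               * order as the test names reported via .get_strings:
               * nvram, link, registers, memory, loopback, interrupt.
               */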
11382         if (tg3_test_nvram(tp) != 0) {
11383                 etest->flags |= ETH_TEST_FL_FAILED;
11384                 data[0] = 1;
11385         }
11386         if (tg3_test_link(tp) != 0) {
11387                 etest->flags |= ETH_TEST_FL_FAILED;
11388                 data[1] = 1;
11389         }
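              /* The offline tests are disruptive: the chip is halted
               * and reset around the register, memory, loopback and
               * interrupt tests, then the interface is restarted.
               */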
11390         if (etest->flags & ETH_TEST_FL_OFFLINE) {
11391                 int err, err2 = 0, irq_sync = 0;
11392
11393                 if (netif_running(dev)) {
11394                         tg3_phy_stop(tp);
11395                         tg3_netif_stop(tp);
11396                         irq_sync = 1;
11397                 }
11398
11399                 tg3_full_lock(tp, irq_sync);
11400
11401                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11402                 err = tg3_nvram_lock(tp);
11403                 tg3_halt_cpu(tp, RX_CPU_BASE);
11404                 if (!tg3_flag(tp, 5705_PLUS))
11405                         tg3_halt_cpu(tp, TX_CPU_BASE);
11406                 if (!err)
11407                         tg3_nvram_unlock(tp);
11408
11409                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11410                         tg3_phy_reset(tp);
11411
11412                 if (tg3_test_registers(tp) != 0) {
11413                         etest->flags |= ETH_TEST_FL_FAILED;
11414                         data[2] = 1;
11415                 }
11416                 if (tg3_test_memory(tp) != 0) {
11417                         etest->flags |= ETH_TEST_FL_FAILED;
11418                         data[3] = 1;
11419                 }
11420                 data[4] = tg3_test_loopback(tp);
                      if (data[4])
11421                         etest->flags |= ETH_TEST_FL_FAILED;
11422
11423                 tg3_full_unlock(tp);
11424
11425                 if (tg3_test_interrupt(tp) != 0) {
11426                         etest->flags |= ETH_TEST_FL_FAILED;
11427                         data[5] = 1;
11428                 }
11429
11430                 tg3_full_lock(tp, 0);
11431
11432                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11433                 if (netif_running(dev)) {
11434                         tg3_flag_set(tp, INIT_COMPLETE);
11435                         err2 = tg3_restart_hw(tp, 1);
11436                         if (!err2)
11437                                 tg3_netif_start(tp);
11438                 }
11439
11440                 tg3_full_unlock(tp);
11441
11442                 if (irq_sync && !err2)
11443                         tg3_phy_start(tp);
11444         }
11445         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11446                 tg3_power_down(tp);
11448 }
11449
11450 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11451 {
11452         struct mii_ioctl_data *data = if_mii(ifr);
11453         struct tg3 *tp = netdev_priv(dev);
11454         int err;
11455
11456         if (tg3_flag(tp, USE_PHYLIB)) {
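              /* When phylib manages the PHY, hand MII ioctls to it. */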
11457                 struct phy_device *phydev;
11458                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11459                         return -EAGAIN;
11460                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11461                 return phy_mii_ioctl(phydev, ifr, cmd);
11462         }
11463
11464         switch (cmd) {
11465         case SIOCGMIIPHY:
11466                 data->phy_id = tp->phy_addr;
11467
11468                 /* fallthru */
11469         case SIOCGMIIREG: {
11470                 u32 mii_regval;
11471
11472                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11473                         break;                  /* We have no PHY */
11474
11475                 if (!netif_running(dev))
11476                         return -EAGAIN;
11477
11478                 spin_lock_bh(&tp->lock);
11479                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11480                 spin_unlock_bh(&tp->lock);
11481
11482                 data->val_out = mii_regval;
11483
11484                 return err;
11485         }
11486
11487         case SIOCSMIIREG:
11488                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11489                         break;                  /* We have no PHY */
11490
11491                 if (!netif_running(dev))
11492                         return -EAGAIN;
11493
11494                 spin_lock_bh(&tp->lock);
11495                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11496                 spin_unlock_bh(&tp->lock);
11497
11498                 return err;
11499
11500         default:
11501                 /* do nothing */
11502                 break;
11503         }
11504         return -EOPNOTSUPP;
11505 }
11506
11507 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11508 {
11509         struct tg3 *tp = netdev_priv(dev);
11510
11511         memcpy(ec, &tp->coal, sizeof(*ec));
11512         return 0;
11513 }
11514
11515 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11516 {
11517         struct tg3 *tp = netdev_priv(dev);
11518         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11519         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11520
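              /* Only pre-5705 chips have the IRQ-context tick limits and
               * statistics block coalescing ticks; on newer chips the
               * limits stay zero, so any nonzero request for those
               * parameters below fails with -EINVAL.
               */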
11521         if (!tg3_flag(tp, 5705_PLUS)) {
11522                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11523                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11524                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11525                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11526         }
11527
11528         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11529             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11530             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11531             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11532             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11533             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11534             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11535             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11536             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11537             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11538                 return -EINVAL;
11539
11540         /* No rx interrupts will be generated if both are zero */
11541         if ((ec->rx_coalesce_usecs == 0) &&
11542             (ec->rx_max_coalesced_frames == 0))
11543                 return -EINVAL;
11544
11545         /* No tx interrupts will be generated if both are zero */
11546         if ((ec->tx_coalesce_usecs == 0) &&
11547             (ec->tx_max_coalesced_frames == 0))
11548                 return -EINVAL;
11549
11550         /* Only copy relevant parameters, ignore all others. */
11551         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11552         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11553         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11554         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11555         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11556         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11557         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11558         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11559         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11560
11561         if (netif_running(dev)) {
11562                 tg3_full_lock(tp, 0);
11563                 __tg3_set_coalesce(tp, &tp->coal);
11564                 tg3_full_unlock(tp);
11565         }
11566         return 0;
11567 }
11568
11569 static const struct ethtool_ops tg3_ethtool_ops = {
11570         .get_settings           = tg3_get_settings,
11571         .set_settings           = tg3_set_settings,
11572         .get_drvinfo            = tg3_get_drvinfo,
11573         .get_regs_len           = tg3_get_regs_len,
11574         .get_regs               = tg3_get_regs,
11575         .get_wol                = tg3_get_wol,
11576         .set_wol                = tg3_set_wol,
11577         .get_msglevel           = tg3_get_msglevel,
11578         .set_msglevel           = tg3_set_msglevel,
11579         .nway_reset             = tg3_nway_reset,
11580         .get_link               = ethtool_op_get_link,
11581         .get_eeprom_len         = tg3_get_eeprom_len,
11582         .get_eeprom             = tg3_get_eeprom,
11583         .set_eeprom             = tg3_set_eeprom,
11584         .get_ringparam          = tg3_get_ringparam,
11585         .set_ringparam          = tg3_set_ringparam,
11586         .get_pauseparam         = tg3_get_pauseparam,
11587         .set_pauseparam         = tg3_set_pauseparam,
11588         .self_test              = tg3_self_test,
11589         .get_strings            = tg3_get_strings,
11590         .set_phys_id            = tg3_set_phys_id,
11591         .get_ethtool_stats      = tg3_get_ethtool_stats,
11592         .get_coalesce           = tg3_get_coalesce,
11593         .set_coalesce           = tg3_set_coalesce,
11594         .get_sset_count         = tg3_get_sset_count,
11595 };
11596
11597 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11598 {
11599         u32 cursize, val, magic;
11600
11601         tp->nvram_size = EEPROM_CHIP_SIZE;
11602
11603         if (tg3_nvram_read(tp, 0, &magic) != 0)
11604                 return;
11605
11606         if ((magic != TG3_EEPROM_MAGIC) &&
11607             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11608             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11609                 return;
11610
11611         /*
11612          * Size the chip by reading offsets at increasing powers of two.
11613          * When we encounter our validation signature, we know the addressing
11614          * has wrapped around, and thus have our chip size.
11615          */
11616         cursize = 0x10;
11617
11618         while (cursize < tp->nvram_size) {
11619                 if (tg3_nvram_read(tp, cursize, &val) != 0)
11620                         return;
11621
11622                 if (val == magic)
11623                         break;
11624
11625                 cursize <<= 1;
11626         }
11627
11628         tp->nvram_size = cursize;
11629 }
11630
11631 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11632 {
11633         u32 val;
11634
11635         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11636                 return;
11637
11638         /* Selfboot format */
11639         if (val != TG3_EEPROM_MAGIC) {
11640                 tg3_get_eeprom_size(tp);
11641                 return;
11642         }
11643
11644         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11645                 if (val != 0) {
11646                         /* We want the 16-bit value stored at offset
11647                          * 0xf2.  The tg3_nvram_read() call reads from
11648                          * NVRAM and byteswaps the data using the same
11649                          * byteswapping settings as all other register
11650                          * accesses, which guarantees the value we want
11651                          * always lands in the lower 16 bits.  However,
11652                          * NVRAM stores the data in LE format, so the
11653                          * value read back is always opposite the CPU's
11654                          * endianness.  The 16-bit byteswap below then
11655                          * brings the data to CPU endianness.
11656                          */
11657                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11658                         return;
11659                 }
11660         }
11661         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11662 }
11663
11664 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11665 {
11666         u32 nvcfg1;
11667
11668         nvcfg1 = tr32(NVRAM_CFG1);
11669         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11670                 tg3_flag_set(tp, FLASH);
11671         } else {
11672                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11673                 tw32(NVRAM_CFG1, nvcfg1);
11674         }
11675
11676         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11677             tg3_flag(tp, 5780_CLASS)) {
11678                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11679                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11680                         tp->nvram_jedecnum = JEDEC_ATMEL;
11681                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11682                         tg3_flag_set(tp, NVRAM_BUFFERED);
11683                         break;
11684                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11685                         tp->nvram_jedecnum = JEDEC_ATMEL;
11686                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11687                         break;
11688                 case FLASH_VENDOR_ATMEL_EEPROM:
11689                         tp->nvram_jedecnum = JEDEC_ATMEL;
11690                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11691                         tg3_flag_set(tp, NVRAM_BUFFERED);
11692                         break;
11693                 case FLASH_VENDOR_ST:
11694                         tp->nvram_jedecnum = JEDEC_ST;
11695                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11696                         tg3_flag_set(tp, NVRAM_BUFFERED);
11697                         break;
11698                 case FLASH_VENDOR_SAIFUN:
11699                         tp->nvram_jedecnum = JEDEC_SAIFUN;
11700                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11701                         break;
11702                 case FLASH_VENDOR_SST_SMALL:
11703                 case FLASH_VENDOR_SST_LARGE:
11704                         tp->nvram_jedecnum = JEDEC_SST;
11705                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11706                         break;
11707                 }
11708         } else {
11709                 tp->nvram_jedecnum = JEDEC_ATMEL;
11710                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11711                 tg3_flag_set(tp, NVRAM_BUFFERED);
11712         }
11713 }
11714
11715 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11716 {
11717         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11718         case FLASH_5752PAGE_SIZE_256:
11719                 tp->nvram_pagesize = 256;
11720                 break;
11721         case FLASH_5752PAGE_SIZE_512:
11722                 tp->nvram_pagesize = 512;
11723                 break;
11724         case FLASH_5752PAGE_SIZE_1K:
11725                 tp->nvram_pagesize = 1024;
11726                 break;
11727         case FLASH_5752PAGE_SIZE_2K:
11728                 tp->nvram_pagesize = 2048;
11729                 break;
11730         case FLASH_5752PAGE_SIZE_4K:
11731                 tp->nvram_pagesize = 4096;
11732                 break;
11733         case FLASH_5752PAGE_SIZE_264:
11734                 tp->nvram_pagesize = 264;
11735                 break;
11736         case FLASH_5752PAGE_SIZE_528:
11737                 tp->nvram_pagesize = 528;
11738                 break;
11739         }
11740 }
11741
11742 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11743 {
11744         u32 nvcfg1;
11745
11746         nvcfg1 = tr32(NVRAM_CFG1);
11747
11748         /* NVRAM protection for TPM */
11749         if (nvcfg1 & (1 << 27))
11750                 tg3_flag_set(tp, PROTECTED_NVRAM);
11751
11752         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11753         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11754         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11755                 tp->nvram_jedecnum = JEDEC_ATMEL;
11756                 tg3_flag_set(tp, NVRAM_BUFFERED);
11757                 break;
11758         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11759                 tp->nvram_jedecnum = JEDEC_ATMEL;
11760                 tg3_flag_set(tp, NVRAM_BUFFERED);
11761                 tg3_flag_set(tp, FLASH);
11762                 break;
11763         case FLASH_5752VENDOR_ST_M45PE10:
11764         case FLASH_5752VENDOR_ST_M45PE20:
11765         case FLASH_5752VENDOR_ST_M45PE40:
11766                 tp->nvram_jedecnum = JEDEC_ST;
11767                 tg3_flag_set(tp, NVRAM_BUFFERED);
11768                 tg3_flag_set(tp, FLASH);
11769                 break;
11770         }
11771
11772         if (tg3_flag(tp, FLASH)) {
11773                 tg3_nvram_get_pagesize(tp, nvcfg1);
11774         } else {
11775                 /* For eeprom, set pagesize to maximum eeprom size */
11776                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11777
11778                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11779                 tw32(NVRAM_CFG1, nvcfg1);
11780         }
11781 }
11782
11783 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11784 {
11785         u32 nvcfg1, protect = 0;
11786
11787         nvcfg1 = tr32(NVRAM_CFG1);
11788
11789         /* NVRAM protection for TPM */
11790         if (nvcfg1 & (1 << 27)) {
11791                 tg3_flag_set(tp, PROTECTED_NVRAM);
11792                 protect = 1;
11793         }
11794
11795         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11796         switch (nvcfg1) {
11797         case FLASH_5755VENDOR_ATMEL_FLASH_1:
11798         case FLASH_5755VENDOR_ATMEL_FLASH_2:
11799         case FLASH_5755VENDOR_ATMEL_FLASH_3:
11800         case FLASH_5755VENDOR_ATMEL_FLASH_5:
11801                 tp->nvram_jedecnum = JEDEC_ATMEL;
11802                 tg3_flag_set(tp, NVRAM_BUFFERED);
11803                 tg3_flag_set(tp, FLASH);
11804                 tp->nvram_pagesize = 264;
11805                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11806                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11807                         tp->nvram_size = (protect ? 0x3e200 :
11808                                           TG3_NVRAM_SIZE_512KB);
11809                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11810                         tp->nvram_size = (protect ? 0x1f200 :
11811                                           TG3_NVRAM_SIZE_256KB);
11812                 else
11813                         tp->nvram_size = (protect ? 0x1f200 :
11814                                           TG3_NVRAM_SIZE_128KB);
11815                 break;
11816         case FLASH_5752VENDOR_ST_M45PE10:
11817         case FLASH_5752VENDOR_ST_M45PE20:
11818         case FLASH_5752VENDOR_ST_M45PE40:
11819                 tp->nvram_jedecnum = JEDEC_ST;
11820                 tg3_flag_set(tp, NVRAM_BUFFERED);
11821                 tg3_flag_set(tp, FLASH);
11822                 tp->nvram_pagesize = 256;
11823                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
11824                         tp->nvram_size = (protect ?
11825                                           TG3_NVRAM_SIZE_64KB :
11826                                           TG3_NVRAM_SIZE_128KB);
11827                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
11828                         tp->nvram_size = (protect ?
11829                                           TG3_NVRAM_SIZE_64KB :
11830                                           TG3_NVRAM_SIZE_256KB);
11831                 else
11832                         tp->nvram_size = (protect ?
11833                                           TG3_NVRAM_SIZE_128KB :
11834                                           TG3_NVRAM_SIZE_512KB);
11835                 break;
11836         }
11837 }
11838
11839 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
11840 {
11841         u32 nvcfg1;
11842
11843         nvcfg1 = tr32(NVRAM_CFG1);
11844
11845         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11846         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
11847         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11848         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
11849         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11850                 tp->nvram_jedecnum = JEDEC_ATMEL;
11851                 tg3_flag_set(tp, NVRAM_BUFFERED);
11852                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11853
11854                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11855                 tw32(NVRAM_CFG1, nvcfg1);
11856                 break;
11857         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11858         case FLASH_5755VENDOR_ATMEL_FLASH_1:
11859         case FLASH_5755VENDOR_ATMEL_FLASH_2:
11860         case FLASH_5755VENDOR_ATMEL_FLASH_3:
11861                 tp->nvram_jedecnum = JEDEC_ATMEL;
11862                 tg3_flag_set(tp, NVRAM_BUFFERED);
11863                 tg3_flag_set(tp, FLASH);
11864                 tp->nvram_pagesize = 264;
11865                 break;
11866         case FLASH_5752VENDOR_ST_M45PE10:
11867         case FLASH_5752VENDOR_ST_M45PE20:
11868         case FLASH_5752VENDOR_ST_M45PE40:
11869                 tp->nvram_jedecnum = JEDEC_ST;
11870                 tg3_flag_set(tp, NVRAM_BUFFERED);
11871                 tg3_flag_set(tp, FLASH);
11872                 tp->nvram_pagesize = 256;
11873                 break;
11874         }
11875 }
11876
11877 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11878 {
11879         u32 nvcfg1, protect = 0;
11880
11881         nvcfg1 = tr32(NVRAM_CFG1);
11882
11883         /* NVRAM protection for TPM */
11884         if (nvcfg1 & (1 << 27)) {
11885                 tg3_flag_set(tp, PROTECTED_NVRAM);
11886                 protect = 1;
11887         }
11888
11889         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11890         switch (nvcfg1) {
11891         case FLASH_5761VENDOR_ATMEL_ADB021D:
11892         case FLASH_5761VENDOR_ATMEL_ADB041D:
11893         case FLASH_5761VENDOR_ATMEL_ADB081D:
11894         case FLASH_5761VENDOR_ATMEL_ADB161D:
11895         case FLASH_5761VENDOR_ATMEL_MDB021D:
11896         case FLASH_5761VENDOR_ATMEL_MDB041D:
11897         case FLASH_5761VENDOR_ATMEL_MDB081D:
11898         case FLASH_5761VENDOR_ATMEL_MDB161D:
11899                 tp->nvram_jedecnum = JEDEC_ATMEL;
11900                 tg3_flag_set(tp, NVRAM_BUFFERED);
11901                 tg3_flag_set(tp, FLASH);
11902                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
11903                 tp->nvram_pagesize = 256;
11904                 break;
11905         case FLASH_5761VENDOR_ST_A_M45PE20:
11906         case FLASH_5761VENDOR_ST_A_M45PE40:
11907         case FLASH_5761VENDOR_ST_A_M45PE80:
11908         case FLASH_5761VENDOR_ST_A_M45PE16:
11909         case FLASH_5761VENDOR_ST_M_M45PE20:
11910         case FLASH_5761VENDOR_ST_M_M45PE40:
11911         case FLASH_5761VENDOR_ST_M_M45PE80:
11912         case FLASH_5761VENDOR_ST_M_M45PE16:
11913                 tp->nvram_jedecnum = JEDEC_ST;
11914                 tg3_flag_set(tp, NVRAM_BUFFERED);
11915                 tg3_flag_set(tp, FLASH);
11916                 tp->nvram_pagesize = 256;
11917                 break;
11918         }
11919
11920         if (protect) {
11921                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
11922         } else {
11923                 switch (nvcfg1) {
11924                 case FLASH_5761VENDOR_ATMEL_ADB161D:
11925                 case FLASH_5761VENDOR_ATMEL_MDB161D:
11926                 case FLASH_5761VENDOR_ST_A_M45PE16:
11927                 case FLASH_5761VENDOR_ST_M_M45PE16:
11928                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
11929                         break;
11930                 case FLASH_5761VENDOR_ATMEL_ADB081D:
11931                 case FLASH_5761VENDOR_ATMEL_MDB081D:
11932                 case FLASH_5761VENDOR_ST_A_M45PE80:
11933                 case FLASH_5761VENDOR_ST_M_M45PE80:
11934                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
11935                         break;
11936                 case FLASH_5761VENDOR_ATMEL_ADB041D:
11937                 case FLASH_5761VENDOR_ATMEL_MDB041D:
11938                 case FLASH_5761VENDOR_ST_A_M45PE40:
11939                 case FLASH_5761VENDOR_ST_M_M45PE40:
11940                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11941                         break;
11942                 case FLASH_5761VENDOR_ATMEL_ADB021D:
11943                 case FLASH_5761VENDOR_ATMEL_MDB021D:
11944                 case FLASH_5761VENDOR_ST_A_M45PE20:
11945                 case FLASH_5761VENDOR_ST_M_M45PE20:
11946                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11947                         break;
11948                 }
11949         }
11950 }
11951
11952 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
11953 {
11954         tp->nvram_jedecnum = JEDEC_ATMEL;
11955         tg3_flag_set(tp, NVRAM_BUFFERED);
11956         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11957 }
11958
11959 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
11960 {
11961         u32 nvcfg1;
11962
11963         nvcfg1 = tr32(NVRAM_CFG1);
11964
11965         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11966         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11967         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11968                 tp->nvram_jedecnum = JEDEC_ATMEL;
11969                 tg3_flag_set(tp, NVRAM_BUFFERED);
11970                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11971
11972                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11973                 tw32(NVRAM_CFG1, nvcfg1);
11974                 return;
11975         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11976         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11977         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11978         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11979         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11980         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11981         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11982                 tp->nvram_jedecnum = JEDEC_ATMEL;
11983                 tg3_flag_set(tp, NVRAM_BUFFERED);
11984                 tg3_flag_set(tp, FLASH);
11985
11986                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11987                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11988                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11989                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11990                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11991                         break;
11992                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11993                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11994                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11995                         break;
11996                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11997                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11998                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11999                         break;
12000                 }
12001                 break;
12002         case FLASH_5752VENDOR_ST_M45PE10:
12003         case FLASH_5752VENDOR_ST_M45PE20:
12004         case FLASH_5752VENDOR_ST_M45PE40:
12005                 tp->nvram_jedecnum = JEDEC_ST;
12006                 tg3_flag_set(tp, NVRAM_BUFFERED);
12007                 tg3_flag_set(tp, FLASH);
12008
12009                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12010                 case FLASH_5752VENDOR_ST_M45PE10:
12011                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12012                         break;
12013                 case FLASH_5752VENDOR_ST_M45PE20:
12014                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12015                         break;
12016                 case FLASH_5752VENDOR_ST_M45PE40:
12017                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12018                         break;
12019                 }
12020                 break;
12021         default:
12022                 tg3_flag_set(tp, NO_NVRAM);
12023                 return;
12024         }
12025
12026         tg3_nvram_get_pagesize(tp, nvcfg1);
12027         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12028                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12029 }
12030
12032 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12033 {
12034         u32 nvcfg1;
12035
12036         nvcfg1 = tr32(NVRAM_CFG1);
12037
12038         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12039         case FLASH_5717VENDOR_ATMEL_EEPROM:
12040         case FLASH_5717VENDOR_MICRO_EEPROM:
12041                 tp->nvram_jedecnum = JEDEC_ATMEL;
12042                 tg3_flag_set(tp, NVRAM_BUFFERED);
12043                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12044
12045                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12046                 tw32(NVRAM_CFG1, nvcfg1);
12047                 return;
12048         case FLASH_5717VENDOR_ATMEL_MDB011D:
12049         case FLASH_5717VENDOR_ATMEL_ADB011B:
12050         case FLASH_5717VENDOR_ATMEL_ADB011D:
12051         case FLASH_5717VENDOR_ATMEL_MDB021D:
12052         case FLASH_5717VENDOR_ATMEL_ADB021B:
12053         case FLASH_5717VENDOR_ATMEL_ADB021D:
12054         case FLASH_5717VENDOR_ATMEL_45USPT:
12055                 tp->nvram_jedecnum = JEDEC_ATMEL;
12056                 tg3_flag_set(tp, NVRAM_BUFFERED);
12057                 tg3_flag_set(tp, FLASH);
12058
12059                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12060                 case FLASH_5717VENDOR_ATMEL_MDB021D:
12061                         /* Detect size with tg3_nvram_get_size() */
12062                         break;
12063                 case FLASH_5717VENDOR_ATMEL_ADB021B:
12064                 case FLASH_5717VENDOR_ATMEL_ADB021D:
12065                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12066                         break;
12067                 default:
12068                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12069                         break;
12070                 }
12071                 break;
12072         case FLASH_5717VENDOR_ST_M_M25PE10:
12073         case FLASH_5717VENDOR_ST_A_M25PE10:
12074         case FLASH_5717VENDOR_ST_M_M45PE10:
12075         case FLASH_5717VENDOR_ST_A_M45PE10:
12076         case FLASH_5717VENDOR_ST_M_M25PE20:
12077         case FLASH_5717VENDOR_ST_A_M25PE20:
12078         case FLASH_5717VENDOR_ST_M_M45PE20:
12079         case FLASH_5717VENDOR_ST_A_M45PE20:
12080         case FLASH_5717VENDOR_ST_25USPT:
12081         case FLASH_5717VENDOR_ST_45USPT:
12082                 tp->nvram_jedecnum = JEDEC_ST;
12083                 tg3_flag_set(tp, NVRAM_BUFFERED);
12084                 tg3_flag_set(tp, FLASH);
12085
12086                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12087                 case FLASH_5717VENDOR_ST_M_M25PE20:
12088                 case FLASH_5717VENDOR_ST_M_M45PE20:
12089                         /* Detect size with tg3_nvram_get_size() */
12090                         break;
12091                 case FLASH_5717VENDOR_ST_A_M25PE20:
12092                 case FLASH_5717VENDOR_ST_A_M45PE20:
12093                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12094                         break;
12095                 default:
12096                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12097                         break;
12098                 }
12099                 break;
12100         default:
12101                 tg3_flag_set(tp, NO_NVRAM);
12102                 return;
12103         }
12104
12105         tg3_nvram_get_pagesize(tp, nvcfg1);
12106         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12107                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12108 }
12109
12110 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12111 {
12112         u32 nvcfg1, nvmpinstrp;
12113
12114         nvcfg1 = tr32(NVRAM_CFG1);
12115         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12116
12117         switch (nvmpinstrp) {
12118         case FLASH_5720_EEPROM_HD:
12119         case FLASH_5720_EEPROM_LD:
12120                 tp->nvram_jedecnum = JEDEC_ATMEL;
12121                 tg3_flag_set(tp, NVRAM_BUFFERED);
12122
12123                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12124                 tw32(NVRAM_CFG1, nvcfg1);
12125                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12126                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12127                 else
12128                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12129                 return;
12130         case FLASH_5720VENDOR_M_ATMEL_DB011D:
12131         case FLASH_5720VENDOR_A_ATMEL_DB011B:
12132         case FLASH_5720VENDOR_A_ATMEL_DB011D:
12133         case FLASH_5720VENDOR_M_ATMEL_DB021D:
12134         case FLASH_5720VENDOR_A_ATMEL_DB021B:
12135         case FLASH_5720VENDOR_A_ATMEL_DB021D:
12136         case FLASH_5720VENDOR_M_ATMEL_DB041D:
12137         case FLASH_5720VENDOR_A_ATMEL_DB041B:
12138         case FLASH_5720VENDOR_A_ATMEL_DB041D:
12139         case FLASH_5720VENDOR_M_ATMEL_DB081D:
12140         case FLASH_5720VENDOR_A_ATMEL_DB081D:
12141         case FLASH_5720VENDOR_ATMEL_45USPT:
12142                 tp->nvram_jedecnum = JEDEC_ATMEL;
12143                 tg3_flag_set(tp, NVRAM_BUFFERED);
12144                 tg3_flag_set(tp, FLASH);
12145
12146                 switch (nvmpinstrp) {
12147                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12148                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12149                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12150                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12151                         break;
12152                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12153                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12154                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12155                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12156                         break;
12157                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12158                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12159                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12160                         break;
12161                 default:
12162                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12163                         break;
12164                 }
12165                 break;
12166         case FLASH_5720VENDOR_M_ST_M25PE10:
12167         case FLASH_5720VENDOR_M_ST_M45PE10:
12168         case FLASH_5720VENDOR_A_ST_M25PE10:
12169         case FLASH_5720VENDOR_A_ST_M45PE10:
12170         case FLASH_5720VENDOR_M_ST_M25PE20:
12171         case FLASH_5720VENDOR_M_ST_M45PE20:
12172         case FLASH_5720VENDOR_A_ST_M25PE20:
12173         case FLASH_5720VENDOR_A_ST_M45PE20:
12174         case FLASH_5720VENDOR_M_ST_M25PE40:
12175         case FLASH_5720VENDOR_M_ST_M45PE40:
12176         case FLASH_5720VENDOR_A_ST_M25PE40:
12177         case FLASH_5720VENDOR_A_ST_M45PE40:
12178         case FLASH_5720VENDOR_M_ST_M25PE80:
12179         case FLASH_5720VENDOR_M_ST_M45PE80:
12180         case FLASH_5720VENDOR_A_ST_M25PE80:
12181         case FLASH_5720VENDOR_A_ST_M45PE80:
12182         case FLASH_5720VENDOR_ST_25USPT:
12183         case FLASH_5720VENDOR_ST_45USPT:
12184                 tp->nvram_jedecnum = JEDEC_ST;
12185                 tg3_flag_set(tp, NVRAM_BUFFERED);
12186                 tg3_flag_set(tp, FLASH);
12187
12188                 switch (nvmpinstrp) {
12189                 case FLASH_5720VENDOR_M_ST_M25PE20:
12190                 case FLASH_5720VENDOR_M_ST_M45PE20:
12191                 case FLASH_5720VENDOR_A_ST_M25PE20:
12192                 case FLASH_5720VENDOR_A_ST_M45PE20:
12193                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12194                         break;
12195                 case FLASH_5720VENDOR_M_ST_M25PE40:
12196                 case FLASH_5720VENDOR_M_ST_M45PE40:
12197                 case FLASH_5720VENDOR_A_ST_M25PE40:
12198                 case FLASH_5720VENDOR_A_ST_M45PE40:
12199                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12200                         break;
12201                 case FLASH_5720VENDOR_M_ST_M25PE80:
12202                 case FLASH_5720VENDOR_M_ST_M45PE80:
12203                 case FLASH_5720VENDOR_A_ST_M25PE80:
12204                 case FLASH_5720VENDOR_A_ST_M45PE80:
12205                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12206                         break;
12207                 default:
12208                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12209                         break;
12210                 }
12211                 break;
12212         default:
12213                 tg3_flag_set(tp, NO_NVRAM);
12214                 return;
12215         }
12216
12217         tg3_nvram_get_pagesize(tp, nvcfg1);
12218         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12219                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12220 }
12221
12222 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12223 static void __devinit tg3_nvram_init(struct tg3 *tp)
12224 {
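              /* Reset the EEPROM state machine and program the default
               * clock period before probing the part.
               */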
12225         tw32_f(GRC_EEPROM_ADDR,
12226              (EEPROM_ADDR_FSM_RESET |
12227               (EEPROM_DEFAULT_CLOCK_PERIOD <<
12228                EEPROM_ADDR_CLKPERD_SHIFT)));
12229
12230         msleep(1);
12231
12232         /* Enable seeprom accesses. */
12233         tw32_f(GRC_LOCAL_CTRL,
12234              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12235         udelay(100);
12236
12237         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12238             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12239                 tg3_flag_set(tp, NVRAM);
12240
12241                 if (tg3_nvram_lock(tp)) {
12242                         netdev_warn(tp->dev,
12243                                     "Cannot get nvram lock, %s failed\n",
12244                                     __func__);
12245                         return;
12246                 }
12247                 tg3_enable_nvram_access(tp);
12248
12249                 tp->nvram_size = 0;
12250
12251                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12252                         tg3_get_5752_nvram_info(tp);
12253                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12254                         tg3_get_5755_nvram_info(tp);
12255                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12256                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12257                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12258                         tg3_get_5787_nvram_info(tp);
12259                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12260                         tg3_get_5761_nvram_info(tp);
12261                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12262                         tg3_get_5906_nvram_info(tp);
12263                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12264                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12265                         tg3_get_57780_nvram_info(tp);
12266                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12267                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12268                         tg3_get_5717_nvram_info(tp);
12269                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12270                         tg3_get_5720_nvram_info(tp);
12271                 else
12272                         tg3_get_nvram_info(tp);
12273
12274                 if (tp->nvram_size == 0)
12275                         tg3_get_nvram_size(tp);
12276
12277                 tg3_disable_nvram_access(tp);
12278                 tg3_nvram_unlock(tp);
12279
12280         } else {
12281                 tg3_flag_clear(tp, NVRAM);
12282                 tg3_flag_clear(tp, NVRAM_BUFFERED);
12283
12284                 tg3_get_eeprom_size(tp);
12285         }
12286 }
12287
12288 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12289                                     u32 offset, u32 len, u8 *buf)
12290 {
12291         int i, j, rc = 0;
12292         u32 val;
12293
12294         for (i = 0; i < len; i += 4) {
12295                 u32 addr;
12296                 __be32 data;
12297
12298                 addr = offset + i;
12299
12300                 memcpy(&data, buf + i, 4);
12301
12302                 /*
12303                  * The SEEPROM interface expects the data to always be opposite
12304                  * the native endian format.  We accomplish this by reversing
12305                  * all the operations that would have been performed on the
12306                  * data from a call to tg3_nvram_read_be32().
12307                  */
12308                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12309
12310                 val = tr32(GRC_EEPROM_ADDR);
12311                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12312
12313                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12314                         EEPROM_ADDR_READ);
12315                 tw32(GRC_EEPROM_ADDR, val |
12316                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
12317                         (addr & EEPROM_ADDR_ADDR_MASK) |
12318                         EEPROM_ADDR_START |
12319                         EEPROM_ADDR_WRITE);
12320
12321                 for (j = 0; j < 1000; j++) {
12322                         val = tr32(GRC_EEPROM_ADDR);
12323
12324                         if (val & EEPROM_ADDR_COMPLETE)
12325                                 break;
12326                         msleep(1);
12327                 }
12328                 if (!(val & EEPROM_ADDR_COMPLETE)) {
12329                         rc = -EBUSY;
12330                         break;
12331                 }
12332         }
12333
12334         return rc;
12335 }
12336
12337 /* offset and length are dword aligned */
12338 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12339                 u8 *buf)
12340 {
12341         int ret = 0;
12342         u32 pagesize = tp->nvram_pagesize;
12343         u32 pagemask = pagesize - 1;
12344         u32 nvram_cmd;
12345         u8 *tmp;
12346
12347         tmp = kmalloc(pagesize, GFP_KERNEL);
12348         if (tmp == NULL)
12349                 return -ENOMEM;
12350
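              /* Unbuffered flash parts can only be programmed a page at
               * a time: read back the page covering the target range,
               * merge in the new data, erase the page, then rewrite it
               * word by word.
               */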
	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		phy_addr = offset & ~pagemask;

		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
			if (ret)
				break;
		}
		if (ret)
			break;
	}

	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}

/* offset and length are dword aligned */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

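	/* Buffered flash and EEPROM accept single word writes; the
	 * FIRST/LAST command bits bracket each page (and the transfer
	 * as a whole) so the controller knows when to commit a page.
	 */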
	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_WREN |
						 NVRAM_CMD_GO | NVRAM_CMD_DONE);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}

/* offset and length are dword aligned */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

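	/* On boards that gate NVRAM writes with GPIO1, drop the
	 * write-protect line for the duration of the write; it is
	 * restored once the write completes below.
	 */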
	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!tg3_flag(tp, NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	} else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		} else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}

struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};

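/* Match the device's PCI subsystem vendor/device IDs against the
 * static board table above.
 */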
static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}
	return NULL;
}

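/* Read the hardware configuration left behind by the bootcode in NIC
 * SRAM (or in the VCPU shadow registers on the 5906) and translate it
 * into driver flags.
 */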
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;
	u16 pmcsr;

	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so we need to make sure we're in D0.
	 */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
	msleep(1);

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1 mode if 0 (MAC_MODE) is read,
			 * which happens with some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* Serdes signal pre-emphasis in register 0x590 is set by
		 * the bootcode if bit 18 is set.
		 */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tg3_flag_set(tp, ASPM_WORKAROUND);
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	}
done:
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}

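/* Issue a single OTP controller command and poll for completion. */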
static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}

/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}

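/* Establish the default link configuration: advertise everything the
 * PHY type supports and leave autonegotiation enabled.
 */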
static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg |
		  ADVERTISED_Pause;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		adv |= ADVERTISED_1000baseT_Half |
		       ADVERTISED_1000baseT_Full;

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_INVALID;
	tp->link_config.duplex = DUPLEX_INVALID;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_INVALID;
	tp->link_config.active_duplex = DUPLEX_INVALID;
	tp->link_config.orig_speed = SPEED_INVALID;
	tp->link_config.orig_duplex = DUPLEX_INVALID;
	tp->link_config.orig_autoneg = AUTONEG_INVALID;
}

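/* Identify the PHY via phylib, the MII ID registers, the EEPROM data,
 * or the hardcoded subsystem-ID table, then set up its default link
 * configuration.
 */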
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to the PHY_ID found in the EEPROM area or, failing that,
		 * the hard-coded subsystem-ID table.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    ((tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, mask;

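		/* The link status bit in BMSR is latched low, so read
		 * the register twice to get its current value.
		 */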
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}

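/* Extract the board part number, and on Dell boards the firmware
 * version, from the PCI Vital Product Data block; fall back to a
 * chip-based name when no usable VPD is present.
 */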
static void __devinit tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	int j, i = 0;

	vpd_data = (u8 *)tg3_vpd_readblock(tp);
	if (!vpd_data)
		goto out_no_vpd;

	i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
			     PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > TG3_NVM_VPD_LEN)
		goto out_not_found;

	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		memcpy(tp->fw_ver, &vpd_data[j], len);
		strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > TG3_NVM_VPD_LEN)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}

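/* A firmware image is considered valid when its header word carries
 * the 0x0c000000 signature and the following word is zero.
 */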
static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    val != 0)
		return 0;

	return 1;
}

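/* Append the NVRAM bootcode version to tp->fw_ver.  Newer images carry
 * a version string; older ones only store packed major/minor numbers.
 */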
static void __devinit tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}

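/* Report the hardware self-boot version stored in NVRAM config word 1. */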
static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
}

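/* Decode the self-boot image's format revision to find and append its
 * major/minor/build version fields.
 */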
static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;

	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}

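/* Walk the NVRAM directory for the ASF initialization entry and append
 * the management firmware version string it points at.
 */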
static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}

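/* Query the APE for its firmware type (NCSI or DASH) and version and
 * append them to tp->fw_ver.
 */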
static void __devinit tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
		tg3_flag_set(tp, APE_HAS_NCSI);
		fwtype = "NCSI";
	} else {
		fwtype = "DASH";
	}

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}

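/* Assemble tp->fw_ver from whatever images are present: VPD data,
 * bootcode or self-boot NVRAM, management firmware, and the APE.
 */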
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);
	else
		return;

	if (!tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || vpd_vers)
		goto done;

	tg3_read_mgmtfw_ver(tp);

done:
	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}

static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);

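/* Return the largest RX return ring size this chip family supports. */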
static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}

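/* Host bridges known to reorder posted PCI writes; when one of these
 * is present the driver enables its write-reordering workaround.
 */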
static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};

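/* Probe chip revision, bus type, and board quirks once at init time
 * and cache the results in tg3 flags; most of the driver keys off
 * what is discovered here.
 */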
static int __devinit tg3_get_invariants(struct tg3 *tp)
{
	u32 misc_ctrl_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err;

	/* Force memory write invalidate off.  If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size.  The Broadcom driver has this
	 * workaround but turns MWI off all the time, so it never uses
	 * it.  This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
	 * has the register indirect write enable bit set before
	 * we try to access any of the MMIO registers.  It is also
	 * critical that the PCI-X hw workaround situation is decided
	 * before that as well.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);

	tp->pci_chip_rev_id = (misc_ctrl_reg >>
			       MISC_HOST_CTRL_CHIPREV_SHIFT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
		u32 prod_id_asic_rev;

		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
			pci_read_config_dword(tp->pdev,
					      TG3PCI_GEN2_PRODID_ASICREV,
					      &prod_id_asic_rev);
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			pci_read_config_dword(tp->pdev,
					      TG3PCI_GEN15_PRODID_ASICREV,
					      &prod_id_asic_rev);
		else
			pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
					      &prod_id_asic_rev);

		tp->pci_chip_rev_id = prod_id_asic_rev;
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
	 * we need to disable memory and use config. cycles
	 * only to access all registers. The 5702/03 chips
	 * can mistakenly decode the special cycles from the
	 * ICH chipsets as memory write cycles, causing corruption
	 * of register and memory space. Only certain ICH bridges
	 * will drive special cycles with non-zero data during the
	 * address phase which can fall within the 5703's address
	 * range. This is not an ICH bug as the PCI spec allows
	 * non-zero address during special cycles. However, only
	 * these ICH bridges are known to drive non-zero addresses
	 * during special cycles.
	 *
	 * Since special cycles do not cross PCI bridges, we only
	 * enable this workaround if the 5703 is on the secondary
	 * bus of these ICH bridges.
	 */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
	    (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
			u32	rev;
		} ich_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
			  0xa },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
			  PCI_ANY_ID },
			{ },
		};
		struct tg3_dev_id *pci_id = &ich_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor, pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (pci_id->rev != PCI_ANY_ID) {
				if (bridge->revision > pci_id->rev)
					continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number ==
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, ICH_WORKAROUND);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
		} bridge_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
			{ },
		};
		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor,
						pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->subordinate >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 5701_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	/* The EPB bridge inside the 5714, 5715, and 5780 cannot support
	 * DMA addresses wider than 40 bits. The bridge may have additional
	 * 57xx devices behind it, for example in some 4-port NIC designs.
	 * Any tg3 device found behind the bridge will also need the 40-bit
	 * DMA workaround.
	 */
13576         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13577             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13578                 tg3_flag_set(tp, 5780_CLASS);
13579                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13580                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13581         } else {
13582                 struct pci_dev *bridge = NULL;
13583
13584                 do {
13585                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13586                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
13587                                                 bridge);
13588                         if (bridge && bridge->subordinate &&
13589                             (bridge->subordinate->number <=
13590                              tp->pdev->bus->number) &&
13591                             (bridge->subordinate->subordinate >=
13592                              tp->pdev->bus->number)) {
13593                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13594                                 pci_dev_put(bridge);
13595                                 break;
13596                         }
13597                 } while (bridge);
13598         }
13599
13600         /* Initialize misc host control in PCI block. */
13601         tp->misc_host_ctrl |= (misc_ctrl_reg &
13602                                MISC_HOST_CTRL_CHIPREV);
13603         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13604                                tp->misc_host_ctrl);
13605
13606         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13607             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
13608             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13609             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13610                 tp->pdev_peer = tg3_find_peer(tp);
13611
13612         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13613             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13614             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13615                 tg3_flag_set(tp, 5717_PLUS);
13616
13617         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13618             tg3_flag(tp, 5717_PLUS))
13619                 tg3_flag_set(tp, 57765_PLUS);
13620
13621         /* Intentionally exclude ASIC_REV_5906 */
13622         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13623             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13624             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13625             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13626             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13627             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13628             tg3_flag(tp, 57765_PLUS))
13629                 tg3_flag_set(tp, 5755_PLUS);
13630
13631         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13632             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13633             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13634             tg3_flag(tp, 5755_PLUS) ||
13635             tg3_flag(tp, 5780_CLASS))
13636                 tg3_flag_set(tp, 5750_PLUS);
13637
13638         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13639             tg3_flag(tp, 5750_PLUS))
13640                 tg3_flag_set(tp, 5705_PLUS);
13641
13642         /* Determine TSO capabilities */
13643         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13644                 ; /* Do nothing. HW bug. */
13645         else if (tg3_flag(tp, 57765_PLUS))
13646                 tg3_flag_set(tp, HW_TSO_3);
13647         else if (tg3_flag(tp, 5755_PLUS) ||
13648                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13649                 tg3_flag_set(tp, HW_TSO_2);
13650         else if (tg3_flag(tp, 5750_PLUS)) {
13651                 tg3_flag_set(tp, HW_TSO_1);
13652                 tg3_flag_set(tp, TSO_BUG);
13653                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13654                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13655                         tg3_flag_clear(tp, TSO_BUG);
13656         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13657                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13658                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13659                         tg3_flag_set(tp, TSO_BUG);
13660                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13661                         tp->fw_needed = FIRMWARE_TG3TSO5;
13662                 else
13663                         tp->fw_needed = FIRMWARE_TG3TSO;
13664         }
13665
13666         /* Selectively allow TSO based on operating conditions */
13667         if (tg3_flag(tp, HW_TSO_1) ||
13668             tg3_flag(tp, HW_TSO_2) ||
13669             tg3_flag(tp, HW_TSO_3) ||
13670             (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
13671                 tg3_flag_set(tp, TSO_CAPABLE);
13672         else {
13673                 tg3_flag_clear(tp, TSO_CAPABLE);
13674                 tg3_flag_clear(tp, TSO_BUG);
13675                 tp->fw_needed = NULL;
13676         }
13677
13678         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
13679                 tp->fw_needed = FIRMWARE_TG3;
13680
13681         tp->irq_max = 1;
13682
13683         if (tg3_flag(tp, 5750_PLUS)) {
13684                 tg3_flag_set(tp, SUPPORT_MSI);
13685                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13686                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13687                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13688                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13689                      tp->pdev_peer == tp->pdev))
13690                         tg3_flag_clear(tp, SUPPORT_MSI);
13691
13692                 if (tg3_flag(tp, 5755_PLUS) ||
13693                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13694                         tg3_flag_set(tp, 1SHOT_MSI);
13695                 }
13696
13697                 if (tg3_flag(tp, 57765_PLUS)) {
13698                         tg3_flag_set(tp, SUPPORT_MSIX);
13699                         tp->irq_max = TG3_IRQ_MAX_VECS;
13700                 }
13701         }
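        /* tp->irq_max is only an upper bound; how many MSI-X vectors are
         * actually requested is decided later, when the device is opened.
         */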
13702
13703         if (tg3_flag(tp, 5755_PLUS))
13704                 tg3_flag_set(tp, SHORT_DMA_BUG);
13705
13706         if (tg3_flag(tp, 5717_PLUS))
13707                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
13708
13709         if (tg3_flag(tp, 57765_PLUS) &&
13710             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13711                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
13712
13713         if (!tg3_flag(tp, 5705_PLUS) ||
13714             tg3_flag(tp, 5780_CLASS) ||
13715             tg3_flag(tp, USE_JUMBO_BDFLAG))
13716                 tg3_flag_set(tp, JUMBO_CAPABLE);
13717
13718         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13719                               &pci_state_reg);
13720
13721         tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
13722         if (tp->pcie_cap != 0) {
13723                 u16 lnkctl;
13724
13725                 tg3_flag_set(tp, PCI_EXPRESS);
13726
13727                 tp->pcie_readrq = 4096;
13728                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13729                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13730                         tp->pcie_readrq = 2048;
13731
13732                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13733
13734                 pci_read_config_word(tp->pdev,
13735                                      tp->pcie_cap + PCI_EXP_LNKCTL,
13736                                      &lnkctl);
13737                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13738                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13739                             ASIC_REV_5906) {
13740                                 tg3_flag_clear(tp, HW_TSO_2);
13741                                 tg3_flag_clear(tp, TSO_CAPABLE);
13742                         }
13743                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13744                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13745                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13746                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13747                                 tg3_flag_set(tp, CLKREQ_BUG);
13748                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13749                         tg3_flag_set(tp, L1PLLPD_EN);
13750                 }
13751         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13752                 tg3_flag_set(tp, PCI_EXPRESS);
13753         } else if (!tg3_flag(tp, 5705_PLUS) ||
13754                    tg3_flag(tp, 5780_CLASS)) {
13755                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13756                 if (!tp->pcix_cap) {
13757                         dev_err(&tp->pdev->dev,
13758                                 "Cannot find PCI-X capability, aborting\n");
13759                         return -EIO;
13760                 }
13761
13762                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13763                         tg3_flag_set(tp, PCIX_MODE);
13764         }
13765
13766         /* If we have an AMD 762 or VIA K8T800 chipset, write
13767          * reordering to the mailbox registers done by the host
13768          * controller can cause major trouble.  We read back from
13769          * every mailbox register write to force the writes to be
13770          * posted to the chip in order.
13771          */
13772         if (pci_dev_present(tg3_write_reorder_chipsets) &&
13773             !tg3_flag(tp, PCI_EXPRESS))
13774                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
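        /* A minimal sketch of the workaround, matching what
         * tg3_write_flush_reg32() does:
         *
         *         writel(val, tp->regs + off);
         *         readl(tp->regs + off);      <-- readback forces ordering
         *
         * Every mailbox write is thus posted before the next one is issued.
         */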
13775
13776         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13777                              &tp->pci_cacheline_sz);
13778         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13779                              &tp->pci_lat_timer);
13780         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13781             tp->pci_lat_timer < 64) {
13782                 tp->pci_lat_timer = 64;
13783                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13784                                       tp->pci_lat_timer);
13785         }
13786
13787         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13788                 /* 5700 BX chips need to have their TX producer index
13789                  * mailboxes written twice to work around a bug.
13790                  */
13791                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
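#if 0
                /* Sketch (never compiled) of what tg3_write32_tx_mbox()
                 * does once TXD_MBOX_HWBUG is set: the producer index is
                 * written twice, and read back when MBOX_WRITE_REORDER is
                 * also set.
                 */
                writel(val, mbox);
                if (tg3_flag(tp, TXD_MBOX_HWBUG))
                        writel(val, mbox);
                if (tg3_flag(tp, MBOX_WRITE_REORDER))
                        readl(mbox);
#endif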
13792
13793                 /* If we are in PCI-X mode, enable register write workaround.
13794                  *
13795                  * The workaround is to use indirect register accesses
13796                  * for all chip writes not to mailbox registers.
13797                  */
13798                 if (tg3_flag(tp, PCIX_MODE)) {
13799                         u32 pm_reg;
13800
13801                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
13802
13803                         /* The chip can have its power management PCI config
13804                          * space registers clobbered due to this bug.
13805                          * So explicitly force the chip into D0 here.
13806                          */
13807                         pci_read_config_dword(tp->pdev,
13808                                               tp->pm_cap + PCI_PM_CTRL,
13809                                               &pm_reg);
13810                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13811                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13812                         pci_write_config_dword(tp->pdev,
13813                                                tp->pm_cap + PCI_PM_CTRL,
13814                                                pm_reg);
13815
13816                         /* Also, force SERR#/PERR# in PCI command. */
13817                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13818                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13819                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13820                 }
13821         }
13822
13823         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13824                 tg3_flag_set(tp, PCI_HIGH_SPEED);
13825         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13826                 tg3_flag_set(tp, PCI_32BIT);
13827
13828         /* Chip-specific fixup from Broadcom driver */
13829         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13830             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
13831                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
13832                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
13833         }
13834
13835         /* Default fast path register access methods */
13836         tp->read32 = tg3_read32;
13837         tp->write32 = tg3_write32;
13838         tp->read32_mbox = tg3_read32;
13839         tp->write32_mbox = tg3_write32;
13840         tp->write32_tx_mbox = tg3_write32;
13841         tp->write32_rx_mbox = tg3_write32;
13842
13843         /* Various workaround register access methods */
13844         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
13845                 tp->write32 = tg3_write_indirect_reg32;
13846         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13847                  (tg3_flag(tp, PCI_EXPRESS) &&
13848                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
13849                 /*
13850                  * Back-to-back register writes can cause problems on these
13851                  * chips; the workaround is to read back all reg writes
13852                  * except those to mailbox regs.
13853                  *
13854                  * See tg3_write_flush_reg32().
13855                  */
13856                 tp->write32 = tg3_write_flush_reg32;
13857         }
13858
13859         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
13860                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
13861                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
13862                         tp->write32_rx_mbox = tg3_write_flush_reg32;
13863         }
13864
13865         if (tg3_flag(tp, ICH_WORKAROUND)) {
13866                 tp->read32 = tg3_read_indirect_reg32;
13867                 tp->write32 = tg3_write_indirect_reg32;
13868                 tp->read32_mbox = tg3_read_indirect_mbox;
13869                 tp->write32_mbox = tg3_write_indirect_mbox;
13870                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
13871                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
13872
13873                 iounmap(tp->regs);
13874                 tp->regs = NULL;
13875
13876                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13877                 pci_cmd &= ~PCI_COMMAND_MEMORY;
13878                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13879         }
13880         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13881                 tp->read32_mbox = tg3_read32_mbox_5906;
13882                 tp->write32_mbox = tg3_write32_mbox_5906;
13883                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
13884                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
13885         }
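        /* From here on, all register and mailbox traffic goes through the
         * accessor pointers chosen above (via the tr32()/tw32() macro
         * family) rather than through raw readl()/writel().
         */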
13886
13887         if (tp->write32 == tg3_write_indirect_reg32 ||
13888             (tg3_flag(tp, PCIX_MODE) &&
13889              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13890               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
13891                 tg3_flag_set(tp, SRAM_USE_CONFIG);
13892
13893         /* Get eeprom hw config before calling tg3_power_up().
13894          * In particular, the TG3_FLAG_IS_NIC flag must be
13895          * determined before calling tg3_power_up() so that
13896          * we know whether or not to switch out of Vaux power.
13897          * When the flag is set, it means that GPIO1 is used for eeprom
13898          * write protect and also implies that it is a LOM where GPIOs
13899          * are not used to switch power.
13900          */
13901         tg3_get_eeprom_hw_cfg(tp);
13902
13903         if (tg3_flag(tp, ENABLE_APE)) {
13904                 /* Allow reads and writes to the
13905                  * APE register and memory space.
13906                  */
13907                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
13908                                  PCISTATE_ALLOW_APE_SHMEM_WR |
13909                                  PCISTATE_ALLOW_APE_PSPACE_WR;
13910                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
13911                                        pci_state_reg);
13912         }
13913
13914         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13915             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13916             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13917             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13918             tg3_flag(tp, 57765_PLUS))
13919                 tg3_flag_set(tp, CPMU_PRESENT);
13920
13921         /* Set up tp->grc_local_ctrl before calling tg3_power_up().
13922          * GPIO1 driven high will bring 5700's external PHY out of reset.
13923          * It is also used as eeprom write protect on LOMs.
13924          */
13925         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
13926         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13927             tg3_flag(tp, EEPROM_WRITE_PROT))
13928                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
13929                                        GRC_LCLCTRL_GPIO_OUTPUT1);
13930         /* Unused GPIO3 must be driven as output on 5752 because there
13931          * are no pull-up resistors on unused GPIO pins.
13932          */
13933         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13934                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
13935
13936         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13937             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13938             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13939                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13940
13941         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
13942             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
13943                 /* Turn off the debug UART. */
13944                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13945                 if (tg3_flag(tp, IS_NIC))
13946                         /* Keep VMain power. */
13947                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
13948                                               GRC_LCLCTRL_GPIO_OUTPUT0;
13949         }
13950
13951         /* Force the chip into D0. */
13952         err = tg3_power_up(tp);
13953         if (err) {
13954                 dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
13955                 return err;
13956         }
13957
13958         /* Derive the initial jumbo mode from the MTU assigned in
13959          * ether_setup() via the alloc_etherdev() call.
13960          */
13961         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
13962                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13963
13964         /* Determine WakeOnLan speed to use. */
13965         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13966             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
13967             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
13968             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
13969                 tg3_flag_clear(tp, WOL_SPEED_100MB);
13970         } else {
13971                 tg3_flag_set(tp, WOL_SPEED_100MB);
13972         }
13973
13974         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13975                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
13976
13977         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
13978         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13979             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
13980              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
13981              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
13982             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
13983             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13984                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
13985
13986         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
13987             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
13988                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
13989         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
13990                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
13991
13992         if (tg3_flag(tp, 5705_PLUS) &&
13993             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
13994             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13995             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
13996             !tg3_flag(tp, 57765_PLUS)) {
13997                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13998                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13999                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14000                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14001                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14002                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14003                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14004                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14005                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14006                 } else
14007                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14008         }
14009
14010         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14011             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14012                 tp->phy_otp = tg3_read_otp_phycfg(tp);
14013                 if (tp->phy_otp == 0)
14014                         tp->phy_otp = TG3_OTP_DEFAULT;
14015         }
14016
14017         if (tg3_flag(tp, CPMU_PRESENT))
14018                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14019         else
14020                 tp->mi_mode = MAC_MI_MODE_BASE;
14021
14022         tp->coalesce_mode = 0;
14023         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14024             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14025                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14026
14027         /* Set these bits to enable the statistics workaround. */
14028         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14029             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14030             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14031                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14032                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14033         }
14034
14035         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14036             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14037                 tg3_flag_set(tp, USE_PHYLIB);
14038
14039         err = tg3_mdio_init(tp);
14040         if (err)
14041                 return err;
14042
14043         /* Initialize data/descriptor byte/word swapping. */
14044         val = tr32(GRC_MODE);
14045         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14046                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14047                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14048                         GRC_MODE_B2HRX_ENABLE |
14049                         GRC_MODE_HTX2B_ENABLE |
14050                         GRC_MODE_HOST_STACKUP);
14051         else
14052                 val &= GRC_MODE_HOST_STACKUP;
14053
14054         tw32(GRC_MODE, val | tp->grc_mode);
14055
14056         tg3_switch_clocks(tp);
14057
14058         /* Clear this out for sanity. */
14059         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14060
14061         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14062                               &pci_state_reg);
14063         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14064             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14065                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14066
14067                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14068                     chiprevid == CHIPREV_ID_5701_B0 ||
14069                     chiprevid == CHIPREV_ID_5701_B2 ||
14070                     chiprevid == CHIPREV_ID_5701_B5) {
14071                         void __iomem *sram_base;
14072
14073                         /* Write some dummy words into the SRAM status block
14074                          * area and see if they read back correctly.  If the
14075                          * readback value is bad, force-enable the PCIX workaround.
14076                          */
14077                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14078
14079                         writel(0x00000000, sram_base);
14080                         writel(0x00000000, sram_base + 4);
14081                         writel(0xffffffff, sram_base + 4);
14082                         if (readl(sram_base) != 0x00000000)
14083                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14084                 }
14085         }
14086
14087         udelay(50);
14088         tg3_nvram_init(tp);
14089
14090         grc_misc_cfg = tr32(GRC_MISC_CFG);
14091         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14092
14093         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14094             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14095              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14096                 tg3_flag_set(tp, IS_5788);
14097
14098         if (!tg3_flag(tp, IS_5788) &&
14099             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14100                 tg3_flag_set(tp, TAGGED_STATUS);
14101         if (tg3_flag(tp, TAGGED_STATUS)) {
14102                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14103                                       HOSTCC_MODE_CLRTICK_TXBD);
14104
14105                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14106                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14107                                        tp->misc_host_ctrl);
14108         }
14109
14110         /* Preserve the APE MAC_MODE bits */
14111         if (tg3_flag(tp, ENABLE_APE))
14112                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14113         else
14114                 tp->mac_mode = TG3_DEF_MAC_MODE;
14115
14116         /* These devices are limited to 10/100 operation. */
14117         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14118              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14119             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14120              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14121              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14122               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14123               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14124             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14125              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14126               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14127               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14128             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14129             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14130             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14131             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14132                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14133
14134         err = tg3_phy_probe(tp);
14135         if (err) {
14136                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14137                 /* ... but do not return immediately ... */
14138                 tg3_mdio_fini(tp);
14139         }
14140
14141         tg3_read_vpd(tp);
14142         tg3_read_fw_ver(tp);
14143
14144         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14145                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14146         } else {
14147                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14148                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14149                 else
14150                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14151         }
14152
14153         /* 5700 {AX,BX} chips have a broken status block link
14154          * change bit implementation, so we must use the
14155          * status register in those cases.
14156          */
14157         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14158                 tg3_flag_set(tp, USE_LINKCHG_REG);
14159         else
14160                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14161
14162         /* The led_ctrl is set during tg3_phy_probe(); here we might
14163          * have to force the link status polling mechanism based
14164          * upon subsystem IDs.
14165          */
14166         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14167             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14168             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14169                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14170                 tg3_flag_set(tp, USE_LINKCHG_REG);
14171         }
14172
14173         /* For all SERDES we poll the MAC status register. */
14174         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14175                 tg3_flag_set(tp, POLL_SERDES);
14176         else
14177                 tg3_flag_clear(tp, POLL_SERDES);
14178
14179         tp->rx_offset = NET_IP_ALIGN;
14180         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14181         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14182             tg3_flag(tp, PCIX_MODE)) {
14183                 tp->rx_offset = 0;
14184 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14185                 tp->rx_copy_thresh = ~(u16)0;
14186 #endif
14187         }
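        /* NET_IP_ALIGN (2 on most architectures) keeps the IP header
         * 4-byte aligned.  The 5701 in PCI-X mode cannot DMA to a
         * 2-byte-offset buffer, so where unaligned accesses are expensive
         * every packet is copied instead (rx_copy_thresh = ~(u16)0).
         */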
14188
14189         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14190         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14191         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14192
14193         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14194
14195         /* Increment the rx prod index on the rx std ring by at most
14196          * 8 for these chips to work around hw errata.
14197          */
14198         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14199             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14200             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14201                 tp->rx_std_max_post = 8;
14202
14203         if (tg3_flag(tp, ASPM_WORKAROUND))
14204                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14205                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14206
14207         return err;
14208 }
14209
14210 #ifdef CONFIG_SPARC
14211 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14212 {
14213         struct net_device *dev = tp->dev;
14214         struct pci_dev *pdev = tp->pdev;
14215         struct device_node *dp = pci_device_to_OF_node(pdev);
14216         const unsigned char *addr;
14217         int len;
14218
14219         addr = of_get_property(dp, "local-mac-address", &len);
14220         if (addr && len == 6) {
14221                 memcpy(dev->dev_addr, addr, 6);
14222                 memcpy(dev->perm_addr, dev->dev_addr, 6);
14223                 return 0;
14224         }
14225         return -ENODEV;
14226 }
14227
14228 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14229 {
14230         struct net_device *dev = tp->dev;
14231
14232         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14233         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14234         return 0;
14235 }
14236 #endif
14237
14238 static int __devinit tg3_get_device_address(struct tg3 *tp)
14239 {
14240         struct net_device *dev = tp->dev;
14241         u32 hi, lo, mac_offset;
14242         int addr_ok = 0;
14243
14244 #ifdef CONFIG_SPARC
14245         if (!tg3_get_macaddr_sparc(tp))
14246                 return 0;
14247 #endif
14248
14249         mac_offset = 0x7c;
14250         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14251             tg3_flag(tp, 5780_CLASS)) {
14252                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14253                         mac_offset = 0xcc;
14254                 if (tg3_nvram_lock(tp))
14255                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14256                 else
14257                         tg3_nvram_unlock(tp);
14258         } else if (tg3_flag(tp, 5717_PLUS)) {
14259                 if (PCI_FUNC(tp->pdev->devfn) & 1)
14260                         mac_offset = 0xcc;
14261                 if (PCI_FUNC(tp->pdev->devfn) > 1)
14262                         mac_offset += 0x18c;
14263         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14264                 mac_offset = 0x10;
14265
14266         /* First try to get it from MAC address mailbox. */
14267         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14268         if ((hi >> 16) == 0x484b) {
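                /* 0x484b is ASCII "HK", the signature the bootcode leaves
                 * when the mailbox holds a valid address.  For example,
                 * hi = 0x484b0a0b with lo = 0x0c0d0e0f assembles to
                 * 0a:0b:0c:0d:0e:0f below.
                 */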
14269                 dev->dev_addr[0] = (hi >>  8) & 0xff;
14270                 dev->dev_addr[1] = (hi >>  0) & 0xff;
14271
14272                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14273                 dev->dev_addr[2] = (lo >> 24) & 0xff;
14274                 dev->dev_addr[3] = (lo >> 16) & 0xff;
14275                 dev->dev_addr[4] = (lo >>  8) & 0xff;
14276                 dev->dev_addr[5] = (lo >>  0) & 0xff;
14277
14278                 /* Some old bootcode may report a 0 MAC address in SRAM */
14279                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14280         }
14281         if (!addr_ok) {
14282                 /* Next, try NVRAM. */
14283                 if (!tg3_flag(tp, NO_NVRAM) &&
14284                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14285                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14286                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14287                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14288                 }
14289                 /* Finally just fetch it out of the MAC control regs. */
14290                 else {
14291                         hi = tr32(MAC_ADDR_0_HIGH);
14292                         lo = tr32(MAC_ADDR_0_LOW);
14293
14294                         dev->dev_addr[5] = lo & 0xff;
14295                         dev->dev_addr[4] = (lo >> 8) & 0xff;
14296                         dev->dev_addr[3] = (lo >> 16) & 0xff;
14297                         dev->dev_addr[2] = (lo >> 24) & 0xff;
14298                         dev->dev_addr[1] = hi & 0xff;
14299                         dev->dev_addr[0] = (hi >> 8) & 0xff;
14300                 }
14301         }
14302
14303         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14304 #ifdef CONFIG_SPARC
14305                 if (!tg3_get_default_macaddr_sparc(tp))
14306                         return 0;
14307 #endif
14308                 return -EINVAL;
14309         }
14310         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14311         return 0;
14312 }
14313
14314 #define BOUNDARY_SINGLE_CACHELINE       1
14315 #define BOUNDARY_MULTI_CACHELINE        2
14316
14317 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14318 {
14319         int cacheline_size;
14320         u8 byte;
14321         int goal;
14322
14323         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14324         if (byte == 0)
14325                 cacheline_size = 1024;
14326         else
14327                 cacheline_size = (int) byte * 4;
14328
14329         /* On 5703 and later chips, the boundary bits have no
14330          * effect.
14331          */
14332         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14333             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14334             !tg3_flag(tp, PCI_EXPRESS))
14335                 goto out;
14336
14337 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14338         goal = BOUNDARY_MULTI_CACHELINE;
14339 #else
14340 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14341         goal = BOUNDARY_SINGLE_CACHELINE;
14342 #else
14343         goal = 0;
14344 #endif
14345 #endif
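        /* Worked example: on sparc64 the goal is BOUNDARY_SINGLE_CACHELINE,
         * so a 64-byte cache line on a plain PCI bus selects
         * DMA_RWCTRL_READ_BNDRY_64 | DMA_RWCTRL_WRITE_BNDRY_64 below.
         */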
14346
14347         if (tg3_flag(tp, 57765_PLUS)) {
14348                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14349                 goto out;
14350         }
14351
14352         if (!goal)
14353                 goto out;
14354
14355         /* PCI controllers on most RISC systems tend to disconnect
14356          * when a device tries to burst across a cache-line boundary.
14357          * Therefore, letting tg3 do so just wastes PCI bandwidth.
14358          *
14359          * Unfortunately, for PCI-E there are only limited
14360          * write-side controls for this, and thus for reads
14361          * we will still get the disconnects.  We'll also waste
14362          * these PCI cycles for both read and write for chips
14363          * other than 5700 and 5701, which do not implement the
14364          * boundary bits.
14365          */
14366         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14367                 switch (cacheline_size) {
14368                 case 16:
14369                 case 32:
14370                 case 64:
14371                 case 128:
14372                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14373                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14374                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14375                         } else {
14376                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14377                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14378                         }
14379                         break;
14380
14381                 case 256:
14382                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14383                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14384                         break;
14385
14386                 default:
14387                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14388                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14389                         break;
14390                 }
14391         } else if (tg3_flag(tp, PCI_EXPRESS)) {
14392                 switch (cacheline_size) {
14393                 case 16:
14394                 case 32:
14395                 case 64:
14396                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14397                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14398                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14399                                 break;
14400                         }
14401                         /* fallthrough */
14402                 case 128:
14403                 default:
14404                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14405                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14406                         break;
14407                 }
14408         } else {
14409                 switch (cacheline_size) {
14410                 case 16:
14411                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14412                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14413                                         DMA_RWCTRL_WRITE_BNDRY_16);
14414                                 break;
14415                         }
14416                         /* fallthrough */
14417                 case 32:
14418                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14419                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14420                                         DMA_RWCTRL_WRITE_BNDRY_32);
14421                                 break;
14422                         }
14423                         /* fallthrough */
14424                 case 64:
14425                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14426                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14427                                         DMA_RWCTRL_WRITE_BNDRY_64);
14428                                 break;
14429                         }
14430                         /* fallthrough */
14431                 case 128:
14432                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14433                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14434                                         DMA_RWCTRL_WRITE_BNDRY_128);
14435                                 break;
14436                         }
14437                         /* fallthrough */
14438                 case 256:
14439                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
14440                                 DMA_RWCTRL_WRITE_BNDRY_256);
14441                         break;
14442                 case 512:
14443                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
14444                                 DMA_RWCTRL_WRITE_BNDRY_512);
14445                         break;
14446                 case 1024:
14447                 default:
14448                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14449                                 DMA_RWCTRL_WRITE_BNDRY_1024);
14450                         break;
14451                 }
14452         }
14453
14454 out:
14455         return val;
14456 }
14457
14458 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14459 {
14460         struct tg3_internal_buffer_desc test_desc;
14461         u32 sram_dma_descs;
14462         int i, ret;
14463
14464         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14465
14466         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14467         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14468         tw32(RDMAC_STATUS, 0);
14469         tw32(WDMAC_STATUS, 0);
14470
14471         tw32(BUFMGR_MODE, 0);
14472         tw32(FTQ_RESET, 0);
14473
14474         test_desc.addr_hi = ((u64) buf_dma) >> 32;
14475         test_desc.addr_lo = buf_dma & 0xffffffff;
14476         test_desc.nic_mbuf = 0x00002100;
14477         test_desc.len = size;
14478
14479         /*
14480          * HP ZX1 systems were seeing test failures for 5701 cards running
14481          * at 33MHz the *second* time the tg3 driver was loaded after an
14482          * initial scan.
14483          *
14484          * Broadcom tells me:
14485          *   ...the DMA engine is connected to the GRC block and a DMA
14486          *   reset may affect the GRC block in some unpredictable way...
14487          *   The behavior of resets to individual blocks has not been tested.
14488          *
14489          * Broadcom noted the GRC reset will also reset all sub-components.
14490          */
14491         if (to_device) {
14492                 test_desc.cqid_sqid = (13 << 8) | 2;
14493
14494                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14495                 udelay(40);
14496         } else {
14497                 test_desc.cqid_sqid = (16 << 8) | 7;
14498
14499                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14500                 udelay(40);
14501         }
14502         test_desc.flags = 0x00000005;
14503
14504         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14505                 u32 val;
14506
14507                 val = *(((u32 *)&test_desc) + i);
14508                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14509                                        sram_dma_descs + (i * sizeof(u32)));
14510                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14511         }
14512         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
14513
14514         if (to_device)
14515                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14516         else
14517                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14518
14519         ret = -ENODEV;
14520         for (i = 0; i < 40; i++) {
14521                 u32 val;
14522
14523                 if (to_device)
14524                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14525                 else
14526                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14527                 if ((val & 0xffff) == sram_dma_descs) {
14528                         ret = 0;
14529                         break;
14530                 }
14531
14532                 udelay(100);
14533         }
14534
14535         return ret;
14536 }
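
/* tg3_do_test_dma() pushes a single descriptor through the read- or
 * write-DMA engine and polls the matching completion FIFO; tg3_test_dma()
 * below uses it to bounce a test pattern out to NIC SRAM and back.
 */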
14537
14538 #define TEST_BUFFER_SIZE        0x2000
14539
14540 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14541         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14542         { },
14543 };
14544
14545 static int __devinit tg3_test_dma(struct tg3 *tp)
14546 {
14547         dma_addr_t buf_dma;
14548         u32 *buf, saved_dma_rwctrl;
14549         int ret = 0;
14550
14551         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14552                                  &buf_dma, GFP_KERNEL);
14553         if (!buf) {
14554                 ret = -ENOMEM;
14555                 goto out_nofree;
14556         }
14557
14558         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14559                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14560
14561         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14562
14563         if (tg3_flag(tp, 57765_PLUS))
14564                 goto out;
14565
14566         if (tg3_flag(tp, PCI_EXPRESS)) {
14567                 /* DMA read watermark not used on PCIE */
14568                 tp->dma_rwctrl |= 0x00180000;
14569         } else if (!tg3_flag(tp, PCIX_MODE)) {
14570                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14571                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14572                         tp->dma_rwctrl |= 0x003f0000;
14573                 else
14574                         tp->dma_rwctrl |= 0x003f000f;
14575         } else {
14576                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14577                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14578                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14579                         u32 read_water = 0x7;
14580
14581                         /* If the 5704 is behind the EPB bridge, we can
14582                          * do the less restrictive ONE_DMA workaround for
14583                          * better performance.
14584                          */
14585                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14586                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14587                                 tp->dma_rwctrl |= 0x8000;
14588                         else if (ccval == 0x6 || ccval == 0x7)
14589                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14590
14591                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14592                                 read_water = 4;
14593                         /* Set bit 23 to enable PCIX hw bug fix */
14594                         tp->dma_rwctrl |=
14595                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14596                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14597                                 (1 << 23);
14598                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14599                         /* 5780 always in PCIX mode */
14600                         tp->dma_rwctrl |= 0x00144000;
14601                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14602                         /* 5714 always in PCIX mode */
14603                         tp->dma_rwctrl |= 0x00148000;
14604                 } else {
14605                         tp->dma_rwctrl |= 0x001b000f;
14606                 }
14607         }
14608
14609         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14610             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14611                 tp->dma_rwctrl &= 0xfffffff0;
14612
14613         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14614             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14615                 /* Remove this if it causes problems for some boards. */
14616                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14617
14618                 /* On 5700/5701 chips, we need to set this bit.
14619                  * Otherwise the chip will issue cacheline transactions
14620                  * to streamable DMA memory with not all the byte
14621                  * enables turned on.  This is an error on several
14622                  * RISC PCI controllers, in particular sparc64.
14623                  *
14624                  * On 5703/5704 chips, this bit has been reassigned
14625                  * a different meaning.  In particular, it is used
14626                  * on those chips to enable a PCI-X workaround.
14627                  */
14628                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14629         }
14630
14631         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14632
14633 #if 0
14634         /* Unneeded, already done by tg3_get_invariants.  */
14635         tg3_switch_clocks(tp);
14636 #endif
14637
14638         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14639             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14640                 goto out;
14641
14642         /* It is best to perform the DMA test with the maximum write
14643          * burst size to expose the 5700/5701 write DMA bug.
14644          */
14645         saved_dma_rwctrl = tp->dma_rwctrl;
14646         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14647         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14648
14649         while (1) {
14650                 u32 *p = buf, i;
14651
14652                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14653                         p[i] = i;
14654
14655                 /* Send the buffer to the chip. */
14656                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14657                 if (ret) {
14658                         dev_err(&tp->pdev->dev,
14659                                 "%s: Buffer write failed. err = %d\n",
14660                                 __func__, ret);
14661                         break;
14662                 }
14663
14664 #if 0
14665                 /* Validate that the data reached card RAM correctly. */
14666                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14667                         u32 val;
14668                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
14669                         if (le32_to_cpu(val) != p[i]) {
14670                                 dev_err(&tp->pdev->dev,
14671                                         "%s: Buffer corrupted on device! "
14672                                         "(%d != %d)\n", __func__, val, i);
14673                                 /* ret = -ENODEV here? */
14674                         }
14675                         p[i] = 0;
14676                 }
14677 #endif
14678                 /* Now read it back. */
14679                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14680                 if (ret) {
14681                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14682                                 "err = %d\n", __func__, ret);
14683                         break;
14684                 }
14685
14686                 /* Verify it. */
14687                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14688                         if (p[i] == i)
14689                                 continue;
14690
14691                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14692                             DMA_RWCTRL_WRITE_BNDRY_16) {
14693                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14694                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14695                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14696                                 break;
14697                         } else {
14698                                 dev_err(&tp->pdev->dev,
14699                                         "%s: Buffer corrupted on read back! "
14700                                         "(%d != %d)\n", __func__, p[i], i);
14701                                 ret = -ENODEV;
14702                                 goto out;
14703                         }
14704                 }
14705
14706                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14707                         /* Success. */
14708                         ret = 0;
14709                         break;
14710                 }
14711         }
14712         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14713             DMA_RWCTRL_WRITE_BNDRY_16) {
14714                 /* DMA test passed without adjusting the DMA boundary;
14715                  * now look for chipsets that are known to expose the
14716                  * DMA bug without failing the test.
14717                  */
14718                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
14719                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14720                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14721                 } else {
14722                         /* Safe to use the calculated DMA boundary. */
14723                         tp->dma_rwctrl = saved_dma_rwctrl;
14724                 }
14725
14726                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14727         }
14728
14729 out:
14730         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
14731 out_nofree:
14732         return ret;
14733 }
14734
14735 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14736 {
14737         if (tg3_flag(tp, 57765_PLUS)) {
14738                 tp->bufmgr_config.mbuf_read_dma_low_water =
14739                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14740                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14741                         DEFAULT_MB_MACRX_LOW_WATER_57765;
14742                 tp->bufmgr_config.mbuf_high_water =
14743                         DEFAULT_MB_HIGH_WATER_57765;
14744
14745                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14746                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14747                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14748                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14749                 tp->bufmgr_config.mbuf_high_water_jumbo =
14750                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14751         } else if (tg3_flag(tp, 5705_PLUS)) {
14752                 tp->bufmgr_config.mbuf_read_dma_low_water =
14753                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14754                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14755                         DEFAULT_MB_MACRX_LOW_WATER_5705;
14756                 tp->bufmgr_config.mbuf_high_water =
14757                         DEFAULT_MB_HIGH_WATER_5705;
14758                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14759                         tp->bufmgr_config.mbuf_mac_rx_low_water =
14760                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
14761                         tp->bufmgr_config.mbuf_high_water =
14762                                 DEFAULT_MB_HIGH_WATER_5906;
14763                 }
14764
14765                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14766                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14767                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14768                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14769                 tp->bufmgr_config.mbuf_high_water_jumbo =
14770                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
14771         } else {
14772                 tp->bufmgr_config.mbuf_read_dma_low_water =
14773                         DEFAULT_MB_RDMA_LOW_WATER;
14774                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14775                         DEFAULT_MB_MACRX_LOW_WATER;
14776                 tp->bufmgr_config.mbuf_high_water =
14777                         DEFAULT_MB_HIGH_WATER;
14778
14779                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14780                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14781                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14782                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14783                 tp->bufmgr_config.mbuf_high_water_jumbo =
14784                         DEFAULT_MB_HIGH_WATER_JUMBO;
14785         }
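
        /* The _jumbo variants above take effect only when the MTU exceeds
         * the standard Ethernet payload size; the standard values apply at
         * the default MTU.
         */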
14786
14787         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14788         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
14789 }
14790
14791 static char * __devinit tg3_phy_string(struct tg3 *tp)
14792 {
14793         switch (tp->phy_id & TG3_PHY_ID_MASK) {
14794         case TG3_PHY_ID_BCM5400:        return "5400";
14795         case TG3_PHY_ID_BCM5401:        return "5401";
14796         case TG3_PHY_ID_BCM5411:        return "5411";
14797         case TG3_PHY_ID_BCM5701:        return "5701";
14798         case TG3_PHY_ID_BCM5703:        return "5703";
14799         case TG3_PHY_ID_BCM5704:        return "5704";
14800         case TG3_PHY_ID_BCM5705:        return "5705";
14801         case TG3_PHY_ID_BCM5750:        return "5750";
14802         case TG3_PHY_ID_BCM5752:        return "5752";
14803         case TG3_PHY_ID_BCM5714:        return "5714";
14804         case TG3_PHY_ID_BCM5780:        return "5780";
14805         case TG3_PHY_ID_BCM5755:        return "5755";
14806         case TG3_PHY_ID_BCM5787:        return "5787";
14807         case TG3_PHY_ID_BCM5784:        return "5784";
14808         case TG3_PHY_ID_BCM5756:        return "5722/5756";
14809         case TG3_PHY_ID_BCM5906:        return "5906";
14810         case TG3_PHY_ID_BCM5761:        return "5761";
14811         case TG3_PHY_ID_BCM5718C:       return "5718C";
14812         case TG3_PHY_ID_BCM5718S:       return "5718S";
14813         case TG3_PHY_ID_BCM57765:       return "57765";
14814         case TG3_PHY_ID_BCM5719C:       return "5719C";
14815         case TG3_PHY_ID_BCM5720C:       return "5720C";
14816         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
14817         case 0:                 return "serdes";
14818         default:                return "unknown";
14819         }
14820 }
14821
14822 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
14823 {
14824         if (tg3_flag(tp, PCI_EXPRESS)) {
14825                 strcpy(str, "PCI Express");
14826                 return str;
14827         } else if (tg3_flag(tp, PCIX_MODE)) {
14828                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
14829
14830                 strcpy(str, "PCIX:");
14831
14832                 if ((clock_ctrl == 7) ||
14833                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
14834                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
14835                         strcat(str, "133MHz");
14836                 else if (clock_ctrl == 0)
14837                         strcat(str, "33MHz");
14838                 else if (clock_ctrl == 2)
14839                         strcat(str, "50MHz");
14840                 else if (clock_ctrl == 4)
14841                         strcat(str, "66MHz");
14842                 else if (clock_ctrl == 6)
14843                         strcat(str, "100MHz");
14844         } else {
14845                 strcpy(str, "PCI:");
14846                 if (tg3_flag(tp, PCI_HIGH_SPEED))
14847                         strcat(str, "66MHz");
14848                 else
14849                         strcat(str, "33MHz");
14850         }
14851         if (tg3_flag(tp, PCI_32BIT))
14852                 strcat(str, ":32-bit");
14853         else
14854                 strcat(str, ":64-bit");
14855         return str;
14856 }
14857
14858 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14859 {
14860         struct pci_dev *peer;
14861         unsigned int func, devnr = tp->pdev->devfn & ~7;
14862
14863         for (func = 0; func < 8; func++) {
14864                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14865                 if (peer && peer != tp->pdev)
14866                         break;
14867                 pci_dev_put(peer);
14868         }
14869         /* 5704 can be configured in single-port mode; set peer to
14870          * tp->pdev in that case.
14871          */
14872         if (!peer) {
14873                 peer = tp->pdev;
14874                 return peer;
14875         }
14876
14877         /*
14878          * We don't need to keep the refcount elevated; there's no way
14879          * to remove one half of this device without removing the other.
14880          */
14881         pci_dev_put(peer);
14882
14883         return peer;
14884 }
14885
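/* Seed the ethtool coalescing parameters with the driver defaults.
 * When the host coalescing engine clears its tick counters on BD
 * events (the CLRTICK modes), the tick defaults differ, and on
 * 5705-class and newer chips the during-IRQ and statistics
 * coalescing values are left at zero.
 */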
static void __devinit tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}

static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};

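/* One-time probe routine: enable and map the PCI device, read out
 * the chip invariants, choose DMA masks, assemble the feature set
 * and the per-vector interrupt mailboxes, then register the net
 * device.
 */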
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	u32 features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capability, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_iounmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_iounmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow
	 * INT-PHY loopback for the remaining devices.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_iounmap;
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}

		tg3_ape_lock_init(tp);

		if (tg3_flag(tp, ENABLE_ASF))
			tg3_read_dash_ver(tp);
	}

	/*
	 * Reset the chip in case an UNDI or EFI driver did not shut it
	 * down properly.  The DMA self test below will enable the WDMAC,
	 * and we would otherwise see (spurious) pending DMA on the PCI
	 * bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i < 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

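		/* The per-vector send mailboxes are not contiguous:
		 * the +0xc/-0x4 alternation below walks what appears
		 * to be an interleaved register layout, advancing a
		 * net 8 bytes for every two vectors.
		 */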
		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tp->pci_chip_rev_id,
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		if (tp->fw)
			release_firmware(tp->fw);

		cancel_work_sync(&tp->reset_task);

		if (!tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	flush_work_sync(&tp->reset_task);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}

static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}

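/* SIMPLE_DEV_PM_OPS wires tg3_suspend/tg3_resume into all of the
 * system sleep callbacks (suspend/resume, freeze/thaw,
 * poweroff/restore), so hibernation reuses the same two handlers.
 */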
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);
	tg3_flag_clear(tp, RESTART_TIMER);

	/* Want to make sure that the reset task doesn't run */
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	tg3_flag_clear(tp, RESTART_TIMER);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}

/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by the BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
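	/* Re-save the freshly restored config space so that any later
	 * restore during recovery starts from up-to-date state.
	 */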
	pci_save_state(pdev);

	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err) {
		netdev_err(netdev, "Failed to restore register access.\n");
		goto done;
	}

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	tg3_full_unlock(tp);
	if (err) {
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

	tg3_phy_start(tp);

done:
	rtnl_unlock();
}

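/* PCI error recovery flow: the core invokes .error_detected first,
 * then .slot_reset once the link has been reset, and finally
 * .resume when traffic may flow again.
 */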
static struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};

static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);