/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
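
/* Usage sketch (illustrative only, not driver code): the flag name is
 * token-pasted onto TG3_FLAG_, so
 *
 *     if (tg3_flag(tp, ENABLE_APE))
 *
 * expands to test_bit(TG3_FLAG_ENABLE_APE, tp->tg3_flags).  Routing all
 * flag updates through set_bit()/clear_bit() keeps them atomic with
 * respect to concurrent users of the same bitmap.
 */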

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     119
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "May 18, 2011"

#define TG3_DEF_MAC_MODE        0
#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100
#define TG3_RSS_INDIR_TBL_SIZE          128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
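
/* Worked example (illustrative): because TG3_TX_RING_SIZE is a power of
 * two (512), NEXT_TX() can wrap the index with a mask instead of a divide:
 *
 *     NEXT_TX(510) == 511 & 511 == 511
 *     NEXT_TX(511) == 512 & 511 == 0      (wraps to the ring start)
 *
 * which is exactly '((N) + 1) % TG3_TX_RING_SIZE', minus the hw modulo
 * the comment above is trying to avoid.
 */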

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif
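
/* How the threshold is consumed (rough sketch of the rx path, for
 * orientation): frames no longer than TG3_RX_COPY_THRESH(tp) are copied
 * into a fresh small skb so the original DMA buffer can be recycled in
 * place; longer frames are unmapped and handed up as-is.  On the
 * constant-folded branch above, the comparison compiles against an
 * immediate rather than a load from *tp.
 */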

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
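
/* Register access conventions used throughout (summary of the helpers
 * above): tw32()/tr32() go through the function pointers selected at
 * probe time; tw32_f() additionally reads the register back so a posted
 * PCI write is forced out before the caller proceeds; tw32_wait_f()
 * further enforces the usec_wait delay described at _tw32_flush() for
 * registers that are unsafe to read back immediately.
 */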

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver doesn't hold any stale locks. */
        for (i = 0; i < 8; i++)
                tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                break;
        default:
                return -EINVAL;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == APE_LOCK_GRANT_DRIVER)
                        break;
                udelay(10);
        }

        if (status != APE_LOCK_GRANT_DRIVER) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off,
                                APE_LOCK_GRANT_DRIVER);

                ret = -EBUSY;
        }

        return ret;
}
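
/* Typical pairing (illustrative): callers bracket accesses to resources
 * shared with the APE firmware, e.g.
 *
 *     if (!tg3_ape_lock(tp, TG3_APE_LOCK_MEM)) {
 *             ... touch APE shared memory ...
 *             tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *     }
 *
 * On chips without the APE (ENABLE_APE clear) both calls degenerate to
 * no-ops, so the bracketing can stay unconditional in the callers.
 */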

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                break;
        default:
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }
        /* check for RX/TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
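
/* Usage sketch (illustrative): callers treat a nonzero return as an MDIO
 * timeout, e.g.
 *
 *     u32 bmsr;
 *
 *     if (!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS))
 *             ... link is up ...
 *
 * The MAC_MI_MODE dance around the transaction temporarily disables the
 * MAC's background auto-polling of the PHY so it cannot collide with the
 * frame written to MAC_MI_COM here.
 */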

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}
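
/* The two helpers above implement the standard clause-45-over-clause-22
 * indirect MMD access sequence (IEEE 802.3 Annex 22D): write the device
 * address to the MMD control register, write the target register address
 * to the MMD address register, switch the control register to
 * no-increment data mode, then move the data word through the
 * address/data register.
 */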

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}
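
/* Background note: BMCR_RESET is defined by IEEE 802.3 clause 22 as
 * self-clearing once the PHY's internal reset completes, which is why
 * the loop above polls for the bit to drop rather than writing it back
 * to zero.
 */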

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (tg3_readphy(tp, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (tg3_writephy(tp, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;

                if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state.
         */
        if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
                tg3_bmcr_reset(tp);

        i = mdiobus_register(tp->mdio_bus);
        if (i) {
                dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
                mdiobus_free(tp->mdio_bus);
                return i;
        }

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        if (!phydev || !phydev->drv) {
                dev_warn(&tp->pdev->dev, "No PHY devices\n");
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
                return -ENODEV;
        }

        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM57780:
                phydev->interface = PHY_INTERFACE_MODE_GMII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                break;
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
                                     PHY_BRCM_RX_REFCLK_UNUSED |
                                     PHY_BRCM_DIS_TXCRXC_NOENRGY |
                                     PHY_BRCM_AUTO_PWRDWN_ENABLE;
                if (tg3_flag(tp, RGMII_INBAND_DISABLE))
                        phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
                /* fallthru */
        case PHY_ID_RTL8211C:
                phydev->interface = PHY_INTERFACE_MODE_RGMII;
                break;
        case PHY_ID_RTL8201E:
        case PHY_ID_BCMAC131:
                phydev->interface = PHY_INTERFACE_MODE_MII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                tp->phy_flags |= TG3_PHYFLG_IS_FET;
                break;
        }

        tg3_flag_set(tp, MDIOBUS_INITED);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);

        return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
        if (tg3_flag(tp, MDIOBUS_INITED)) {
                tg3_flag_clear(tp, MDIOBUS_INITED);
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
        }
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
        u32 val;

        val = tr32(GRC_RX_CPU_EVENT);
        val |= GRC_RX_CPU_DRIVER_EVENT;
        tw32_f(GRC_RX_CPU_EVENT, val);

        tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
        int i;
        unsigned int delay_cnt;
        long time_remain;

        /* If enough time has passed, no wait is necessary. */
        time_remain = (long)(tp->last_event_jiffies + 1 +
                      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
                      (long)jiffies;
        if (time_remain < 0)
                return;

        /* Check if we can shorten the wait time. */
        delay_cnt = jiffies_to_usecs(time_remain);
        if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
                delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
        delay_cnt = (delay_cnt >> 3) + 1;

        for (i = 0; i < delay_cnt; i++) {
                if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
                        break;
                udelay(8);
        }
}
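
/* Arithmetic note on the loop above: the remaining window is polled in
 * 8 usec steps, and delay_cnt = (usecs >> 3) + 1 rounds the step count
 * up, so the total poll time always covers the full timeout window.
 */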

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
        u32 reg;
        u32 val;

        if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
                return;

        tg3_wait_for_event_ack(tp);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

        val = 0;
        if (!tg3_readphy(tp, MII_BMCR, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_BMSR, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

        val = 0;
        if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_LPA, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

        val = 0;
        if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
                if (!tg3_readphy(tp, MII_CTRL1000, &reg))
                        val = reg << 16;
                if (!tg3_readphy(tp, MII_STAT1000, &reg))
                        val |= (reg & 0xffff);
        }
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

        if (!tg3_readphy(tp, MII_PHYADDR, &reg))
                val = reg << 16;
        else
                val = 0;
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

        tg3_generate_fw_event(tp);
}

static void tg3_link_report(struct tg3 *tp)
{
        if (!netif_carrier_ok(tp->dev)) {
                netif_info(tp, link, tp->dev, "Link is down\n");
                tg3_ump_link_report(tp);
        } else if (netif_msg_link(tp)) {
                netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
                            (tp->link_config.active_speed == SPEED_1000 ?
                             1000 :
                             (tp->link_config.active_speed == SPEED_100 ?
                              100 : 10)),
                            (tp->link_config.active_duplex == DUPLEX_FULL ?
                             "full" : "half"));

                netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
                            (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
                            "on" : "off",
                            (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
                            "on" : "off");

                if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
                        netdev_info(tp->dev, "EEE is %s\n",
                                    tp->setlpicnt ? "enabled" : "disabled");

                tg3_ump_link_report(tp);
        }
}

static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
        u16 miireg;

        if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
                miireg = ADVERTISE_PAUSE_CAP;
        else if (flow_ctrl & FLOW_CTRL_TX)
                miireg = ADVERTISE_PAUSE_ASYM;
        else if (flow_ctrl & FLOW_CTRL_RX)
                miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
        else
                miireg = 0;

        return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
        u16 miireg;

        if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
                miireg = ADVERTISE_1000XPAUSE;
        else if (flow_ctrl & FLOW_CTRL_TX)
                miireg = ADVERTISE_1000XPSE_ASYM;
        else if (flow_ctrl & FLOW_CTRL_RX)
                miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
        else
                miireg = 0;

        return miireg;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
        u8 cap = 0;

        if (lcladv & ADVERTISE_1000XPAUSE) {
                if (lcladv & ADVERTISE_1000XPSE_ASYM) {
                        if (rmtadv & LPA_1000XPAUSE)
                                cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        else if (rmtadv & LPA_1000XPAUSE_ASYM)
                                cap = FLOW_CTRL_RX;
                } else {
                        if (rmtadv & LPA_1000XPAUSE)
                                cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
                }
        } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
                if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
                        cap = FLOW_CTRL_TX;
        }

        return cap;
}
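
/* Resolution examples implied by the logic above (local advert vs.
 * link-partner advert -> resulting flow control):
 *
 *     PAUSE         vs. PAUSE          -> TX | RX  (symmetric)
 *     PAUSE | ASYM  vs. ASYM           -> RX only
 *     ASYM          vs. PAUSE | ASYM   -> TX only
 *     ASYM          vs. PAUSE          -> none
 *
 * This follows the standard 802.3x pause resolution table, applied to
 * the 1000BASE-X advertisement bits.
 */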

static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
        u8 autoneg;
        u8 flowctrl = 0;
        u32 old_rx_mode = tp->rx_mode;
        u32 old_tx_mode = tp->tx_mode;

        if (tg3_flag(tp, USE_PHYLIB))
                autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
        else
                autoneg = tp->link_config.autoneg;

        if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
                if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
                        flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
                else
                        flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
        } else
                flowctrl = tp->link_config.flowctrl;

        tp->link_config.active_flowctrl = flowctrl;

        if (flowctrl & FLOW_CTRL_RX)
                tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

        if (old_rx_mode != tp->rx_mode)
                tw32_f(MAC_RX_MODE, tp->rx_mode);

        if (flowctrl & FLOW_CTRL_TX)
                tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

        if (old_tx_mode != tp->tx_mode)
                tw32_f(MAC_TX_MODE, tp->tx_mode);
}

static void tg3_adjust_link(struct net_device *dev)
{
        u8 oldflowctrl, linkmesg = 0;
        u32 mac_mode, lcl_adv, rmt_adv;
        struct tg3 *tp = netdev_priv(dev);
        struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        spin_lock_bh(&tp->lock);

        mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
                                    MAC_MODE_HALF_DUPLEX);

        oldflowctrl = tp->link_config.active_flowctrl;

        if (phydev->link) {
                lcl_adv = 0;
                rmt_adv = 0;

                if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
                        mac_mode |= MAC_MODE_PORT_MODE_MII;
                else if (phydev->speed == SPEED_1000 ||
                         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
                        mac_mode |= MAC_MODE_PORT_MODE_GMII;
                else
                        mac_mode |= MAC_MODE_PORT_MODE_MII;

                if (phydev->duplex == DUPLEX_HALF)
                        mac_mode |= MAC_MODE_HALF_DUPLEX;
                else {
                        lcl_adv = tg3_advert_flowctrl_1000T(
                                  tp->link_config.flowctrl);

                        if (phydev->pause)
                                rmt_adv = LPA_PAUSE_CAP;
                        if (phydev->asym_pause)
                                rmt_adv |= LPA_PAUSE_ASYM;
                }

                tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
        } else
                mac_mode |= MAC_MODE_PORT_MODE_GMII;

        if (mac_mode != tp->mac_mode) {
                tp->mac_mode = mac_mode;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
                if (phydev->speed == SPEED_10)
                        tw32(MAC_MI_STAT,
                             MAC_MI_STAT_10MBPS_MODE |
                             MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
                else
                        tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
        }

        if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
                tw32(MAC_TX_LENGTHS,
                     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
                      (6 << TX_LENGTHS_IPG_SHIFT) |
                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
        else
                tw32(MAC_TX_LENGTHS,
                     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
                      (6 << TX_LENGTHS_IPG_SHIFT) |
                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

        if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
            (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
            phydev->speed != tp->link_config.active_speed ||
            phydev->duplex != tp->link_config.active_duplex ||
1556             oldflowctrl != tp->link_config.active_flowctrl)
1557                 linkmesg = 1;
1558
1559         tp->link_config.active_speed = phydev->speed;
1560         tp->link_config.active_duplex = phydev->duplex;
1561
1562         spin_unlock_bh(&tp->lock);
1563
1564         if (linkmesg)
1565                 tg3_link_report(tp);
1566 }
1567
1568 static int tg3_phy_init(struct tg3 *tp)
1569 {
1570         struct phy_device *phydev;
1571
1572         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1573                 return 0;
1574
1575         /* Bring the PHY back to a known state. */
1576         tg3_bmcr_reset(tp);
1577
1578         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1579
1580         /* Attach the MAC to the PHY. */
1581         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1582                              phydev->dev_flags, phydev->interface);
1583         if (IS_ERR(phydev)) {
1584                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1585                 return PTR_ERR(phydev);
1586         }
1587
1588         /* Mask with MAC supported features. */
1589         switch (phydev->interface) {
1590         case PHY_INTERFACE_MODE_GMII:
1591         case PHY_INTERFACE_MODE_RGMII:
1592                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1593                         phydev->supported &= (PHY_GBIT_FEATURES |
1594                                               SUPPORTED_Pause |
1595                                               SUPPORTED_Asym_Pause);
1596                         break;
1597                 }
1598                 /* fallthru */
1599         case PHY_INTERFACE_MODE_MII:
1600                 phydev->supported &= (PHY_BASIC_FEATURES |
1601                                       SUPPORTED_Pause |
1602                                       SUPPORTED_Asym_Pause);
1603                 break;
1604         default:
1605                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1606                 return -EINVAL;
1607         }
1608
1609         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1610
1611         phydev->advertising = phydev->supported;
1612
1613         return 0;
1614 }
1615
1616 static void tg3_phy_start(struct tg3 *tp)
1617 {
1618         struct phy_device *phydev;
1619
1620         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1621                 return;
1622
1623         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1624
1625         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1626                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1627                 phydev->speed = tp->link_config.orig_speed;
1628                 phydev->duplex = tp->link_config.orig_duplex;
1629                 phydev->autoneg = tp->link_config.orig_autoneg;
1630                 phydev->advertising = tp->link_config.orig_advertising;
1631         }
1632
1633         phy_start(phydev);
1634
1635         phy_start_aneg(phydev);
1636 }
1637
1638 static void tg3_phy_stop(struct tg3 *tp)
1639 {
1640         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1641                 return;
1642
1643         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1644 }
1645
1646 static void tg3_phy_fini(struct tg3 *tp)
1647 {
1648         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1649                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1650                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1651         }
1652 }
1653
1654 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1655 {
1656         u32 phytest;
1657
1658         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1659                 u32 phy;
1660
1661                 tg3_writephy(tp, MII_TG3_FET_TEST,
1662                              phytest | MII_TG3_FET_SHADOW_EN);
1663                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1664                         if (enable)
1665                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1666                         else
1667                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1668                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1669                 }
1670                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1671         }
1672 }
1673
1674 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1675 {
1676         u32 reg;
1677
1678         if (!tg3_flag(tp, 5705_PLUS) ||
1679             (tg3_flag(tp, 5717_PLUS) &&
1680              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1681                 return;
1682
1683         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1684                 tg3_phy_fet_toggle_apd(tp, enable);
1685                 return;
1686         }
1687
1688         reg = MII_TG3_MISC_SHDW_WREN |
1689               MII_TG3_MISC_SHDW_SCR5_SEL |
1690               MII_TG3_MISC_SHDW_SCR5_LPED |
1691               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1692               MII_TG3_MISC_SHDW_SCR5_SDTL |
1693               MII_TG3_MISC_SHDW_SCR5_C125OE;
1694         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1695                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1696
1697         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1698
1699
1700         reg = MII_TG3_MISC_SHDW_WREN |
1701               MII_TG3_MISC_SHDW_APD_SEL |
1702               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1703         if (enable)
1704                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1705
1706         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1707 }
1708
1709 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1710 {
1711         u32 phy;
1712
1713         if (!tg3_flag(tp, 5705_PLUS) ||
1714             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
1715                 return;
1716
1717         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1718                 u32 ephy;
1719
1720                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
1721                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
1722
1723                         tg3_writephy(tp, MII_TG3_FET_TEST,
1724                                      ephy | MII_TG3_FET_SHADOW_EN);
1725                         if (!tg3_readphy(tp, reg, &phy)) {
1726                                 if (enable)
1727                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1728                                 else
1729                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1730                                 tg3_writephy(tp, reg, phy);
1731                         }
1732                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
1733                 }
1734         } else {
1735                 int ret;
1736
1737                 ret = tg3_phy_auxctl_read(tp,
1738                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
1739                 if (!ret) {
1740                         if (enable)
1741                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1742                         else
1743                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1744                         tg3_phy_auxctl_write(tp,
1745                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
1746                 }
1747         }
1748 }
1749
1750 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1751 {
1752         int ret;
1753         u32 val;
1754
1755         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1756                 return;
1757
1758         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
1759         if (!ret)
1760                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
1761                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
1762 }
1763
1764 static void tg3_phy_apply_otp(struct tg3 *tp)
1765 {
1766         u32 otp, phy;
1767
1768         if (!tp->phy_otp)
1769                 return;
1770
1771         otp = tp->phy_otp;
1772
1773         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
1774                 return;
1775
1776         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1777         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1778         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1779
1780         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1781               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1782         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1783
1784         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1785         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1786         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1787
1788         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1789         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1790
1791         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1792         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1793
1794         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1795               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1796         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1797
1798         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1799 }
1800
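/* Re-evaluate EEE after a link change.  setlpicnt is left non-zero
 * only when autoneg resolved a 100TX or 1000T full-duplex link whose
 * partner advertised EEE; it is presumably counted down elsewhere
 * before LPI is enabled, and LPI is switched off otherwise.
 */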
1801 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
1802 {
1803         u32 val;
1804
1805         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
1806                 return;
1807
1808         tp->setlpicnt = 0;
1809
1810         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1811             current_link_up == 1 &&
1812             tp->link_config.active_duplex == DUPLEX_FULL &&
1813             (tp->link_config.active_speed == SPEED_100 ||
1814              tp->link_config.active_speed == SPEED_1000)) {
1815                 u32 eeectl;
1816
1817                 if (tp->link_config.active_speed == SPEED_1000)
1818                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
1819                 else
1820                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
1821
1822                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
1823
1824                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
1825                                   TG3_CL45_D7_EEERES_STAT, &val);
1826
1827                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
1828                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
1829                         tp->setlpicnt = 2;
1830         }
1831
1832         if (!tp->setlpicnt) {
1833                 val = tr32(TG3_CPMU_EEE_MODE);
1834                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
1835         }
1836 }
1837
1838 static void tg3_phy_eee_enable(struct tg3 *tp)
1839 {
1840         u32 val;
1841
1842         if (tp->link_config.active_speed == SPEED_1000 &&
1843             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1844              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
1845              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
1846             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1847                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0003);
1848                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1849         }
1850
1851         val = tr32(TG3_CPMU_EEE_MODE);
1852         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
1853 }
1854
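/* Poll the DSP control register until the pending macro operation
 * completes (bit 0x1000 clears).  The loop has no explicit delay;
 * each tg3_readphy() MDIO transaction is presumably slow enough to
 * pace it.
 */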
1855 static int tg3_wait_macro_done(struct tg3 *tp)
1856 {
1857         int limit = 100;
1858
1859         while (limit--) {
1860                 u32 tmp32;
1861
1862                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1863                         if ((tmp32 & 0x1000) == 0)
1864                                 break;
1865                 }
1866         }
1867         if (limit < 0)
1868                 return -EBUSY;
1869
1870         return 0;
1871 }
1872
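/* Write a known test pattern to each of the four DSP channels, read
 * it back, and compare.  Only the low 15 bits of the even words and
 * the low 4 bits of the odd words are significant, hence the masking
 * below.  Any failure asks the caller to reset the PHY and retry.
 */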
1873 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1874 {
1875         static const u32 test_pat[4][6] = {
1876         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1877         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1878         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1879         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1880         };
1881         int chan;
1882
1883         for (chan = 0; chan < 4; chan++) {
1884                 int i;
1885
1886                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1887                              (chan * 0x2000) | 0x0200);
1888                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1889
1890                 for (i = 0; i < 6; i++)
1891                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1892                                      test_pat[chan][i]);
1893
1894                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1895                 if (tg3_wait_macro_done(tp)) {
1896                         *resetp = 1;
1897                         return -EBUSY;
1898                 }
1899
1900                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1901                              (chan * 0x2000) | 0x0200);
1902                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
1903                 if (tg3_wait_macro_done(tp)) {
1904                         *resetp = 1;
1905                         return -EBUSY;
1906                 }
1907
1908                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
1909                 if (tg3_wait_macro_done(tp)) {
1910                         *resetp = 1;
1911                         return -EBUSY;
1912                 }
1913
1914                 for (i = 0; i < 6; i += 2) {
1915                         u32 low, high;
1916
1917                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1918                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1919                             tg3_wait_macro_done(tp)) {
1920                                 *resetp = 1;
1921                                 return -EBUSY;
1922                         }
1923                         low &= 0x7fff;
1924                         high &= 0x000f;
1925                         if (low != test_pat[chan][i] ||
1926                             high != test_pat[chan][i+1]) {
1927                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1928                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1929                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1930
1931                                 return -EBUSY;
1932                         }
1933                 }
1934         }
1935
1936         return 0;
1937 }
1938
1939 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1940 {
1941         int chan;
1942
1943         for (chan = 0; chan < 4; chan++) {
1944                 int i;
1945
1946                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1947                              (chan * 0x2000) | 0x0200);
1948                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1949                 for (i = 0; i < 6; i++)
1950                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1951                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1952                 if (tg3_wait_macro_done(tp))
1953                         return -EBUSY;
1954         }
1955
1956         return 0;
1957 }
1958
1959 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1960 {
1961         u32 reg32, phy9_orig;
1962         int retries, do_phy_reset, err;
1963
1964         retries = 10;
1965         do_phy_reset = 1;
1966         do {
1967                 if (do_phy_reset) {
1968                         err = tg3_bmcr_reset(tp);
1969                         if (err)
1970                                 return err;
1971                         do_phy_reset = 0;
1972                 }
1973
1974                 /* Disable transmitter and interrupt.  */
1975                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1976                         continue;
1977
1978                 reg32 |= 0x3000;
1979                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1980
1981                 /* Set full-duplex, 1000 Mbps.  */
1982                 tg3_writephy(tp, MII_BMCR,
1983                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1984
1985                 /* Set to master mode.  */
1986                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1987                         continue;
1988
1989                 tg3_writephy(tp, MII_TG3_CTRL,
1990                              (MII_TG3_CTRL_AS_MASTER |
1991                               MII_TG3_CTRL_ENABLE_AS_MASTER));
1992
1993                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
1994                 if (err)
1995                         return err;
1996
1997                 /* Block the PHY control access.  */
1998                 tg3_phydsp_write(tp, 0x8005, 0x0800);
1999
2000                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2001                 if (!err)
2002                         break;
2003         } while (--retries);
2004
2005         err = tg3_phy_reset_chanpat(tp);
2006         if (err)
2007                 return err;
2008
2009         tg3_phydsp_write(tp, 0x8005, 0x0000);
2010
2011         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2012         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2013
2014         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2015
2016         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
2017
2018         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2019                 reg32 &= ~0x3000;
2020                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2021         } else if (!err)
2022                 err = -EBUSY;
2023
2024         return err;
2025 }
2026
2027 /* Reset the Tigon3 PHY unconditionally and reapply the
2028  * chip-specific PHY workarounds.
2029  */
2030 static int tg3_phy_reset(struct tg3 *tp)
2031 {
2032         u32 val, cpmuctrl;
2033         int err;
2034
2035         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2036                 val = tr32(GRC_MISC_CFG);
2037                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2038                 udelay(40);
2039         }
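        /* The BMSR link bit is latched-low; read it twice so the
         * second read returns the current link state.
         */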
2040         err  = tg3_readphy(tp, MII_BMSR, &val);
2041         err |= tg3_readphy(tp, MII_BMSR, &val);
2042         if (err != 0)
2043                 return -EBUSY;
2044
2045         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2046                 netif_carrier_off(tp->dev);
2047                 tg3_link_report(tp);
2048         }
2049
2050         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2051             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2052             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2053                 err = tg3_phy_reset_5703_4_5(tp);
2054                 if (err)
2055                         return err;
2056                 goto out;
2057         }
2058
2059         cpmuctrl = 0;
2060         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2061             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2062                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2063                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2064                         tw32(TG3_CPMU_CTRL,
2065                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2066         }
2067
2068         err = tg3_bmcr_reset(tp);
2069         if (err)
2070                 return err;
2071
2072         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2073                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2074                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2075
2076                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2077         }
2078
2079         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2080             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2081                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2082                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2083                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2084                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2085                         udelay(40);
2086                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2087                 }
2088         }
2089
2090         if (tg3_flag(tp, 5717_PLUS) &&
2091             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2092                 return 0;
2093
2094         tg3_phy_apply_otp(tp);
2095
2096         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2097                 tg3_phy_toggle_apd(tp, true);
2098         else
2099                 tg3_phy_toggle_apd(tp, false);
2100
2101 out:
2102         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2103             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2104                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2105                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2106                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2107         }
2108
2109         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2110                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2111                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2112         }
2113
2114         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2115                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2116                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2117                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2118                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2119                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2120                 }
2121         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2122                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2123                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2124                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2125                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2126                                 tg3_writephy(tp, MII_TG3_TEST1,
2127                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2128                         } else
2129                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2130
2131                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2132                 }
2133         }
2134
2135         /* Set the extended packet length bit (bit 14) on all chips
2136          * that support jumbo frames. */
2137         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2138                 /* Cannot do read-modify-write on 5401 */
2139                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2140         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2141                 /* Set bit 14 with read-modify-write to preserve other bits */
2142                 err = tg3_phy_auxctl_read(tp,
2143                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2144                 if (!err)
2145                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2146                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2147         }
2148
2149         /* Set PHY register 0x10 bit 0 to high FIFO elasticity to
2150          * support transmission of jumbo frames.
2151          */
2152         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2153                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2154                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2155                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2156         }
2157
2158         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2159                 /* adjust output voltage */
2160                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2161         }
2162
2163         tg3_phy_toggle_automdix(tp, 1);
2164         tg3_phy_set_wirespeed(tp);
2165         return 0;
2166 }
2167
2168 static void tg3_frob_aux_power(struct tg3 *tp)
2169 {
2170         bool need_vaux = false;
2171
2172         /* The GPIOs do something completely different on 57765. */
2173         if (!tg3_flag(tp, IS_NIC) ||
2174             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2175             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2176                 return;
2177
2178         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2179              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2180              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2181              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
2182             tp->pdev_peer != tp->pdev) {
2183                 struct net_device *dev_peer;
2184
2185                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2186
2187                 /* remove_one() may have been run on the peer. */
2188                 if (dev_peer) {
2189                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2190
2191                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2192                                 return;
2193
2194                         if (tg3_flag(tp_peer, WOL_ENABLE) ||
2195                             tg3_flag(tp_peer, ENABLE_ASF))
2196                                 need_vaux = true;
2197                 }
2198         }
2199
2200         if (tg3_flag(tp, WOL_ENABLE) || tg3_flag(tp, ENABLE_ASF))
2201                 need_vaux = true;
2202
2203         if (need_vaux) {
2204                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2205                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2206                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2207                                     (GRC_LCLCTRL_GPIO_OE0 |
2208                                      GRC_LCLCTRL_GPIO_OE1 |
2209                                      GRC_LCLCTRL_GPIO_OE2 |
2210                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2211                                      GRC_LCLCTRL_GPIO_OUTPUT1),
2212                                     100);
2213                 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2214                            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2215                         /* The 5761 non-E device swaps GPIO 0 and GPIO 2. */
2216                         u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2217                                              GRC_LCLCTRL_GPIO_OE1 |
2218                                              GRC_LCLCTRL_GPIO_OE2 |
2219                                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2220                                              GRC_LCLCTRL_GPIO_OUTPUT1 |
2221                                              tp->grc_local_ctrl;
2222                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2223
2224                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2225                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2226
2227                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2228                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2229                 } else {
2230                         u32 no_gpio2;
2231                         u32 grc_local_ctrl = 0;
2232
2233                         /* Workaround to avoid drawing excessive current. */
2234                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2235                             ASIC_REV_5714) {
2236                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2237                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2238                                             grc_local_ctrl, 100);
2239                         }
2240
2241                         /* On 5753 and variants, GPIO2 cannot be used. */
2242                         no_gpio2 = tp->nic_sram_data_cfg &
2243                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
2244
2245                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2246                                          GRC_LCLCTRL_GPIO_OE1 |
2247                                          GRC_LCLCTRL_GPIO_OE2 |
2248                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
2249                                          GRC_LCLCTRL_GPIO_OUTPUT2;
2250                         if (no_gpio2) {
2251                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2252                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
2253                         }
2254                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2255                                                     grc_local_ctrl, 100);
2256
2257                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2258
2259                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2260                                                     grc_local_ctrl, 100);
2261
2262                         if (!no_gpio2) {
2263                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2264                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2265                                             grc_local_ctrl, 100);
2266                         }
2267                 }
2268         } else {
2269                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2270                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2271                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2272                                     (GRC_LCLCTRL_GPIO_OE1 |
2273                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2274
2275                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2276                                     GRC_LCLCTRL_GPIO_OE1, 100);
2277
2278                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2279                                     (GRC_LCLCTRL_GPIO_OE1 |
2280                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2281                 }
2282         }
2283 }
2284
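/* Decide whether MAC_MODE_LINK_POLARITY should be set for a given link
 * speed on 5700-class boards.  The correct polarity apparently depends
 * on the PHY (the 5411 inverts it) and on the strapped LED mode.
 */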
2285 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2286 {
2287         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2288                 return 1;
2289         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2290                 if (speed != SPEED_10)
2291                         return 1;
2292         } else if (speed == SPEED_10)
2293                 return 1;
2294
2295         return 0;
2296 }
2297
2298 static int tg3_setup_phy(struct tg3 *, int);
2299
2300 #define RESET_KIND_SHUTDOWN     0
2301 #define RESET_KIND_INIT         1
2302 #define RESET_KIND_SUSPEND      2
2303
2304 static void tg3_write_sig_post_reset(struct tg3 *, int);
2305 static int tg3_halt_cpu(struct tg3 *, u32);
2306
2307 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2308 {
2309         u32 val;
2310
2311         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2312                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2313                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2314                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2315
2316                         sg_dig_ctrl |=
2317                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2318                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2319                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2320                 }
2321                 return;
2322         }
2323
2324         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2325                 tg3_bmcr_reset(tp);
2326                 val = tr32(GRC_MISC_CFG);
2327                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2328                 udelay(40);
2329                 return;
2330         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2331                 u32 phytest;
2332                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2333                         u32 phy;
2334
2335                         tg3_writephy(tp, MII_ADVERTISE, 0);
2336                         tg3_writephy(tp, MII_BMCR,
2337                                      BMCR_ANENABLE | BMCR_ANRESTART);
2338
2339                         tg3_writephy(tp, MII_TG3_FET_TEST,
2340                                      phytest | MII_TG3_FET_SHADOW_EN);
2341                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2342                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2343                                 tg3_writephy(tp,
2344                                              MII_TG3_FET_SHDW_AUXMODE4,
2345                                              phy);
2346                         }
2347                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2348                 }
2349                 return;
2350         } else if (do_low_power) {
2351                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2352                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2353
2354                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2355                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2356                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2357                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2358         }
2359
2360         /* The PHY should not be powered down on some chips because
2361          * of bugs.
2362          */
2363         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2364             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2365             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2366              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2367                 return;
2368
2369         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2370             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2371                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2372                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2373                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2374                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2375         }
2376
2377         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2378 }
2379
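/* NVRAM arbitration: SWARB is a hardware semaphore shared with the
 * on-chip firmware.  SWARB_REQ_SET1 requests the lock and SWARB_GNT1
 * is polled (up to 8000 x 20 usec, roughly 160 ms) for the grant; the
 * nvram_lock_cnt recursion count lets nested callers re-acquire it.
 */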
2380 /* tp->lock is held. */
2381 static int tg3_nvram_lock(struct tg3 *tp)
2382 {
2383         if (tg3_flag(tp, NVRAM)) {
2384                 int i;
2385
2386                 if (tp->nvram_lock_cnt == 0) {
2387                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2388                         for (i = 0; i < 8000; i++) {
2389                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2390                                         break;
2391                                 udelay(20);
2392                         }
2393                         if (i == 8000) {
2394                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2395                                 return -ENODEV;
2396                         }
2397                 }
2398                 tp->nvram_lock_cnt++;
2399         }
2400         return 0;
2401 }
2402
2403 /* tp->lock is held. */
2404 static void tg3_nvram_unlock(struct tg3 *tp)
2405 {
2406         if (tg3_flag(tp, NVRAM)) {
2407                 if (tp->nvram_lock_cnt > 0)
2408                         tp->nvram_lock_cnt--;
2409                 if (tp->nvram_lock_cnt == 0)
2410                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2411         }
2412 }
2413
2414 /* tp->lock is held. */
2415 static void tg3_enable_nvram_access(struct tg3 *tp)
2416 {
2417         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2418                 u32 nvaccess = tr32(NVRAM_ACCESS);
2419
2420                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2421         }
2422 }
2423
2424 /* tp->lock is held. */
2425 static void tg3_disable_nvram_access(struct tg3 *tp)
2426 {
2427         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2428                 u32 nvaccess = tr32(NVRAM_ACCESS);
2429
2430                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2431         }
2432 }
2433
2434 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2435                                         u32 offset, u32 *val)
2436 {
2437         u32 tmp;
2438         int i;
2439
2440         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2441                 return -EINVAL;
2442
2443         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2444                                         EEPROM_ADDR_DEVID_MASK |
2445                                         EEPROM_ADDR_READ);
2446         tw32(GRC_EEPROM_ADDR,
2447              tmp |
2448              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2449              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2450               EEPROM_ADDR_ADDR_MASK) |
2451              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2452
2453         for (i = 0; i < 1000; i++) {
2454                 tmp = tr32(GRC_EEPROM_ADDR);
2455
2456                 if (tmp & EEPROM_ADDR_COMPLETE)
2457                         break;
2458                 msleep(1);
2459         }
2460         if (!(tmp & EEPROM_ADDR_COMPLETE))
2461                 return -EBUSY;
2462
2463         tmp = tr32(GRC_EEPROM_DATA);
2464
2465         /*
2466          * The data always arrives in the opposite of the native
2467          * endian format.  Perform a blind byteswap to compensate.
2468          */
2469         *val = swab32(tmp);
2470
2471         return 0;
2472 }
2473
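/* Each poll below waits 10 usec, so NVRAM_CMD_TIMEOUT bounds a single
 * NVRAM command at roughly 100 ms.
 */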
2474 #define NVRAM_CMD_TIMEOUT 10000
2475
2476 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2477 {
2478         int i;
2479
2480         tw32(NVRAM_CMD, nvram_cmd);
2481         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2482                 udelay(10);
2483                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2484                         udelay(10);
2485                         break;
2486                 }
2487         }
2488
2489         if (i == NVRAM_CMD_TIMEOUT)
2490                 return -EBUSY;
2491
2492         return 0;
2493 }
2494
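/* Atmel AT45DB0x1B flashes address odd-sized pages (264 bytes) through
 * a power-of-two page field, so a linear NVRAM offset must be split
 * into a page index shifted to ATMEL_AT45DB0X1B_PAGE_POS plus the
 * offset within the page.  tg3_nvram_logical_addr() is the inverse.
 */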
2495 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2496 {
2497         if (tg3_flag(tp, NVRAM) &&
2498             tg3_flag(tp, NVRAM_BUFFERED) &&
2499             tg3_flag(tp, FLASH) &&
2500             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2501             (tp->nvram_jedecnum == JEDEC_ATMEL))
2502
2503                 addr = ((addr / tp->nvram_pagesize) <<
2504                         ATMEL_AT45DB0X1B_PAGE_POS) +
2505                        (addr % tp->nvram_pagesize);
2506
2507         return addr;
2508 }
2509
2510 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2511 {
2512         if (tg3_flag(tp, NVRAM) &&
2513             tg3_flag(tp, NVRAM_BUFFERED) &&
2514             tg3_flag(tp, FLASH) &&
2515             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2516             (tp->nvram_jedecnum == JEDEC_ATMEL))
2517
2518                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2519                         tp->nvram_pagesize) +
2520                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2521
2522         return addr;
2523 }
2524
2525 /* NOTE: Data read in from NVRAM is byteswapped according to
2526  * the byteswapping settings for all other register accesses.
2527  * tg3 devices are BE devices, so on a BE machine, the data
2528  * returned will be exactly as it is seen in NVRAM.  On a LE
2529  * machine, the 32-bit value will be byteswapped.
2530  */
2531 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2532 {
2533         int ret;
2534
2535         if (!tg3_flag(tp, NVRAM))
2536                 return tg3_nvram_read_using_eeprom(tp, offset, val);
2537
2538         offset = tg3_nvram_phys_addr(tp, offset);
2539
2540         if (offset > NVRAM_ADDR_MSK)
2541                 return -EINVAL;
2542
2543         ret = tg3_nvram_lock(tp);
2544         if (ret)
2545                 return ret;
2546
2547         tg3_enable_nvram_access(tp);
2548
2549         tw32(NVRAM_ADDR, offset);
2550         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2551                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2552
2553         if (ret == 0)
2554                 *val = tr32(NVRAM_RDDATA);
2555
2556         tg3_disable_nvram_access(tp);
2557
2558         tg3_nvram_unlock(tp);
2559
2560         return ret;
2561 }
2562
2563 /* Ensures NVRAM data is in bytestream format. */
2564 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2565 {
2566         u32 v;
2567         int res = tg3_nvram_read(tp, offset, &v);
2568         if (!res)
2569                 *val = cpu_to_be32(v);
2570         return res;
2571 }
2572
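/* Program the station address into all four MAC address slots (plus
 * the twelve extended slots on 5703/5704).  Bytes 0-1 go in the high
 * word and bytes 2-5 in the low word.  The byte sum also seeds the TX
 * backoff generator used for half-duplex collision backoff.
 */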
2573 /* tp->lock is held. */
2574 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2575 {
2576         u32 addr_high, addr_low;
2577         int i;
2578
2579         addr_high = ((tp->dev->dev_addr[0] << 8) |
2580                      tp->dev->dev_addr[1]);
2581         addr_low = ((tp->dev->dev_addr[2] << 24) |
2582                     (tp->dev->dev_addr[3] << 16) |
2583                     (tp->dev->dev_addr[4] <<  8) |
2584                     (tp->dev->dev_addr[5] <<  0));
2585         for (i = 0; i < 4; i++) {
2586                 if (i == 1 && skip_mac_1)
2587                         continue;
2588                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2589                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2590         }
2591
2592         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2593             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2594                 for (i = 0; i < 12; i++) {
2595                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2596                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2597                 }
2598         }
2599
2600         addr_high = (tp->dev->dev_addr[0] +
2601                      tp->dev->dev_addr[1] +
2602                      tp->dev->dev_addr[2] +
2603                      tp->dev->dev_addr[3] +
2604                      tp->dev->dev_addr[4] +
2605                      tp->dev->dev_addr[5]) &
2606                 TX_BACKOFF_SEED_MASK;
2607         tw32(MAC_TX_BACKOFF_SEED, addr_high);
2608 }
2609
2610 static void tg3_enable_register_access(struct tg3 *tp)
2611 {
2612         /*
2613          * Make sure register accesses (indirect or otherwise) will function
2614          * correctly.
2615          */
2616         pci_write_config_dword(tp->pdev,
2617                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2618 }
2619
2620 static int tg3_power_up(struct tg3 *tp)
2621 {
2622         tg3_enable_register_access(tp);
2623
2624         pci_set_power_state(tp->pdev, PCI_D0);
2625
2626         /* Switch out of Vaux if the device is a NIC */
2627         if (tg3_flag(tp, IS_NIC))
2628                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2629
2630         return 0;
2631 }
2632
2633 static int tg3_power_down_prepare(struct tg3 *tp)
2634 {
2635         u32 misc_host_ctrl;
2636         bool device_should_wake, do_low_power;
2637
2638         tg3_enable_register_access(tp);
2639
2640         /* Restore the CLKREQ setting. */
2641         if (tg3_flag(tp, CLKREQ_BUG)) {
2642                 u16 lnkctl;
2643
2644                 pci_read_config_word(tp->pdev,
2645                                      tp->pcie_cap + PCI_EXP_LNKCTL,
2646                                      &lnkctl);
2647                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2648                 pci_write_config_word(tp->pdev,
2649                                       tp->pcie_cap + PCI_EXP_LNKCTL,
2650                                       lnkctl);
2651         }
2652
2653         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2654         tw32(TG3PCI_MISC_HOST_CTRL,
2655              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2656
2657         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2658                              tg3_flag(tp, WOL_ENABLE);
2659
2660         if (tg3_flag(tp, USE_PHYLIB)) {
2661                 do_low_power = false;
2662                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2663                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2664                         struct phy_device *phydev;
2665                         u32 phyid, advertising;
2666
2667                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2668
2669                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2670
2671                         tp->link_config.orig_speed = phydev->speed;
2672                         tp->link_config.orig_duplex = phydev->duplex;
2673                         tp->link_config.orig_autoneg = phydev->autoneg;
2674                         tp->link_config.orig_advertising = phydev->advertising;
2675
2676                         advertising = ADVERTISED_TP |
2677                                       ADVERTISED_Pause |
2678                                       ADVERTISED_Autoneg |
2679                                       ADVERTISED_10baseT_Half;
2680
2681                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
2682                                 if (tg3_flag(tp, WOL_SPEED_100MB))
2683                                         advertising |=
2684                                                 ADVERTISED_100baseT_Half |
2685                                                 ADVERTISED_100baseT_Full |
2686                                                 ADVERTISED_10baseT_Full;
2687                                 else
2688                                         advertising |= ADVERTISED_10baseT_Full;
2689                         }
2690
2691                         phydev->advertising = advertising;
2692
2693                         phy_start_aneg(phydev);
2694
2695                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2696                         if (phyid != PHY_ID_BCMAC131) {
2697                                 phyid &= PHY_BCM_OUI_MASK;
2698                                 if (phyid == PHY_BCM_OUI_1 ||
2699                                     phyid == PHY_BCM_OUI_2 ||
2700                                     phyid == PHY_BCM_OUI_3)
2701                                         do_low_power = true;
2702                         }
2703                 }
2704         } else {
2705                 do_low_power = true;
2706
2707                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2708                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2709                         tp->link_config.orig_speed = tp->link_config.speed;
2710                         tp->link_config.orig_duplex = tp->link_config.duplex;
2711                         tp->link_config.orig_autoneg = tp->link_config.autoneg;
2712                 }
2713
2714                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2715                         tp->link_config.speed = SPEED_10;
2716                         tp->link_config.duplex = DUPLEX_HALF;
2717                         tp->link_config.autoneg = AUTONEG_ENABLE;
2718                         tg3_setup_phy(tp, 0);
2719                 }
2720         }
2721
2722         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2723                 u32 val;
2724
2725                 val = tr32(GRC_VCPU_EXT_CTRL);
2726                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2727         } else if (!tg3_flag(tp, ENABLE_ASF)) {
2728                 int i;
2729                 u32 val;
2730
2731                 for (i = 0; i < 200; i++) {
2732                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2733                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2734                                 break;
2735                         msleep(1);
2736                 }
2737         }
2738         if (tg3_flag(tp, WOL_CAP))
2739                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2740                                                      WOL_DRV_STATE_SHUTDOWN |
2741                                                      WOL_DRV_WOL |
2742                                                      WOL_SET_MAGIC_PKT);
2743
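        /* If the device is armed to wake the system, keep the MAC and
         * receiver running so the magic packet can still be detected.
         */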
2744         if (device_should_wake) {
2745                 u32 mac_mode;
2746
2747                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2748                         if (do_low_power &&
2749                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2750                                 tg3_phy_auxctl_write(tp,
2751                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2752                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
2753                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2754                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
2755                                 udelay(40);
2756                         }
2757
2758                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2759                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
2760                         else
2761                                 mac_mode = MAC_MODE_PORT_MODE_MII;
2762
2763                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2764                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2765                             ASIC_REV_5700) {
2766                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
2767                                              SPEED_100 : SPEED_10;
2768                                 if (tg3_5700_link_polarity(tp, speed))
2769                                         mac_mode |= MAC_MODE_LINK_POLARITY;
2770                                 else
2771                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
2772                         }
2773                 } else {
2774                         mac_mode = MAC_MODE_PORT_MODE_TBI;
2775                 }
2776
2777                 if (!tg3_flag(tp, 5750_PLUS))
2778                         tw32(MAC_LED_CTRL, tp->led_ctrl);
2779
2780                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2781                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
2782                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
2783                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2784
2785                 if (tg3_flag(tp, ENABLE_APE))
2786                         mac_mode |= MAC_MODE_APE_TX_EN |
2787                                     MAC_MODE_APE_RX_EN |
2788                                     MAC_MODE_TDE_ENABLE;
2789
2790                 tw32_f(MAC_MODE, mac_mode);
2791                 udelay(100);
2792
2793                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2794                 udelay(10);
2795         }
2796
2797         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
2798             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2799              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2800                 u32 base_val;
2801
2802                 base_val = tp->pci_clock_ctrl;
2803                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2804                              CLOCK_CTRL_TXCLK_DISABLE);
2805
2806                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2807                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
2808         } else if (tg3_flag(tp, 5780_CLASS) ||
2809                    tg3_flag(tp, CPMU_PRESENT) ||
2810                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2811                 /* do nothing */
2812         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
2813                 u32 newbits1, newbits2;
2814
2815                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2816                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2817                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2818                                     CLOCK_CTRL_TXCLK_DISABLE |
2819                                     CLOCK_CTRL_ALTCLK);
2820                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2821                 } else if (tg3_flag(tp, 5705_PLUS)) {
2822                         newbits1 = CLOCK_CTRL_625_CORE;
2823                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2824                 } else {
2825                         newbits1 = CLOCK_CTRL_ALTCLK;
2826                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2827                 }
2828
2829                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2830                             40);
2831
2832                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2833                             40);
2834
2835                 if (!tg3_flag(tp, 5705_PLUS)) {
2836                         u32 newbits3;
2837
2838                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2839                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2840                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2841                                             CLOCK_CTRL_TXCLK_DISABLE |
2842                                             CLOCK_CTRL_44MHZ_CORE);
2843                         } else {
2844                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2845                         }
2846
2847                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
2848                                     tp->pci_clock_ctrl | newbits3, 40);
2849                 }
2850         }
2851
2852         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
2853                 tg3_power_down_phy(tp, do_low_power);
2854
2855         tg3_frob_aux_power(tp);
2856
2857         /* Workaround for unstable PLL clock */
2858         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2859             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2860                 u32 val = tr32(0x7d00);
2861
2862                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2863                 tw32(0x7d00, val);
2864                 if (!tg3_flag(tp, ENABLE_ASF)) {
2865                         int err;
2866
2867                         err = tg3_nvram_lock(tp);
2868                         tg3_halt_cpu(tp, RX_CPU_BASE);
2869                         if (!err)
2870                                 tg3_nvram_unlock(tp);
2871                 }
2872         }
2873
2874         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2875
2876         return 0;
2877 }
2878
2879 static void tg3_power_down(struct tg3 *tp)
2880 {
2881         tg3_power_down_prepare(tp);
2882
2883         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
2884         pci_set_power_state(tp->pdev, PCI_D3hot);
2885 }
2886
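/* Decode the MII_TG3_AUX_STAT speed/duplex field into ethtool SPEED_xxx
 * and DUPLEX_xxx values.  FET (10/100-only) PHYs encode the link speed
 * with separate bits, so they are handled in the default case.
 */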
2887 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2888 {
2889         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2890         case MII_TG3_AUX_STAT_10HALF:
2891                 *speed = SPEED_10;
2892                 *duplex = DUPLEX_HALF;
2893                 break;
2894
2895         case MII_TG3_AUX_STAT_10FULL:
2896                 *speed = SPEED_10;
2897                 *duplex = DUPLEX_FULL;
2898                 break;
2899
2900         case MII_TG3_AUX_STAT_100HALF:
2901                 *speed = SPEED_100;
2902                 *duplex = DUPLEX_HALF;
2903                 break;
2904
2905         case MII_TG3_AUX_STAT_100FULL:
2906                 *speed = SPEED_100;
2907                 *duplex = DUPLEX_FULL;
2908                 break;
2909
2910         case MII_TG3_AUX_STAT_1000HALF:
2911                 *speed = SPEED_1000;
2912                 *duplex = DUPLEX_HALF;
2913                 break;
2914
2915         case MII_TG3_AUX_STAT_1000FULL:
2916                 *speed = SPEED_1000;
2917                 *duplex = DUPLEX_FULL;
2918                 break;
2919
2920         default:
2921                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2922                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2923                                  SPEED_10;
2924                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2925                                   DUPLEX_HALF;
2926                         break;
2927                 }
2928                 *speed = SPEED_INVALID;
2929                 *duplex = DUPLEX_INVALID;
2930                 break;
2931         }
2932 }
2933
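/* Program the PHY autoneg advertisement registers: MII_ADVERTISE for
 * 10/100 modes and flow control, MII_TG3_CTRL for gigabit modes (with the
 * 5701 A0/B0 master-mode workaround), and, on EEE-capable PHYs, the
 * clause 45 EEE advertisement word.
 */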
2934 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
2935 {
2936         int err = 0;
2937         u32 val, new_adv;
2938
2939         new_adv = ADVERTISE_CSMA;
2940         if (advertise & ADVERTISED_10baseT_Half)
2941                 new_adv |= ADVERTISE_10HALF;
2942         if (advertise & ADVERTISED_10baseT_Full)
2943                 new_adv |= ADVERTISE_10FULL;
2944         if (advertise & ADVERTISED_100baseT_Half)
2945                 new_adv |= ADVERTISE_100HALF;
2946         if (advertise & ADVERTISED_100baseT_Full)
2947                 new_adv |= ADVERTISE_100FULL;
2948
2949         new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
2950
2951         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
2952         if (err)
2953                 goto done;
2954
2955         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
2956                 goto done;
2957
2958         new_adv = 0;
2959         if (advertise & ADVERTISED_1000baseT_Half)
2960                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2961         if (advertise & ADVERTISED_1000baseT_Full)
2962                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2963
2964         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2965             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2966                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2967                             MII_TG3_CTRL_ENABLE_AS_MASTER);
2968
2969         err = tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2970         if (err)
2971                 goto done;
2972
2973         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2974                 goto done;
2975
2976         tw32(TG3_CPMU_EEE_MODE,
2977              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2978
2979         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2980         if (!err) {
2981                 int err2;
2982
2983                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
2984                 case ASIC_REV_5717:
2985                 case ASIC_REV_57765:
2986                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
2987                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
2988                                                  MII_TG3_DSP_CH34TP2_HIBW01);
2989                         /* Fall through */
2990                 case ASIC_REV_5719:
2991                         val = MII_TG3_DSP_TAP26_ALNOKO |
2992                               MII_TG3_DSP_TAP26_RMRXSTO |
2993                               MII_TG3_DSP_TAP26_OPCSINPT;
2994                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2995                 }
2996
2997                 val = 0;
2998                 /* Advertise 100-BaseTX EEE ability */
2999                 if (advertise & ADVERTISED_100baseT_Full)
3000                         val |= MDIO_AN_EEE_ADV_100TX;
3001                 /* Advertise 1000-BaseT EEE ability */
3002                 if (advertise & ADVERTISED_1000baseT_Full)
3003                         val |= MDIO_AN_EEE_ADV_1000T;
3004                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3005
3006                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3007                 if (!err)
3008                         err = err2;
3009         }
3010
3011 done:
3012         return err;
3013 }
3014
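/* Start (or restart) copper link negotiation.  In low-power mode only the
 * WoL-capable speeds are advertised; with no speed forced, the full
 * link_config advertisement mask is used; otherwise just the single
 * requested mode is advertised and BMCR is forced to match.
 */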
3015 static void tg3_phy_copper_begin(struct tg3 *tp)
3016 {
3017         u32 new_adv;
3018         int i;
3019
3020         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3021                 new_adv = ADVERTISED_10baseT_Half |
3022                           ADVERTISED_10baseT_Full;
3023                 if (tg3_flag(tp, WOL_SPEED_100MB))
3024                         new_adv |= ADVERTISED_100baseT_Half |
3025                                    ADVERTISED_100baseT_Full;
3026
3027                 tg3_phy_autoneg_cfg(tp, new_adv,
3028                                     FLOW_CTRL_TX | FLOW_CTRL_RX);
3029         } else if (tp->link_config.speed == SPEED_INVALID) {
3030                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3031                         tp->link_config.advertising &=
3032                                 ~(ADVERTISED_1000baseT_Half |
3033                                   ADVERTISED_1000baseT_Full);
3034
3035                 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3036                                     tp->link_config.flowctrl);
3037         } else {
3038                 /* Asking for a specific link mode. */
3039                 if (tp->link_config.speed == SPEED_1000) {
3040                         if (tp->link_config.duplex == DUPLEX_FULL)
3041                                 new_adv = ADVERTISED_1000baseT_Full;
3042                         else
3043                                 new_adv = ADVERTISED_1000baseT_Half;
3044                 } else if (tp->link_config.speed == SPEED_100) {
3045                         if (tp->link_config.duplex == DUPLEX_FULL)
3046                                 new_adv = ADVERTISED_100baseT_Full;
3047                         else
3048                                 new_adv = ADVERTISED_100baseT_Half;
3049                 } else {
3050                         if (tp->link_config.duplex == DUPLEX_FULL)
3051                                 new_adv = ADVERTISED_10baseT_Full;
3052                         else
3053                                 new_adv = ADVERTISED_10baseT_Half;
3054                 }
3055
3056                 tg3_phy_autoneg_cfg(tp, new_adv,
3057                                     tp->link_config.flowctrl);
3058         }
3059
3060         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3061             tp->link_config.speed != SPEED_INVALID) {
3062                 u32 bmcr, orig_bmcr;
3063
3064                 tp->link_config.active_speed = tp->link_config.speed;
3065                 tp->link_config.active_duplex = tp->link_config.duplex;
3066
3067                 bmcr = 0;
3068                 switch (tp->link_config.speed) {
3069                 default:
3070                 case SPEED_10:
3071                         break;
3072
3073                 case SPEED_100:
3074                         bmcr |= BMCR_SPEED100;
3075                         break;
3076
3077                 case SPEED_1000:
3078                         bmcr |= TG3_BMCR_SPEED1000;
3079                         break;
3080                 }
3081
3082                 if (tp->link_config.duplex == DUPLEX_FULL)
3083                         bmcr |= BMCR_FULLDPLX;
3084
3085                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3086                     (bmcr != orig_bmcr)) {
3087                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3088                         for (i = 0; i < 1500; i++) {
3089                                 u32 tmp;
3090
3091                                 udelay(10);
3092                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3093                                     tg3_readphy(tp, MII_BMSR, &tmp))
3094                                         continue;
3095                                 if (!(tmp & BMSR_LSTATUS)) {
3096                                         udelay(40);
3097                                         break;
3098                                 }
3099                         }
3100                         tg3_writephy(tp, MII_BMCR, bmcr);
3101                         udelay(40);
3102                 }
3103         } else {
3104                 tg3_writephy(tp, MII_BMCR,
3105                              BMCR_ANENABLE | BMCR_ANRESTART);
3106         }
3107 }
3108
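/* Load the BCM5401 DSP coefficient workarounds.  The register/value pairs
 * below are opaque, vendor-specified magic values.
 */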
3109 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3110 {
3111         int err;
3112
3113         /* Turn off tap power management and set the
3114          * extended packet length bit. */
3115         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3116
3117         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3118         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3119         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3120         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3121         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3122
3123         udelay(40);
3124
3125         return err;
3126 }
3127
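/* Return 1 if the PHY's advertisement registers cover every mode in
 * @mask, 0 otherwise (including on any PHY read failure).
 */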
3128 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3129 {
3130         u32 adv_reg, all_mask = 0;
3131
3132         if (mask & ADVERTISED_10baseT_Half)
3133                 all_mask |= ADVERTISE_10HALF;
3134         if (mask & ADVERTISED_10baseT_Full)
3135                 all_mask |= ADVERTISE_10FULL;
3136         if (mask & ADVERTISED_100baseT_Half)
3137                 all_mask |= ADVERTISE_100HALF;
3138         if (mask & ADVERTISED_100baseT_Full)
3139                 all_mask |= ADVERTISE_100FULL;
3140
3141         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3142                 return 0;
3143
3144         if ((adv_reg & all_mask) != all_mask)
3145                 return 0;
3146         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3147                 u32 tg3_ctrl;
3148
3149                 all_mask = 0;
3150                 if (mask & ADVERTISED_1000baseT_Half)
3151                         all_mask |= ADVERTISE_1000HALF;
3152                 if (mask & ADVERTISED_1000baseT_Full)
3153                         all_mask |= ADVERTISE_1000FULL;
3154
3155                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
3156                         return 0;
3157
3158                 if ((tg3_ctrl & all_mask) != all_mask)
3159                         return 0;
3160         }
3161         return 1;
3162 }
3163
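/* Check that the advertised pause bits match the requested flow control.
 * Returns 0 only when a full-duplex link advertises the wrong pause
 * settings; on half duplex the advertisement is silently corrected so the
 * next renegotiation starts out right.
 */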
3164 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3165 {
3166         u32 curadv, reqadv;
3167
3168         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3169                 return 1;
3170
3171         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3172         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3173
3174         if (tp->link_config.active_duplex == DUPLEX_FULL) {
3175                 if (curadv != reqadv)
3176                         return 0;
3177
3178                 if (tg3_flag(tp, PAUSE_AUTONEG))
3179                         tg3_readphy(tp, MII_LPA, rmtadv);
3180         } else {
3181                 /* Reprogram the advertisement register, even if it
3182                  * does not affect the current link.  If the link
3183                  * gets renegotiated in the future, we can save an
3184                  * additional renegotiation cycle by advertising
3185                  * it correctly in the first place.
3186                  */
3187                 if (curadv != reqadv) {
3188                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3189                                      ADVERTISE_PAUSE_ASYM);
3190                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3191                 }
3192         }
3193
3194         return 1;
3195 }
3196
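/* Bring the copper link up to date: clear stale MAC events, apply PHY
 * errata workarounds, poll BMSR for link, derive speed/duplex from the
 * AUX status register, and program MAC_MODE and the carrier state to
 * match.
 */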
3197 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3198 {
3199         int current_link_up;
3200         u32 bmsr, val;
3201         u32 lcl_adv, rmt_adv;
3202         u16 current_speed;
3203         u8 current_duplex;
3204         int i, err;
3205
3206         tw32(MAC_EVENT, 0);
3207
3208         tw32_f(MAC_STATUS,
3209              (MAC_STATUS_SYNC_CHANGED |
3210               MAC_STATUS_CFG_CHANGED |
3211               MAC_STATUS_MI_COMPLETION |
3212               MAC_STATUS_LNKSTATE_CHANGED));
3213         udelay(40);
3214
3215         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3216                 tw32_f(MAC_MI_MODE,
3217                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3218                 udelay(80);
3219         }
3220
3221         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3222
3223         /* Some third-party PHYs need to be reset on link going
3224          * down.
3225          */
3226         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3227              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3228              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3229             netif_carrier_ok(tp->dev)) {
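                /* BMSR latches link-down events, so it is read twice:
                 * the second read returns the current link state.
                 */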
3230                 tg3_readphy(tp, MII_BMSR, &bmsr);
3231                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3232                     !(bmsr & BMSR_LSTATUS))
3233                         force_reset = 1;
3234         }
3235         if (force_reset)
3236                 tg3_phy_reset(tp);
3237
3238         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3239                 tg3_readphy(tp, MII_BMSR, &bmsr);
3240                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3241                     !tg3_flag(tp, INIT_COMPLETE))
3242                         bmsr = 0;
3243
3244                 if (!(bmsr & BMSR_LSTATUS)) {
3245                         err = tg3_init_5401phy_dsp(tp);
3246                         if (err)
3247                                 return err;
3248
3249                         tg3_readphy(tp, MII_BMSR, &bmsr);
3250                         for (i = 0; i < 1000; i++) {
3251                                 udelay(10);
3252                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3253                                     (bmsr & BMSR_LSTATUS)) {
3254                                         udelay(40);
3255                                         break;
3256                                 }
3257                         }
3258
3259                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3260                             TG3_PHY_REV_BCM5401_B0 &&
3261                             !(bmsr & BMSR_LSTATUS) &&
3262                             tp->link_config.active_speed == SPEED_1000) {
3263                                 err = tg3_phy_reset(tp);
3264                                 if (!err)
3265                                         err = tg3_init_5401phy_dsp(tp);
3266                                 if (err)
3267                                         return err;
3268                         }
3269                 }
3270         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3271                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3272                 /* 5701 {A0,B0} CRC bug workaround */
3273                 tg3_writephy(tp, 0x15, 0x0a75);
3274                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3275                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3276                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3277         }
3278
3279         /* Clear pending interrupts... */
3280         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3281         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3282
3283         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3284                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3285         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3286                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3287
3288         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3289             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3290                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3291                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3292                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3293                 else
3294                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3295         }
3296
3297         current_link_up = 0;
3298         current_speed = SPEED_INVALID;
3299         current_duplex = DUPLEX_INVALID;
3300
3301         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3302                 err = tg3_phy_auxctl_read(tp,
3303                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3304                                           &val);
3305                 if (!err && !(val & (1 << 10))) {
3306                         tg3_phy_auxctl_write(tp,
3307                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3308                                              val | (1 << 10));
3309                         goto relink;
3310                 }
3311         }
3312
3313         bmsr = 0;
3314         for (i = 0; i < 100; i++) {
3315                 tg3_readphy(tp, MII_BMSR, &bmsr);
3316                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3317                     (bmsr & BMSR_LSTATUS))
3318                         break;
3319                 udelay(40);
3320         }
3321
3322         if (bmsr & BMSR_LSTATUS) {
3323                 u32 aux_stat, bmcr;
3324
3325                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3326                 for (i = 0; i < 2000; i++) {
3327                         udelay(10);
3328                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3329                             aux_stat)
3330                                 break;
3331                 }
3332
3333                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3334                                              &current_speed,
3335                                              &current_duplex);
3336
3337                 bmcr = 0;
3338                 for (i = 0; i < 200; i++) {
3339                         tg3_readphy(tp, MII_BMCR, &bmcr);
3340                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
3341                                 continue;
3342                         if (bmcr && bmcr != 0x7fff)
3343                                 break;
3344                         udelay(10);
3345                 }
3346
3347                 lcl_adv = 0;
3348                 rmt_adv = 0;
3349
3350                 tp->link_config.active_speed = current_speed;
3351                 tp->link_config.active_duplex = current_duplex;
3352
3353                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3354                         if ((bmcr & BMCR_ANENABLE) &&
3355                             tg3_copper_is_advertising_all(tp,
3356                                                 tp->link_config.advertising)) {
3357                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3358                                                                   &rmt_adv))
3359                                         current_link_up = 1;
3360                         }
3361                 } else {
3362                         if (!(bmcr & BMCR_ANENABLE) &&
3363                             tp->link_config.speed == current_speed &&
3364                             tp->link_config.duplex == current_duplex &&
3365                             tp->link_config.flowctrl ==
3366                             tp->link_config.active_flowctrl) {
3367                                 current_link_up = 1;
3368                         }
3369                 }
3370
3371                 if (current_link_up == 1 &&
3372                     tp->link_config.active_duplex == DUPLEX_FULL)
3373                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3374         }
3375
3376 relink:
3377         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3378                 tg3_phy_copper_begin(tp);
3379
3380                 tg3_readphy(tp, MII_BMSR, &bmsr);
3381                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
3382                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
3383                         current_link_up = 1;
3384         }
3385
3386         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3387         if (current_link_up == 1) {
3388                 if (tp->link_config.active_speed == SPEED_100 ||
3389                     tp->link_config.active_speed == SPEED_10)
3390                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3391                 else
3392                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3393         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3394                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3395         else
3396                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3397
3398         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3399         if (tp->link_config.active_duplex == DUPLEX_HALF)
3400                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3401
3402         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3403                 if (current_link_up == 1 &&
3404                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3405                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3406                 else
3407                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3408         }
3409
3410         /* ??? Without this setting Netgear GA302T PHY does not
3411          * ??? send/receive packets...
3412          */
3413         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3414             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3415                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3416                 tw32_f(MAC_MI_MODE, tp->mi_mode);
3417                 udelay(80);
3418         }
3419
3420         tw32_f(MAC_MODE, tp->mac_mode);
3421         udelay(40);
3422
3423         tg3_phy_eee_adjust(tp, current_link_up);
3424
3425         if (tg3_flag(tp, USE_LINKCHG_REG)) {
3426                 /* Polled via timer. */
3427                 tw32_f(MAC_EVENT, 0);
3428         } else {
3429                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3430         }
3431         udelay(40);
3432
3433         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3434             current_link_up == 1 &&
3435             tp->link_config.active_speed == SPEED_1000 &&
3436             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
3437                 udelay(120);
3438                 tw32_f(MAC_STATUS,
3439                      (MAC_STATUS_SYNC_CHANGED |
3440                       MAC_STATUS_CFG_CHANGED));
3441                 udelay(40);
3442                 tg3_write_mem(tp,
3443                               NIC_SRAM_FIRMWARE_MBOX,
3444                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3445         }
3446
3447         /* Prevent send BD corruption: disable CLKREQ at 10/100Mbps. */
3448         if (tg3_flag(tp, CLKREQ_BUG)) {
3449                 u16 oldlnkctl, newlnkctl;
3450
3451                 pci_read_config_word(tp->pdev,
3452                                      tp->pcie_cap + PCI_EXP_LNKCTL,
3453                                      &oldlnkctl);
3454                 if (tp->link_config.active_speed == SPEED_100 ||
3455                     tp->link_config.active_speed == SPEED_10)
3456                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3457                 else
3458                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3459                 if (newlnkctl != oldlnkctl)
3460                         pci_write_config_word(tp->pdev,
3461                                               tp->pcie_cap + PCI_EXP_LNKCTL,
3462                                               newlnkctl);
3463         }
3464
3465         if (current_link_up != netif_carrier_ok(tp->dev)) {
3466                 if (current_link_up)
3467                         netif_carrier_on(tp->dev);
3468                 else
3469                         netif_carrier_off(tp->dev);
3470                 tg3_link_report(tp);
3471         }
3472
3473         return 0;
3474 }
3475
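/* Software implementation of the IEEE 802.3 clause 37 (802.3z)
 * 1000BASE-X autonegotiation state machine, used when the MAC is not
 * running autoneg in hardware.
 */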
3476 struct tg3_fiber_aneginfo {
3477         int state;
3478 #define ANEG_STATE_UNKNOWN              0
3479 #define ANEG_STATE_AN_ENABLE            1
3480 #define ANEG_STATE_RESTART_INIT         2
3481 #define ANEG_STATE_RESTART              3
3482 #define ANEG_STATE_DISABLE_LINK_OK      4
3483 #define ANEG_STATE_ABILITY_DETECT_INIT  5
3484 #define ANEG_STATE_ABILITY_DETECT       6
3485 #define ANEG_STATE_ACK_DETECT_INIT      7
3486 #define ANEG_STATE_ACK_DETECT           8
3487 #define ANEG_STATE_COMPLETE_ACK_INIT    9
3488 #define ANEG_STATE_COMPLETE_ACK         10
3489 #define ANEG_STATE_IDLE_DETECT_INIT     11
3490 #define ANEG_STATE_IDLE_DETECT          12
3491 #define ANEG_STATE_LINK_OK              13
3492 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
3493 #define ANEG_STATE_NEXT_PAGE_WAIT       15
3494
3495         u32 flags;
3496 #define MR_AN_ENABLE            0x00000001
3497 #define MR_RESTART_AN           0x00000002
3498 #define MR_AN_COMPLETE          0x00000004
3499 #define MR_PAGE_RX              0x00000008
3500 #define MR_NP_LOADED            0x00000010
3501 #define MR_TOGGLE_TX            0x00000020
3502 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
3503 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
3504 #define MR_LP_ADV_SYM_PAUSE     0x00000100
3505 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
3506 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3507 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3508 #define MR_LP_ADV_NEXT_PAGE     0x00001000
3509 #define MR_TOGGLE_RX            0x00002000
3510 #define MR_NP_RX                0x00004000
3511
3512 #define MR_LINK_OK              0x80000000
3513
3514         unsigned long link_time, cur_time;
3515
3516         u32 ability_match_cfg;
3517         int ability_match_count;
3518
3519         char ability_match, idle_match, ack_match;
3520
3521         u32 txconfig, rxconfig;
3522 #define ANEG_CFG_NP             0x00000080
3523 #define ANEG_CFG_ACK            0x00000040
3524 #define ANEG_CFG_RF2            0x00000020
3525 #define ANEG_CFG_RF1            0x00000010
3526 #define ANEG_CFG_PS2            0x00000001
3527 #define ANEG_CFG_PS1            0x00008000
3528 #define ANEG_CFG_HD             0x00004000
3529 #define ANEG_CFG_FD             0x00002000
3530 #define ANEG_CFG_INVAL          0x00001f06
3531
3532 };
3533 #define ANEG_OK         0
3534 #define ANEG_DONE       1
3535 #define ANEG_TIMER_ENAB 2
3536 #define ANEG_FAILED     -1
3537
3538 #define ANEG_STATE_SETTLE_TIME  10000
3539
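/* Advance the fiber autoneg state machine by one tick.  fiber_autoneg()
 * calls this roughly once per microsecond, so ANEG_STATE_SETTLE_TIME
 * (10000 ticks) corresponds to about 10ms of link settle time.
 */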
3540 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3541                                    struct tg3_fiber_aneginfo *ap)
3542 {
3543         u16 flowctrl;
3544         unsigned long delta;
3545         u32 rx_cfg_reg;
3546         int ret;
3547
3548         if (ap->state == ANEG_STATE_UNKNOWN) {
3549                 ap->rxconfig = 0;
3550                 ap->link_time = 0;
3551                 ap->cur_time = 0;
3552                 ap->ability_match_cfg = 0;
3553                 ap->ability_match_count = 0;
3554                 ap->ability_match = 0;
3555                 ap->idle_match = 0;
3556                 ap->ack_match = 0;
3557         }
3558         ap->cur_time++;
3559
3560         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3561                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3562
3563                 if (rx_cfg_reg != ap->ability_match_cfg) {
3564                         ap->ability_match_cfg = rx_cfg_reg;
3565                         ap->ability_match = 0;
3566                         ap->ability_match_count = 0;
3567                 } else {
3568                         if (++ap->ability_match_count > 1) {
3569                                 ap->ability_match = 1;
3570                                 ap->ability_match_cfg = rx_cfg_reg;
3571                         }
3572                 }
3573                 if (rx_cfg_reg & ANEG_CFG_ACK)
3574                         ap->ack_match = 1;
3575                 else
3576                         ap->ack_match = 0;
3577
3578                 ap->idle_match = 0;
3579         } else {
3580                 ap->idle_match = 1;
3581                 ap->ability_match_cfg = 0;
3582                 ap->ability_match_count = 0;
3583                 ap->ability_match = 0;
3584                 ap->ack_match = 0;
3585
3586                 rx_cfg_reg = 0;
3587         }
3588
3589         ap->rxconfig = rx_cfg_reg;
3590         ret = ANEG_OK;
3591
3592         switch (ap->state) {
3593         case ANEG_STATE_UNKNOWN:
3594                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3595                         ap->state = ANEG_STATE_AN_ENABLE;
3596
3597                 /* fallthru */
3598         case ANEG_STATE_AN_ENABLE:
3599                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3600                 if (ap->flags & MR_AN_ENABLE) {
3601                         ap->link_time = 0;
3602                         ap->cur_time = 0;
3603                         ap->ability_match_cfg = 0;
3604                         ap->ability_match_count = 0;
3605                         ap->ability_match = 0;
3606                         ap->idle_match = 0;
3607                         ap->ack_match = 0;
3608
3609                         ap->state = ANEG_STATE_RESTART_INIT;
3610                 } else {
3611                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
3612                 }
3613                 break;
3614
3615         case ANEG_STATE_RESTART_INIT:
3616                 ap->link_time = ap->cur_time;
3617                 ap->flags &= ~(MR_NP_LOADED);
3618                 ap->txconfig = 0;
3619                 tw32(MAC_TX_AUTO_NEG, 0);
3620                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3621                 tw32_f(MAC_MODE, tp->mac_mode);
3622                 udelay(40);
3623
3624                 ret = ANEG_TIMER_ENAB;
3625                 ap->state = ANEG_STATE_RESTART;
3626
3627                 /* fallthru */
3628         case ANEG_STATE_RESTART:
3629                 delta = ap->cur_time - ap->link_time;
3630                 if (delta > ANEG_STATE_SETTLE_TIME)
3631                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3632                 else
3633                         ret = ANEG_TIMER_ENAB;
3634                 break;
3635
3636         case ANEG_STATE_DISABLE_LINK_OK:
3637                 ret = ANEG_DONE;
3638                 break;
3639
3640         case ANEG_STATE_ABILITY_DETECT_INIT:
3641                 ap->flags &= ~(MR_TOGGLE_TX);
3642                 ap->txconfig = ANEG_CFG_FD;
3643                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3644                 if (flowctrl & ADVERTISE_1000XPAUSE)
3645                         ap->txconfig |= ANEG_CFG_PS1;
3646                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3647                         ap->txconfig |= ANEG_CFG_PS2;
3648                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3649                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3650                 tw32_f(MAC_MODE, tp->mac_mode);
3651                 udelay(40);
3652
3653                 ap->state = ANEG_STATE_ABILITY_DETECT;
3654                 break;
3655
3656         case ANEG_STATE_ABILITY_DETECT:
3657                 if (ap->ability_match != 0 && ap->rxconfig != 0)
3658                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
3659                 break;
3660
3661         case ANEG_STATE_ACK_DETECT_INIT:
3662                 ap->txconfig |= ANEG_CFG_ACK;
3663                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3664                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3665                 tw32_f(MAC_MODE, tp->mac_mode);
3666                 udelay(40);
3667
3668                 ap->state = ANEG_STATE_ACK_DETECT;
3669
3670                 /* fallthru */
3671         case ANEG_STATE_ACK_DETECT:
3672                 if (ap->ack_match != 0) {
3673                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3674                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3675                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3676                         } else {
3677                                 ap->state = ANEG_STATE_AN_ENABLE;
3678                         }
3679                 } else if (ap->ability_match != 0 &&
3680                            ap->rxconfig == 0) {
3681                         ap->state = ANEG_STATE_AN_ENABLE;
3682                 }
3683                 break;
3684
3685         case ANEG_STATE_COMPLETE_ACK_INIT:
3686                 if (ap->rxconfig & ANEG_CFG_INVAL) {
3687                         ret = ANEG_FAILED;
3688                         break;
3689                 }
3690                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3691                                MR_LP_ADV_HALF_DUPLEX |
3692                                MR_LP_ADV_SYM_PAUSE |
3693                                MR_LP_ADV_ASYM_PAUSE |
3694                                MR_LP_ADV_REMOTE_FAULT1 |
3695                                MR_LP_ADV_REMOTE_FAULT2 |
3696                                MR_LP_ADV_NEXT_PAGE |
3697                                MR_TOGGLE_RX |
3698                                MR_NP_RX);
3699                 if (ap->rxconfig & ANEG_CFG_FD)
3700                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3701                 if (ap->rxconfig & ANEG_CFG_HD)
3702                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3703                 if (ap->rxconfig & ANEG_CFG_PS1)
3704                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
3705                 if (ap->rxconfig & ANEG_CFG_PS2)
3706                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3707                 if (ap->rxconfig & ANEG_CFG_RF1)
3708                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3709                 if (ap->rxconfig & ANEG_CFG_RF2)
3710                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3711                 if (ap->rxconfig & ANEG_CFG_NP)
3712                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
3713
3714                 ap->link_time = ap->cur_time;
3715
3716                 ap->flags ^= (MR_TOGGLE_TX);
3717                 if (ap->rxconfig & 0x0008)
3718                         ap->flags |= MR_TOGGLE_RX;
3719                 if (ap->rxconfig & ANEG_CFG_NP)
3720                         ap->flags |= MR_NP_RX;
3721                 ap->flags |= MR_PAGE_RX;
3722
3723                 ap->state = ANEG_STATE_COMPLETE_ACK;
3724                 ret = ANEG_TIMER_ENAB;
3725                 break;
3726
3727         case ANEG_STATE_COMPLETE_ACK:
3728                 if (ap->ability_match != 0 &&
3729                     ap->rxconfig == 0) {
3730                         ap->state = ANEG_STATE_AN_ENABLE;
3731                         break;
3732                 }
3733                 delta = ap->cur_time - ap->link_time;
3734                 if (delta > ANEG_STATE_SETTLE_TIME) {
3735                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3736                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3737                         } else {
3738                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3739                                     !(ap->flags & MR_NP_RX)) {
3740                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3741                                 } else {
3742                                         ret = ANEG_FAILED;
3743                                 }
3744                         }
3745                 }
3746                 break;
3747
3748         case ANEG_STATE_IDLE_DETECT_INIT:
3749                 ap->link_time = ap->cur_time;
3750                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3751                 tw32_f(MAC_MODE, tp->mac_mode);
3752                 udelay(40);
3753
3754                 ap->state = ANEG_STATE_IDLE_DETECT;
3755                 ret = ANEG_TIMER_ENAB;
3756                 break;
3757
3758         case ANEG_STATE_IDLE_DETECT:
3759                 if (ap->ability_match != 0 &&
3760                     ap->rxconfig == 0) {
3761                         ap->state = ANEG_STATE_AN_ENABLE;
3762                         break;
3763                 }
3764                 delta = ap->cur_time - ap->link_time;
3765                 if (delta > ANEG_STATE_SETTLE_TIME) {
3766                         /* XXX another gem from the Broadcom driver :( */
3767                         ap->state = ANEG_STATE_LINK_OK;
3768                 }
3769                 break;
3770
3771         case ANEG_STATE_LINK_OK:
3772                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3773                 ret = ANEG_DONE;
3774                 break;
3775
3776         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3777                 /* ??? unimplemented */
3778                 break;
3779
3780         case ANEG_STATE_NEXT_PAGE_WAIT:
3781                 /* ??? unimplemented */
3782                 break;
3783
3784         default:
3785                 ret = ANEG_FAILED;
3786                 break;
3787         }
3788
3789         return ret;
3790 }
3791
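/* Run the software autoneg state machine to completion (or failure),
 * ticking it about every microsecond for up to ~195ms.  Returns 1 when
 * the state machine finished and reports a usable link.
 */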
3792 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3793 {
3794         int res = 0;
3795         struct tg3_fiber_aneginfo aninfo;
3796         int status = ANEG_FAILED;
3797         unsigned int tick;
3798         u32 tmp;
3799
3800         tw32_f(MAC_TX_AUTO_NEG, 0);
3801
3802         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3803         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3804         udelay(40);
3805
3806         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3807         udelay(40);
3808
3809         memset(&aninfo, 0, sizeof(aninfo));
3810         aninfo.flags |= MR_AN_ENABLE;
3811         aninfo.state = ANEG_STATE_UNKNOWN;
3812         aninfo.cur_time = 0;
3813         tick = 0;
3814         while (++tick < 195000) {
3815                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3816                 if (status == ANEG_DONE || status == ANEG_FAILED)
3817                         break;
3818
3819                 udelay(1);
3820         }
3821
3822         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3823         tw32_f(MAC_MODE, tp->mac_mode);
3824         udelay(40);
3825
3826         *txflags = aninfo.txconfig;
3827         *rxflags = aninfo.flags;
3828
3829         if (status == ANEG_DONE &&
3830             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3831                              MR_LP_ADV_FULL_DUPLEX)))
3832                 res = 1;
3833
3834         return res;
3835 }
3836
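/* Bring the BCM8002 SerDes PHY up with its vendor-specified register
 * sequence.  The sequence is skipped after initial bringup unless the
 * PCS is in sync.
 */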
3837 static void tg3_init_bcm8002(struct tg3 *tp)
3838 {
3839         u32 mac_status = tr32(MAC_STATUS);
3840         int i;
3841
3842         /* Reset when initializing for the first time or when we have a link. */
3843         if (tg3_flag(tp, INIT_COMPLETE) &&
3844             !(mac_status & MAC_STATUS_PCS_SYNCED))
3845                 return;
3846
3847         /* Set PLL lock range. */
3848         tg3_writephy(tp, 0x16, 0x8007);
3849
3850         /* SW reset */
3851         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3852
3853         /* Wait for reset to complete. */
3854         /* XXX schedule_timeout() ... */
3855         for (i = 0; i < 500; i++)
3856                 udelay(10);
3857
3858         /* Config mode; select PMA/Ch 1 regs. */
3859         tg3_writephy(tp, 0x10, 0x8411);
3860
3861         /* Enable auto-lock and comdet, select txclk for tx. */
3862         tg3_writephy(tp, 0x11, 0x0a10);
3863
3864         tg3_writephy(tp, 0x18, 0x00a0);
3865         tg3_writephy(tp, 0x16, 0x41ff);
3866
3867         /* Assert and deassert POR. */
3868         tg3_writephy(tp, 0x13, 0x0400);
3869         udelay(40);
3870         tg3_writephy(tp, 0x13, 0x0000);
3871
3872         tg3_writephy(tp, 0x11, 0x0a50);
3873         udelay(40);
3874         tg3_writephy(tp, 0x11, 0x0a10);
3875
3876         /* Wait for signal to stabilize */
3877         /* XXX schedule_timeout() ... */
3878         for (i = 0; i < 15000; i++)
3879                 udelay(10);
3880
3881         /* Deselect the channel register so we can read the PHYID
3882          * later.
3883          */
3884         tg3_writephy(tp, 0x10, 0x8011);
3885 }
3886
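/* Fiber link setup using the on-chip SG DIG hardware autoneg block.
 * Handles forced mode, hardware autoneg, and a parallel-detection
 * fallback for partners that never complete autoneg.  Returns nonzero
 * if the link came up.
 */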
3887 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3888 {
3889         u16 flowctrl;
3890         u32 sg_dig_ctrl, sg_dig_status;
3891         u32 serdes_cfg, expected_sg_dig_ctrl;
3892         int workaround, port_a;
3893         int current_link_up;
3894
3895         serdes_cfg = 0;
3896         expected_sg_dig_ctrl = 0;
3897         workaround = 0;
3898         port_a = 1;
3899         current_link_up = 0;
3900
3901         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3902             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3903                 workaround = 1;
3904                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3905                         port_a = 0;
3906
3907                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3908                 /* preserve bits 20-23 for voltage regulator */
3909                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3910         }
3911
3912         sg_dig_ctrl = tr32(SG_DIG_CTRL);
3913
3914         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3915                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3916                         if (workaround) {
3917                                 u32 val = serdes_cfg;
3918
3919                                 if (port_a)
3920                                         val |= 0xc010000;
3921                                 else
3922                                         val |= 0x4010000;
3923                                 tw32_f(MAC_SERDES_CFG, val);
3924                         }
3925
3926                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3927                 }
3928                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3929                         tg3_setup_flow_control(tp, 0, 0);
3930                         current_link_up = 1;
3931                 }
3932                 goto out;
3933         }
3934
3935         /* Want auto-negotiation.  */
3936         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3937
3938         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3939         if (flowctrl & ADVERTISE_1000XPAUSE)
3940                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3941         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3942                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3943
3944         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3945                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3946                     tp->serdes_counter &&
3947                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
3948                                     MAC_STATUS_RCVD_CFG)) ==
3949                      MAC_STATUS_PCS_SYNCED)) {
3950                         tp->serdes_counter--;
3951                         current_link_up = 1;
3952                         goto out;
3953                 }
3954 restart_autoneg:
3955                 if (workaround)
3956                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3957                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3958                 udelay(5);
3959                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3960
3961                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3962                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3963         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3964                                  MAC_STATUS_SIGNAL_DET)) {
3965                 sg_dig_status = tr32(SG_DIG_STATUS);
3966                 mac_status = tr32(MAC_STATUS);
3967
3968                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3969                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
3970                         u32 local_adv = 0, remote_adv = 0;
3971
3972                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3973                                 local_adv |= ADVERTISE_1000XPAUSE;
3974                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3975                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
3976
3977                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3978                                 remote_adv |= LPA_1000XPAUSE;
3979                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3980                                 remote_adv |= LPA_1000XPAUSE_ASYM;
3981
3982                         tg3_setup_flow_control(tp, local_adv, remote_adv);
3983                         current_link_up = 1;
3984                         tp->serdes_counter = 0;
3985                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3986                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3987                         if (tp->serdes_counter)
3988                                 tp->serdes_counter--;
3989                         else {
3990                                 if (workaround) {
3991                                         u32 val = serdes_cfg;
3992
3993                                         if (port_a)
3994                                                 val |= 0xc010000;
3995                                         else
3996                                                 val |= 0x4010000;
3997
3998                                         tw32_f(MAC_SERDES_CFG, val);
3999                                 }
4000
4001                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4002                                 udelay(40);
4003
4004                                 /* Parallel detection: the link is up only
4005                                  * if we have PCS_SYNC and we are not
4006                                  * receiving config code words. */
4007                                 mac_status = tr32(MAC_STATUS);
4008                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4009                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4010                                         tg3_setup_flow_control(tp, 0, 0);
4011                                         current_link_up = 1;
4012                                         tp->phy_flags |=
4013                                                 TG3_PHYFLG_PARALLEL_DETECT;
4014                                         tp->serdes_counter =
4015                                                 SERDES_PARALLEL_DET_TIMEOUT;
4016                                 } else
4017                                         goto restart_autoneg;
4018                         }
4019                 }
4020         } else {
4021                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4022                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4023         }
4024
4025 out:
4026         return current_link_up;
4027 }
4028
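/* Fiber link setup without the hardware autoneg block: either run the
 * software clause 37 state machine or, with autoneg disabled, force a
 * 1000FD link.  Returns nonzero if the link is up.
 */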
4029 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4030 {
4031         int current_link_up = 0;
4032
4033         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4034                 goto out;
4035
4036         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4037                 u32 txflags, rxflags;
4038                 int i;
4039
4040                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4041                         u32 local_adv = 0, remote_adv = 0;
4042
4043                         if (txflags & ANEG_CFG_PS1)
4044                                 local_adv |= ADVERTISE_1000XPAUSE;
4045                         if (txflags & ANEG_CFG_PS2)
4046                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4047
4048                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
4049                                 remote_adv |= LPA_1000XPAUSE;
4050                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4051                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4052
4053                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4054
4055                         current_link_up = 1;
4056                 }
4057                 for (i = 0; i < 30; i++) {
4058                         udelay(20);
4059                         tw32_f(MAC_STATUS,
4060                                (MAC_STATUS_SYNC_CHANGED |
4061                                 MAC_STATUS_CFG_CHANGED));
4062                         udelay(40);
4063                         if ((tr32(MAC_STATUS) &
4064                              (MAC_STATUS_SYNC_CHANGED |
4065                               MAC_STATUS_CFG_CHANGED)) == 0)
4066                                 break;
4067                 }
4068
4069                 mac_status = tr32(MAC_STATUS);
4070                 if (current_link_up == 0 &&
4071                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
4072                     !(mac_status & MAC_STATUS_RCVD_CFG))
4073                         current_link_up = 1;
4074         } else {
4075                 tg3_setup_flow_control(tp, 0, 0);
4076
4077                 /* Forcing 1000FD link up. */
4078                 current_link_up = 1;
4079
4080                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4081                 udelay(40);
4082
4083                 tw32_f(MAC_MODE, tp->mac_mode);
4084                 udelay(40);
4085         }
4086
4087 out:
4088         return current_link_up;
4089 }
4090
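/* Top-level link setup for TBI (internal serdes) devices.  Switches the
 * MAC into TBI mode, runs hardware or by-hand autoneg, waits for the
 * status bits to settle, then updates the LEDs and carrier state.
 */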
4091 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4092 {
4093         u32 orig_pause_cfg;
4094         u16 orig_active_speed;
4095         u8 orig_active_duplex;
4096         u32 mac_status;
4097         int current_link_up;
4098         int i;
4099
4100         orig_pause_cfg = tp->link_config.active_flowctrl;
4101         orig_active_speed = tp->link_config.active_speed;
4102         orig_active_duplex = tp->link_config.active_duplex;
4103
4104         if (!tg3_flag(tp, HW_AUTONEG) &&
4105             netif_carrier_ok(tp->dev) &&
4106             tg3_flag(tp, INIT_COMPLETE)) {
4107                 mac_status = tr32(MAC_STATUS);
4108                 mac_status &= (MAC_STATUS_PCS_SYNCED |
4109                                MAC_STATUS_SIGNAL_DET |
4110                                MAC_STATUS_CFG_CHANGED |
4111                                MAC_STATUS_RCVD_CFG);
4112                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4113                                    MAC_STATUS_SIGNAL_DET)) {
4114                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4115                                             MAC_STATUS_CFG_CHANGED));
4116                         return 0;
4117                 }
4118         }
4119
4120         tw32_f(MAC_TX_AUTO_NEG, 0);
4121
4122         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4123         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4124         tw32_f(MAC_MODE, tp->mac_mode);
4125         udelay(40);
4126
4127         if (tp->phy_id == TG3_PHY_ID_BCM8002)
4128                 tg3_init_bcm8002(tp);
4129
4130         /* Enable link change events even while polling the serdes. */
4131         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4132         udelay(40);
4133
4134         current_link_up = 0;
4135         mac_status = tr32(MAC_STATUS);
4136
4137         if (tg3_flag(tp, HW_AUTONEG))
4138                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4139         else
4140                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4141
4142         tp->napi[0].hw_status->status =
4143                 (SD_STATUS_UPDATED |
4144                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4145
4146         for (i = 0; i < 100; i++) {
4147                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4148                                     MAC_STATUS_CFG_CHANGED));
4149                 udelay(5);
4150                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4151                                          MAC_STATUS_CFG_CHANGED |
4152                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4153                         break;
4154         }
4155
4156         mac_status = tr32(MAC_STATUS);
4157         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4158                 current_link_up = 0;
4159                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4160                     tp->serdes_counter == 0) {
4161                         tw32_f(MAC_MODE, (tp->mac_mode |
4162                                           MAC_MODE_SEND_CONFIGS));
4163                         udelay(1);
4164                         tw32_f(MAC_MODE, tp->mac_mode);
4165                 }
4166         }
4167
4168         if (current_link_up == 1) {
4169                 tp->link_config.active_speed = SPEED_1000;
4170                 tp->link_config.active_duplex = DUPLEX_FULL;
4171                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4172                                     LED_CTRL_LNKLED_OVERRIDE |
4173                                     LED_CTRL_1000MBPS_ON));
4174         } else {
4175                 tp->link_config.active_speed = SPEED_INVALID;
4176                 tp->link_config.active_duplex = DUPLEX_INVALID;
4177                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4178                                     LED_CTRL_LNKLED_OVERRIDE |
4179                                     LED_CTRL_TRAFFIC_OVERRIDE));
4180         }
4181
4182         if (current_link_up != netif_carrier_ok(tp->dev)) {
4183                 if (current_link_up)
4184                         netif_carrier_on(tp->dev);
4185                 else
4186                         netif_carrier_off(tp->dev);
4187                 tg3_link_report(tp);
4188         } else {
4189                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4190                 if (orig_pause_cfg != now_pause_cfg ||
4191                     orig_active_speed != tp->link_config.active_speed ||
4192                     orig_active_duplex != tp->link_config.active_duplex)
4193                         tg3_link_report(tp);
4194         }
4195
4196         return 0;
4197 }
4198
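/* Link setup for serdes devices managed through MII registers (e.g. the
 * 5714/5780-class parts): 1000BASE-X advertisement goes through
 * MII_ADVERTISE and the usual BMCR/BMSR handshake, with parallel
 * detection handled separately.
 */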
4199 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4200 {
4201         int current_link_up, err = 0;
4202         u32 bmsr, bmcr;
4203         u16 current_speed;
4204         u8 current_duplex;
4205         u32 local_adv, remote_adv;
4206
4207         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4208         tw32_f(MAC_MODE, tp->mac_mode);
4209         udelay(40);
4210
4211         tw32(MAC_EVENT, 0);
4212
4213         tw32_f(MAC_STATUS,
4214              (MAC_STATUS_SYNC_CHANGED |
4215               MAC_STATUS_CFG_CHANGED |
4216               MAC_STATUS_MI_COMPLETION |
4217               MAC_STATUS_LNKSTATE_CHANGED));
4218         udelay(40);
4219
4220         if (force_reset)
4221                 tg3_phy_reset(tp);
4222
4223         current_link_up = 0;
4224         current_speed = SPEED_INVALID;
4225         current_duplex = DUPLEX_INVALID;
4226
4227         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4228         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4229         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4230                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4231                         bmsr |= BMSR_LSTATUS;
4232                 else
4233                         bmsr &= ~BMSR_LSTATUS;
4234         }
4235
4236         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4237
4238         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4239             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4240                 /* do nothing, just check for link up at the end */
4241         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4242                 u32 adv, new_adv;
4243
4244                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4245                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4246                                   ADVERTISE_1000XPAUSE |
4247                                   ADVERTISE_1000XPSE_ASYM |
4248                                   ADVERTISE_SLCT);
4249
4250                 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4251
4252                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4253                         new_adv |= ADVERTISE_1000XHALF;
4254                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4255                         new_adv |= ADVERTISE_1000XFULL;
4256
4257                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4258                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
4259                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4260                         tg3_writephy(tp, MII_BMCR, bmcr);
4261
4262                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4263                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4264                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4265
4266                         return err;
4267                 }
4268         } else {
4269                 u32 new_bmcr;
4270
4271                 bmcr &= ~BMCR_SPEED1000;
4272                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4273
4274                 if (tp->link_config.duplex == DUPLEX_FULL)
4275                         new_bmcr |= BMCR_FULLDPLX;
4276
4277                 if (new_bmcr != bmcr) {
4278                         /* BMCR_SPEED1000 is a reserved bit that needs
4279                          * to be set on write.
4280                          */
4281                         new_bmcr |= BMCR_SPEED1000;
4282
4283                         /* Force a linkdown */
4284                         if (netif_carrier_ok(tp->dev)) {
4285                                 u32 adv;
4286
4287                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4288                                 adv &= ~(ADVERTISE_1000XFULL |
4289                                          ADVERTISE_1000XHALF |
4290                                          ADVERTISE_SLCT);
4291                                 tg3_writephy(tp, MII_ADVERTISE, adv);
4292                                 tg3_writephy(tp, MII_BMCR, bmcr |
4293                                                            BMCR_ANRESTART |
4294                                                            BMCR_ANENABLE);
4295                                 udelay(10);
4296                                 netif_carrier_off(tp->dev);
4297                         }
4298                         tg3_writephy(tp, MII_BMCR, new_bmcr);
4299                         bmcr = new_bmcr;
4300                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4301                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4302                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4303                             ASIC_REV_5714) {
4304                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4305                                         bmsr |= BMSR_LSTATUS;
4306                                 else
4307                                         bmsr &= ~BMSR_LSTATUS;
4308                         }
4309                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4310                 }
4311         }
4312
4313         if (bmsr & BMSR_LSTATUS) {
4314                 current_speed = SPEED_1000;
4315                 current_link_up = 1;
4316                 if (bmcr & BMCR_FULLDPLX)
4317                         current_duplex = DUPLEX_FULL;
4318                 else
4319                         current_duplex = DUPLEX_HALF;
4320
4321                 local_adv = 0;
4322                 remote_adv = 0;
4323
4324                 if (bmcr & BMCR_ANENABLE) {
4325                         u32 common;
4326
4327                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4328                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4329                         common = local_adv & remote_adv;
4330                         if (common & (ADVERTISE_1000XHALF |
4331                                       ADVERTISE_1000XFULL)) {
4332                                 if (common & ADVERTISE_1000XFULL)
4333                                         current_duplex = DUPLEX_FULL;
4334                                 else
4335                                         current_duplex = DUPLEX_HALF;
4336                         } else if (!tg3_flag(tp, 5780_CLASS)) {
4337                                 /* Link is up via parallel detect */
4338                         } else {
4339                                 current_link_up = 0;
4340                         }
4341                 }
4342         }
4343
4344         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4345                 tg3_setup_flow_control(tp, local_adv, remote_adv);
4346
4347         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4348         if (tp->link_config.active_duplex == DUPLEX_HALF)
4349                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4350
4351         tw32_f(MAC_MODE, tp->mac_mode);
4352         udelay(40);
4353
4354         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4355
4356         tp->link_config.active_speed = current_speed;
4357         tp->link_config.active_duplex = current_duplex;
4358
4359         if (current_link_up != netif_carrier_ok(tp->dev)) {
4360                 if (current_link_up)
4361                         netif_carrier_on(tp->dev);
4362                 else {
4363                         netif_carrier_off(tp->dev);
4364                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4365                 }
4366                 tg3_link_report(tp);
4367         }
4368         return err;
4369 }
4370
4371 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4372 {
4373         if (tp->serdes_counter) {
4374                 /* Give autoneg time to complete. */
4375                 tp->serdes_counter--;
4376                 return;
4377         }
4378
4379         if (!netif_carrier_ok(tp->dev) &&
4380             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4381                 u32 bmcr;
4382
4383                 tg3_readphy(tp, MII_BMCR, &bmcr);
4384                 if (bmcr & BMCR_ANENABLE) {
4385                         u32 phy1, phy2;
4386
4387                         /* Select shadow register 0x1f */
4388                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4389                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4390
4391                         /* Select expansion interrupt status register */
4392                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4393                                          MII_TG3_DSP_EXP1_INT_STAT);
4394                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4395                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4396
4397                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4398                                 /* We have signal detect and are not
4399                                  * receiving config code words, so the link
4400                                  * is up by parallel detection.
4401                                  */
4402
4403                                 bmcr &= ~BMCR_ANENABLE;
4404                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4405                                 tg3_writephy(tp, MII_BMCR, bmcr);
4406                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4407                         }
4408                 }
4409         } else if (netif_carrier_ok(tp->dev) &&
4410                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4411                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4412                 u32 phy2;
4413
4414                 /* Select expansion interrupt status register */
4415                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4416                                  MII_TG3_DSP_EXP1_INT_STAT);
4417                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4418                 if (phy2 & 0x20) {
4419                         u32 bmcr;
4420
4421                         /* Config code words received, turn on autoneg. */
4422                         tg3_readphy(tp, MII_BMCR, &bmcr);
4423                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4424
4425                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4426
4427                 }
4428         }
4429 }
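
/* Summary of the parallel-detect handshake above (illustrative; the bit
 * meanings are taken from the comments in the code): shadow register
 * 0x1f bit 0x10 set means signal is detected, and expansion interrupt
 * status bit 0x20 set means config code words are being received.
 * Signal with no config words implies the link partner is not
 * autonegotiating, so speed/duplex are forced; config words showing up
 * again means autoneg can be re-enabled.
 */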
4430
4431 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4432 {
4433         u32 val;
4434         int err;
4435
4436         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4437                 err = tg3_setup_fiber_phy(tp, force_reset);
4438         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4439                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4440         else
4441                 err = tg3_setup_copper_phy(tp, force_reset);
4442
4443         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4444                 u32 scale;
4445
4446                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4447                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4448                         scale = 65;
4449                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4450                         scale = 6;
4451                 else
4452                         scale = 12;
4453
4454                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4455                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4456                 tw32(GRC_MISC_CFG, val);
4457         }
4458
4459         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4460               (6 << TX_LENGTHS_IPG_SHIFT);
4461         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4462                 val |= tr32(MAC_TX_LENGTHS) &
4463                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
4464                         TX_LENGTHS_CNT_DWN_VAL_MSK);
4465
4466         if (tp->link_config.active_speed == SPEED_1000 &&
4467             tp->link_config.active_duplex == DUPLEX_HALF)
4468                 tw32(MAC_TX_LENGTHS, val |
4469                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
4470         else
4471                 tw32(MAC_TX_LENGTHS, val |
4472                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4473
4474         if (!tg3_flag(tp, 5705_PLUS)) {
4475                 if (netif_carrier_ok(tp->dev)) {
4476                         tw32(HOSTCC_STAT_COAL_TICKS,
4477                              tp->coal.stats_block_coalesce_usecs);
4478                 } else {
4479                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
4480                 }
4481         }
4482
4483         if (tg3_flag(tp, ASPM_WORKAROUND)) {
4484                 val = tr32(PCIE_PWR_MGMT_THRESH);
4485                 if (!netif_carrier_ok(tp->dev))
4486                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4487                               tp->pwrmgmt_thresh;
4488                 else
4489                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4490                 tw32(PCIE_PWR_MGMT_THRESH, val);
4491         }
4492
4493         return err;
4494 }
4495
4496 static inline int tg3_irq_sync(struct tg3 *tp)
4497 {
4498         return tp->irq_sync;
4499 }
4500
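/* Read @len bytes of registers starting at @off into @dst.  Each word
 * lands at the same byte offset within the buffer as its register
 * offset, which is why @dst is advanced by @off before the loop.
 */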
4501 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4502 {
4503         int i;
4504
4505         dst = (u32 *)((u8 *)dst + off);
4506         for (i = 0; i < len; i += sizeof(u32))
4507                 *dst++ = tr32(off + i);
4508 }
4509
4510 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4511 {
4512         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4513         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4514         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4515         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4516         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4517         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4518         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4519         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4520         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4521         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4522         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4523         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4524         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4525         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4526         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4527         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4528         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4529         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4530         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4531
4532         if (tg3_flag(tp, SUPPORT_MSIX))
4533                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4534
4535         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4536         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4537         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4538         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4539         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4540         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4541         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4542         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4543
4544         if (!tg3_flag(tp, 5705_PLUS)) {
4545                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4546                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4547                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4548         }
4549
4550         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4551         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4552         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4553         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4554         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4555
4556         if (tg3_flag(tp, NVRAM))
4557                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4558 }
4559
4560 static void tg3_dump_state(struct tg3 *tp)
4561 {
4562         int i;
4563         u32 *regs;
4564
4565         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4566         if (!regs) {
4567                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4568                 return;
4569         }
4570
4571         if (tg3_flag(tp, PCI_EXPRESS)) {
4572                 /* Read up to but not including private PCI registers */
4573                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4574                         regs[i / sizeof(u32)] = tr32(i);
4575         } else
4576                 tg3_dump_legacy_regs(tp, regs);
4577
4578         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4579                 if (!regs[i + 0] && !regs[i + 1] &&
4580                     !regs[i + 2] && !regs[i + 3])
4581                         continue;
4582
4583                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4584                            i * 4,
4585                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4586         }
4587
4588         kfree(regs);
4589
4590         for (i = 0; i < tp->irq_cnt; i++) {
4591                 struct tg3_napi *tnapi = &tp->napi[i];
4592
4593                 /* SW status block */
4594                 netdev_err(tp->dev,
4595                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4596                            i,
4597                            tnapi->hw_status->status,
4598                            tnapi->hw_status->status_tag,
4599                            tnapi->hw_status->rx_jumbo_consumer,
4600                            tnapi->hw_status->rx_consumer,
4601                            tnapi->hw_status->rx_mini_consumer,
4602                            tnapi->hw_status->idx[0].rx_producer,
4603                            tnapi->hw_status->idx[0].tx_consumer);
4604
4605                 netdev_err(tp->dev,
4606                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4607                            i,
4608                            tnapi->last_tag, tnapi->last_irq_tag,
4609                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4610                            tnapi->rx_rcb_ptr,
4611                            tnapi->prodring.rx_std_prod_idx,
4612                            tnapi->prodring.rx_std_cons_idx,
4613                            tnapi->prodring.rx_jmb_prod_idx,
4614                            tnapi->prodring.rx_jmb_cons_idx);
4615         }
4616 }
4617
4618 /* This is called whenever we suspect that the system chipset is re-
4619  * ordering the sequence of MMIO to the tx send mailbox. The symptom
4620  * is bogus tx completions. We try to recover by setting the
4621  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4622  * in the workqueue.
4623  */
4624 static void tg3_tx_recover(struct tg3 *tp)
4625 {
4626         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
4627                tp->write32_tx_mbox == tg3_write_indirect_mbox);
4628
4629         netdev_warn(tp->dev,
4630                     "The system may be re-ordering memory-mapped I/O "
4631                     "cycles to the network device, attempting to recover. "
4632                     "Please report the problem to the driver maintainer "
4633                     "and include system chipset information.\n");
4634
4635         spin_lock(&tp->lock);
4636         tg3_flag_set(tp, TX_RECOVERY_PENDING);
4637         spin_unlock(&tp->lock);
4638 }
4639
4640 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4641 {
4642         /* Tell compiler to fetch tx indices from memory. */
4643         barrier();
4644         return tnapi->tx_pending -
4645                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4646 }
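
/* Worked example of the wraparound arithmetic above (illustrative
 * only): with TG3_TX_RING_SIZE == 512, tx_prod == 3 (after wrapping)
 * and tx_cons == 510, (3 - 510) & 511 == 5, i.e. five descriptors are
 * still in flight, leaving tx_pending - 5 slots available.
 */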
4647
4648 /* Tigon3 never reports partial packet sends.  So we do not
4649  * need special logic to handle SKBs that have not had all
4650  * of their frags sent yet, like SunGEM does.
4651  */
4652 static void tg3_tx(struct tg3_napi *tnapi)
4653 {
4654         struct tg3 *tp = tnapi->tp;
4655         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4656         u32 sw_idx = tnapi->tx_cons;
4657         struct netdev_queue *txq;
4658         int index = tnapi - tp->napi;
4659
4660         if (tg3_flag(tp, ENABLE_TSS))
4661                 index--;
4662
4663         txq = netdev_get_tx_queue(tp->dev, index);
4664
4665         while (sw_idx != hw_idx) {
4666                 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4667                 struct sk_buff *skb = ri->skb;
4668                 int i, tx_bug = 0;
4669
4670                 if (unlikely(skb == NULL)) {
4671                         tg3_tx_recover(tp);
4672                         return;
4673                 }
4674
4675                 pci_unmap_single(tp->pdev,
4676                                  dma_unmap_addr(ri, mapping),
4677                                  skb_headlen(skb),
4678                                  PCI_DMA_TODEVICE);
4679
4680                 ri->skb = NULL;
4681
4682                 sw_idx = NEXT_TX(sw_idx);
4683
4684                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4685                         ri = &tnapi->tx_buffers[sw_idx];
4686                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4687                                 tx_bug = 1;
4688
4689                         pci_unmap_page(tp->pdev,
4690                                        dma_unmap_addr(ri, mapping),
4691                                        skb_shinfo(skb)->frags[i].size,
4692                                        PCI_DMA_TODEVICE);
4693                         sw_idx = NEXT_TX(sw_idx);
4694                 }
4695
4696                 dev_kfree_skb(skb);
4697
4698                 if (unlikely(tx_bug)) {
4699                         tg3_tx_recover(tp);
4700                         return;
4701                 }
4702         }
4703
4704         tnapi->tx_cons = sw_idx;
4705
4706         /* Need to make the tx_cons update visible to tg3_start_xmit()
4707          * before checking for netif_queue_stopped().  Without the
4708          * memory barrier, there is a small possibility that tg3_start_xmit()
4709          * will miss it and cause the queue to be stopped forever.
4710          */
4711         smp_mb();
4712
4713         if (unlikely(netif_tx_queue_stopped(txq) &&
4714                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4715                 __netif_tx_lock(txq, smp_processor_id());
4716                 if (netif_tx_queue_stopped(txq) &&
4717                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4718                         netif_tx_wake_queue(txq);
4719                 __netif_tx_unlock(txq);
4720         }
4721 }
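
/* Sketch of the barrier pairing the comment above relies on
 * (illustrative; the producer side follows the usual stop-then-recheck
 * pattern in tg3_start_xmit):
 *
 *	consumer (tg3_tx)		producer (tg3_start_xmit)
 *	tnapi->tx_cons = sw_idx;	netif_tx_stop_queue(txq);
 *	smp_mb();			smp_mb();
 *	stopped && avail > thresh?	avail > thresh?
 *		wake queue			wake queue
 *
 * Without both barriers, each side could miss the other's update and
 * leave the queue stopped forever.
 */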
4722
4723 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4724 {
4725         if (!ri->skb)
4726                 return;
4727
4728         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4729                          map_sz, PCI_DMA_FROMDEVICE);
4730         dev_kfree_skb_any(ri->skb);
4731         ri->skb = NULL;
4732 }
4733
4734 /* Returns size of skb allocated or < 0 on error.
4735  *
4736  * We only need to fill in the address because the other members
4737  * of the RX descriptor are invariant, see tg3_init_rings.
4738  *
4739  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
4740  * posting buffers we only dirty the first cache line of the RX
4741  * descriptor (containing the address).  Whereas for the RX status
4742  * buffers the cpu only reads the last cache line of the RX descriptor
4743  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4744  */
4745 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4746                             u32 opaque_key, u32 dest_idx_unmasked)
4747 {
4748         struct tg3_rx_buffer_desc *desc;
4749         struct ring_info *map;
4750         struct sk_buff *skb;
4751         dma_addr_t mapping;
4752         int skb_size, dest_idx;
4753
4754         switch (opaque_key) {
4755         case RXD_OPAQUE_RING_STD:
4756                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4757                 desc = &tpr->rx_std[dest_idx];
4758                 map = &tpr->rx_std_buffers[dest_idx];
4759                 skb_size = tp->rx_pkt_map_sz;
4760                 break;
4761
4762         case RXD_OPAQUE_RING_JUMBO:
4763                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4764                 desc = &tpr->rx_jmb[dest_idx].std;
4765                 map = &tpr->rx_jmb_buffers[dest_idx];
4766                 skb_size = TG3_RX_JMB_MAP_SZ;
4767                 break;
4768
4769         default:
4770                 return -EINVAL;
4771         }
4772
4773         /* Do not overwrite any of the map or rp information
4774          * until we are sure we can commit to a new buffer.
4775          *
4776          * Callers depend upon this behavior and assume that
4777          * we leave everything unchanged if we fail.
4778          */
4779         skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4780         if (skb == NULL)
4781                 return -ENOMEM;
4782
4783         skb_reserve(skb, tp->rx_offset);
4784
4785         mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4786                                  PCI_DMA_FROMDEVICE);
4787         if (pci_dma_mapping_error(tp->pdev, mapping)) {
4788                 dev_kfree_skb(skb);
4789                 return -EIO;
4790         }
4791
4792         map->skb = skb;
4793         dma_unmap_addr_set(map, mapping, mapping);
4794
4795         desc->addr_hi = ((u64)mapping >> 32);
4796         desc->addr_lo = ((u64)mapping & 0xffffffff);
4797
4798         return skb_size;
4799 }
4800
4801 /* We only need to move the address over because the other
4802  * members of the RX descriptor are invariant.  See notes above
4803  * tg3_alloc_rx_skb for full details.
4804  */
4805 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4806                            struct tg3_rx_prodring_set *dpr,
4807                            u32 opaque_key, int src_idx,
4808                            u32 dest_idx_unmasked)
4809 {
4810         struct tg3 *tp = tnapi->tp;
4811         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4812         struct ring_info *src_map, *dest_map;
4813         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4814         int dest_idx;
4815
4816         switch (opaque_key) {
4817         case RXD_OPAQUE_RING_STD:
4818                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4819                 dest_desc = &dpr->rx_std[dest_idx];
4820                 dest_map = &dpr->rx_std_buffers[dest_idx];
4821                 src_desc = &spr->rx_std[src_idx];
4822                 src_map = &spr->rx_std_buffers[src_idx];
4823                 break;
4824
4825         case RXD_OPAQUE_RING_JUMBO:
4826                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4827                 dest_desc = &dpr->rx_jmb[dest_idx].std;
4828                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4829                 src_desc = &spr->rx_jmb[src_idx].std;
4830                 src_map = &spr->rx_jmb_buffers[src_idx];
4831                 break;
4832
4833         default:
4834                 return;
4835         }
4836
4837         dest_map->skb = src_map->skb;
4838         dma_unmap_addr_set(dest_map, mapping,
4839                            dma_unmap_addr(src_map, mapping));
4840         dest_desc->addr_hi = src_desc->addr_hi;
4841         dest_desc->addr_lo = src_desc->addr_lo;
4842
4843         /* Ensure that the update to the skb happens after the physical
4844          * addresses have been transferred to the new BD location.
4845          */
4846         smp_wmb();
4847
4848         src_map->skb = NULL;
4849 }
4850
4851 /* The RX ring scheme is composed of multiple rings which post fresh
4852  * buffers to the chip, and one special ring the chip uses to report
4853  * status back to the host.
4854  *
4855  * The special ring reports the status of received packets to the
4856  * host.  The chip does not write into the original descriptor the
4857  * RX buffer was obtained from.  The chip simply takes the original
4858  * descriptor as provided by the host, updates the status and length
4859  * field, then writes this into the next status ring entry.
4860  *
4861  * Each ring the host uses to post buffers to the chip is described
4862  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
4863  * it is first placed into the on-chip ram.  When the packet's length
4864  * is known, it walks down the TG3_BDINFO entries to select the ring.
4865  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4866  * which is within the range of the new packet's length is chosen.
4867  *
4868  * The "separate ring for rx status" scheme may sound odd, but it makes
4869  * sense from a cache coherency perspective.  If only the host writes
4870  * to the buffer post rings, and only the chip writes to the rx status
4871  * rings, then cache lines never move beyond shared-modified state.
4872  * If both the host and chip were to write into the same ring, cache line
4873  * eviction could occur since both entities want it in an exclusive state.
4874  */
4875 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4876 {
4877         struct tg3 *tp = tnapi->tp;
4878         u32 work_mask, rx_std_posted = 0;
4879         u32 std_prod_idx, jmb_prod_idx;
4880         u32 sw_idx = tnapi->rx_rcb_ptr;
4881         u16 hw_idx;
4882         int received;
4883         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
4884
4885         hw_idx = *(tnapi->rx_rcb_prod_idx);
4886         /*
4887          * We need to order the read of hw_idx and the read of
4888          * the opaque cookie.
4889          */
4890         rmb();
4891         work_mask = 0;
4892         received = 0;
4893         std_prod_idx = tpr->rx_std_prod_idx;
4894         jmb_prod_idx = tpr->rx_jmb_prod_idx;
4895         while (sw_idx != hw_idx && budget > 0) {
4896                 struct ring_info *ri;
4897                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4898                 unsigned int len;
4899                 struct sk_buff *skb;
4900                 dma_addr_t dma_addr;
4901                 u32 opaque_key, desc_idx, *post_ptr;
4902
4903                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4904                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4905                 if (opaque_key == RXD_OPAQUE_RING_STD) {
4906                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
4907                         dma_addr = dma_unmap_addr(ri, mapping);
4908                         skb = ri->skb;
4909                         post_ptr = &std_prod_idx;
4910                         rx_std_posted++;
4911                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4912                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
4913                         dma_addr = dma_unmap_addr(ri, mapping);
4914                         skb = ri->skb;
4915                         post_ptr = &jmb_prod_idx;
4916                 } else
4917                         goto next_pkt_nopost;
4918
4919                 work_mask |= opaque_key;
4920
4921                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4922                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4923                 drop_it:
4924                         tg3_recycle_rx(tnapi, tpr, opaque_key,
4925                                        desc_idx, *post_ptr);
4926                 drop_it_no_recycle:
4927                         /* Other statistics are tracked by the card. */
4928                         tp->rx_dropped++;
4929                         goto next_pkt;
4930                 }
4931
4932                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4933                       ETH_FCS_LEN;
4934
4935                 if (len > TG3_RX_COPY_THRESH(tp)) {
4936                         int skb_size;
4937
4938                         skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4939                                                     *post_ptr);
4940                         if (skb_size < 0)
4941                                 goto drop_it;
4942
4943                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
4944                                          PCI_DMA_FROMDEVICE);
4945
4946                         /* Ensure that the update to the skb happens
4947                          * after the usage of the old DMA mapping.
4948                          */
4949                         smp_wmb();
4950
4951                         ri->skb = NULL;
4952
4953                         skb_put(skb, len);
4954                 } else {
4955                         struct sk_buff *copy_skb;
4956
4957                         tg3_recycle_rx(tnapi, tpr, opaque_key,
4958                                        desc_idx, *post_ptr);
4959
4960                         copy_skb = netdev_alloc_skb(tp->dev, len +
4961                                                     TG3_RAW_IP_ALIGN);
4962                         if (copy_skb == NULL)
4963                                 goto drop_it_no_recycle;
4964
4965                         skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4966                         skb_put(copy_skb, len);
4967                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4968                         skb_copy_from_linear_data(skb, copy_skb->data, len);
4969                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4970
4971                         /* We'll reuse the original ring buffer. */
4972                         skb = copy_skb;
4973                 }
4974
4975                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
4976                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4977                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4978                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
4979                         skb->ip_summed = CHECKSUM_UNNECESSARY;
4980                 else
4981                         skb_checksum_none_assert(skb);
4982
4983                 skb->protocol = eth_type_trans(skb, tp->dev);
4984
4985                 if (len > (tp->dev->mtu + ETH_HLEN) &&
4986                     skb->protocol != htons(ETH_P_8021Q)) {
4987                         dev_kfree_skb(skb);
4988                         goto drop_it_no_recycle;
4989                 }
4990
4991                 if (desc->type_flags & RXD_FLAG_VLAN &&
4992                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
4993                         __vlan_hwaccel_put_tag(skb,
4994                                                desc->err_vlan & RXD_VLAN_MASK);
4995
4996                 napi_gro_receive(&tnapi->napi, skb);
4997
4998                 received++;
4999                 budget--;
5000
5001 next_pkt:
5002                 (*post_ptr)++;
5003
5004                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5005                         tpr->rx_std_prod_idx = std_prod_idx &
5006                                                tp->rx_std_ring_mask;
5007                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5008                                      tpr->rx_std_prod_idx);
5009                         work_mask &= ~RXD_OPAQUE_RING_STD;
5010                         rx_std_posted = 0;
5011                 }
5012 next_pkt_nopost:
5013                 sw_idx++;
5014                 sw_idx &= tp->rx_ret_ring_mask;
5015
5016                 /* Refresh hw_idx to see if there is new work */
5017                 if (sw_idx == hw_idx) {
5018                         hw_idx = *(tnapi->rx_rcb_prod_idx);
5019                         rmb();
5020                 }
5021         }
5022
5023         /* ACK the status ring. */
5024         tnapi->rx_rcb_ptr = sw_idx;
5025         tw32_rx_mbox(tnapi->consmbox, sw_idx);
5026
5027         /* Refill RX ring(s). */
5028         if (!tg3_flag(tp, ENABLE_RSS)) {
5029                 if (work_mask & RXD_OPAQUE_RING_STD) {
5030                         tpr->rx_std_prod_idx = std_prod_idx &
5031                                                tp->rx_std_ring_mask;
5032                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5033                                      tpr->rx_std_prod_idx);
5034                 }
5035                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5036                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
5037                                                tp->rx_jmb_ring_mask;
5038                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5039                                      tpr->rx_jmb_prod_idx);
5040                 }
5041                 mmiowb();
5042         } else if (work_mask) {
5043                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5044                  * updated before the producer indices can be updated.
5045                  */
5046                 smp_wmb();
5047
5048                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5049                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5050
5051                 if (tnapi != &tp->napi[1])
5052                         napi_schedule(&tp->napi[1].napi);
5053         }
5054
5055         return received;
5056 }
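
/* Illustrative sketch (not used by the driver) of the MAXLEN-based
 * TG3_BDINFO walk described in the comment above tg3_rx().  The
 * structure and helper are hypothetical stand-ins for the on-chip
 * BDINFO entries, shown only to make the selection rule concrete.
 */
struct tg3_bdinfo_example {
        u32 maxlen;     /* largest frame this producer ring can hold */
        int ring_id;    /* e.g. standard vs. jumbo ring */
};

static inline int tg3_example_select_ring(const struct tg3_bdinfo_example *bd,
                                          int nrings, u32 pktlen)
{
        int i;

        /* The first TG3_BDINFO whose MAXLEN covers the packet is chosen. */
        for (i = 0; i < nrings; i++)
                if (pktlen <= bd[i].maxlen)
                        return bd[i].ring_id;

        return -1;      /* no ring fits */
}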
5057
5058 static void tg3_poll_link(struct tg3 *tp)
5059 {
5060         /* handle link change and other phy events */
5061         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5062                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5063
5064                 if (sblk->status & SD_STATUS_LINK_CHG) {
5065                         sblk->status = SD_STATUS_UPDATED |
5066                                        (sblk->status & ~SD_STATUS_LINK_CHG);
5067                         spin_lock(&tp->lock);
5068                         if (tg3_flag(tp, USE_PHYLIB)) {
5069                                 tw32_f(MAC_STATUS,
5070                                      (MAC_STATUS_SYNC_CHANGED |
5071                                       MAC_STATUS_CFG_CHANGED |
5072                                       MAC_STATUS_MI_COMPLETION |
5073                                       MAC_STATUS_LNKSTATE_CHANGED));
5074                                 udelay(40);
5075                         } else
5076                                 tg3_setup_phy(tp, 0);
5077                         spin_unlock(&tp->lock);
5078                 }
5079         }
5080 }
5081
5082 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5083                                 struct tg3_rx_prodring_set *dpr,
5084                                 struct tg3_rx_prodring_set *spr)
5085 {
5086         u32 si, di, cpycnt, src_prod_idx;
5087         int i, err = 0;
5088
5089         while (1) {
5090                 src_prod_idx = spr->rx_std_prod_idx;
5091
5092                 /* Make sure updates to the rx_std_buffers[] entries and the
5093                  * standard producer index are seen in the correct order.
5094                  */
5095                 smp_rmb();
5096
5097                 if (spr->rx_std_cons_idx == src_prod_idx)
5098                         break;
5099
5100                 if (spr->rx_std_cons_idx < src_prod_idx)
5101                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5102                 else
5103                         cpycnt = tp->rx_std_ring_mask + 1 -
5104                                  spr->rx_std_cons_idx;
5105
5106                 cpycnt = min(cpycnt,
5107                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5108
5109                 si = spr->rx_std_cons_idx;
5110                 di = dpr->rx_std_prod_idx;
5111
5112                 for (i = di; i < di + cpycnt; i++) {
5113                         if (dpr->rx_std_buffers[i].skb) {
5114                                 cpycnt = i - di;
5115                                 err = -ENOSPC;
5116                                 break;
5117                         }
5118                 }
5119
5120                 if (!cpycnt)
5121                         break;
5122
5123                 /* Ensure that updates to the rx_std_buffers ring and the
5124                  * shadowed hardware producer ring from tg3_recycle_skb() are
5125                  * ordered correctly WRT the skb check above.
5126                  */
5127                 smp_rmb();
5128
5129                 memcpy(&dpr->rx_std_buffers[di],
5130                        &spr->rx_std_buffers[si],
5131                        cpycnt * sizeof(struct ring_info));
5132
5133                 for (i = 0; i < cpycnt; i++, di++, si++) {
5134                         struct tg3_rx_buffer_desc *sbd, *dbd;
5135                         sbd = &spr->rx_std[si];
5136                         dbd = &dpr->rx_std[di];
5137                         dbd->addr_hi = sbd->addr_hi;
5138                         dbd->addr_lo = sbd->addr_lo;
5139                 }
5140
5141                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5142                                        tp->rx_std_ring_mask;
5143                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5144                                        tp->rx_std_ring_mask;
5145         }
5146
5147         while (1) {
5148                 src_prod_idx = spr->rx_jmb_prod_idx;
5149
5150                 /* Make sure updates to the rx_jmb_buffers[] entries and
5151                  * the jumbo producer index are seen in the correct order.
5152                  */
5153                 smp_rmb();
5154
5155                 if (spr->rx_jmb_cons_idx == src_prod_idx)
5156                         break;
5157
5158                 if (spr->rx_jmb_cons_idx < src_prod_idx)
5159                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5160                 else
5161                         cpycnt = tp->rx_jmb_ring_mask + 1 -
5162                                  spr->rx_jmb_cons_idx;
5163
5164                 cpycnt = min(cpycnt,
5165                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5166
5167                 si = spr->rx_jmb_cons_idx;
5168                 di = dpr->rx_jmb_prod_idx;
5169
5170                 for (i = di; i < di + cpycnt; i++) {
5171                         if (dpr->rx_jmb_buffers[i].skb) {
5172                                 cpycnt = i - di;
5173                                 err = -ENOSPC;
5174                                 break;
5175                         }
5176                 }
5177
5178                 if (!cpycnt)
5179                         break;
5180
5181                 /* Ensure that updates to the rx_jmb_buffers ring and the
5182                  * shadowed hardware producer ring from tg3_recycle_skb() are
5183                  * ordered correctly WRT the skb check above.
5184                  */
5185                 smp_rmb();
5186
5187                 memcpy(&dpr->rx_jmb_buffers[di],
5188                        &spr->rx_jmb_buffers[si],
5189                        cpycnt * sizeof(struct ring_info));
5190
5191                 for (i = 0; i < cpycnt; i++, di++, si++) {
5192                         struct tg3_rx_buffer_desc *sbd, *dbd;
5193                         sbd = &spr->rx_jmb[si].std;
5194                         dbd = &dpr->rx_jmb[di].std;
5195                         dbd->addr_hi = sbd->addr_hi;
5196                         dbd->addr_lo = sbd->addr_lo;
5197                 }
5198
5199                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5200                                        tp->rx_jmb_ring_mask;
5201                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5202                                        tp->rx_jmb_ring_mask;
5203         }
5204
5205         return err;
5206 }
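
/* Worked example of the copy-count clamping above (illustrative): with
 * a 512-entry standard ring (mask 511), cons_idx == 500 and
 * prod_idx == 10, the producer has wrapped, so the first pass copies
 * 512 - 500 == 12 entries up to the end of the ring; the loop then
 * comes around and copies the remaining 10 starting from index 0.
 */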
5207
5208 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5209 {
5210         struct tg3 *tp = tnapi->tp;
5211
5212         /* run TX completion thread */
5213         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5214                 tg3_tx(tnapi);
5215                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5216                         return work_done;
5217         }
5218
5219         /* run RX thread, within the bounds set by NAPI.
5220          * All RX "locking" is done by ensuring outside
5221          * code synchronizes with tg3->napi.poll()
5222          */
5223         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5224                 work_done += tg3_rx(tnapi, budget - work_done);
5225
5226         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5227                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5228                 int i, err = 0;
5229                 u32 std_prod_idx = dpr->rx_std_prod_idx;
5230                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5231
5232                 for (i = 1; i < tp->irq_cnt; i++)
5233                         err |= tg3_rx_prodring_xfer(tp, dpr,
5234                                                     &tp->napi[i].prodring);
5235
5236                 wmb();
5237
5238                 if (std_prod_idx != dpr->rx_std_prod_idx)
5239                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5240                                      dpr->rx_std_prod_idx);
5241
5242                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5243                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5244                                      dpr->rx_jmb_prod_idx);
5245
5246                 mmiowb();
5247
5248                 if (err)
5249                         tw32_f(HOSTCC_MODE, tp->coal_now);
5250         }
5251
5252         return work_done;
5253 }
5254
5255 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5256 {
5257         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5258         struct tg3 *tp = tnapi->tp;
5259         int work_done = 0;
5260         struct tg3_hw_status *sblk = tnapi->hw_status;
5261
5262         while (1) {
5263                 work_done = tg3_poll_work(tnapi, work_done, budget);
5264
5265                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5266                         goto tx_recovery;
5267
5268                 if (unlikely(work_done >= budget))
5269                         break;
5270
5271                 /* tp->last_tag is used in tg3_int_reenable() below
5272                  * to tell the hw how much work has been processed,
5273                  * so we must read it before checking for more work.
5274                  */
5275                 tnapi->last_tag = sblk->status_tag;
5276                 tnapi->last_irq_tag = tnapi->last_tag;
5277                 rmb();
5278
5279                 /* check for RX/TX work to do */
5280                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5281                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5282                         napi_complete(napi);
5283                         /* Reenable interrupts. */
5284                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5285                         mmiowb();
5286                         break;
5287                 }
5288         }
5289
5290         return work_done;
5291
5292 tx_recovery:
5293         /* work_done is guaranteed to be less than budget. */
5294         napi_complete(napi);
5295         schedule_work(&tp->reset_task);
5296         return work_done;
5297 }
5298
5299 static void tg3_process_error(struct tg3 *tp)
5300 {
5301         u32 val;
5302         bool real_error = false;
5303
5304         if (tg3_flag(tp, ERROR_PROCESSED))
5305                 return;
5306
5307         /* Check Flow Attention register */
5308         val = tr32(HOSTCC_FLOW_ATTN);
5309         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5310                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
5311                 real_error = true;
5312         }
5313
5314         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5315                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
5316                 real_error = true;
5317         }
5318
5319         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5320                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
5321                 real_error = true;
5322         }
5323
5324         if (!real_error)
5325                 return;
5326
5327         tg3_dump_state(tp);
5328
5329         tg3_flag_set(tp, ERROR_PROCESSED);
5330         schedule_work(&tp->reset_task);
5331 }
5332
5333 static int tg3_poll(struct napi_struct *napi, int budget)
5334 {
5335         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5336         struct tg3 *tp = tnapi->tp;
5337         int work_done = 0;
5338         struct tg3_hw_status *sblk = tnapi->hw_status;
5339
5340         while (1) {
5341                 if (sblk->status & SD_STATUS_ERROR)
5342                         tg3_process_error(tp);
5343
5344                 tg3_poll_link(tp);
5345
5346                 work_done = tg3_poll_work(tnapi, work_done, budget);
5347
5348                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5349                         goto tx_recovery;
5350
5351                 if (unlikely(work_done >= budget))
5352                         break;
5353
5354                 if (tg3_flag(tp, TAGGED_STATUS)) {
5355                         /* tp->last_tag is used in tg3_int_reenable() below
5356                          * to tell the hw how much work has been processed,
5357                          * so we must read it before checking for more work.
5358                          */
5359                         tnapi->last_tag = sblk->status_tag;
5360                         tnapi->last_irq_tag = tnapi->last_tag;
5361                         rmb();
5362                 } else
5363                         sblk->status &= ~SD_STATUS_UPDATED;
5364
5365                 if (likely(!tg3_has_work(tnapi))) {
5366                         napi_complete(napi);
5367                         tg3_int_reenable(tnapi);
5368                         break;
5369                 }
5370         }
5371
5372         return work_done;
5373
5374 tx_recovery:
5375         /* work_done is guaranteed to be less than budget. */
5376         napi_complete(napi);
5377         schedule_work(&tp->reset_task);
5378         return work_done;
5379 }
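
/* The two completion schemes handled above (illustrative summary):
 * with TAGGED_STATUS, the hardware bumps status_tag on every status
 * block update, and tg3_int_reenable() echoes last_tag back so the
 * hardware knows how much work was processed; in legacy mode, the
 * driver clears SD_STATUS_UPDATED and the hardware sets it again on
 * the next update.
 */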
5380
5381 static void tg3_napi_disable(struct tg3 *tp)
5382 {
5383         int i;
5384
5385         for (i = tp->irq_cnt - 1; i >= 0; i--)
5386                 napi_disable(&tp->napi[i].napi);
5387 }
5388
5389 static void tg3_napi_enable(struct tg3 *tp)
5390 {
5391         int i;
5392
5393         for (i = 0; i < tp->irq_cnt; i++)
5394                 napi_enable(&tp->napi[i].napi);
5395 }
5396
5397 static void tg3_napi_init(struct tg3 *tp)
5398 {
5399         int i;
5400
5401         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5402         for (i = 1; i < tp->irq_cnt; i++)
5403                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5404 }
5405
5406 static void tg3_napi_fini(struct tg3 *tp)
5407 {
5408         int i;
5409
5410         for (i = 0; i < tp->irq_cnt; i++)
5411                 netif_napi_del(&tp->napi[i].napi);
5412 }
5413
5414 static inline void tg3_netif_stop(struct tg3 *tp)
5415 {
5416         tp->dev->trans_start = jiffies; /* prevent tx timeout */
5417         tg3_napi_disable(tp);
5418         netif_tx_disable(tp->dev);
5419 }
5420
5421 static inline void tg3_netif_start(struct tg3 *tp)
5422 {
5423         /* NOTE: unconditional netif_tx_wake_all_queues is only
5424          * appropriate so long as all callers are assured to
5425          * have free tx slots (such as after tg3_init_hw)
5426          */
5427         netif_tx_wake_all_queues(tp->dev);
5428
5429         tg3_napi_enable(tp);
5430         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5431         tg3_enable_ints(tp);
5432 }
5433
5434 static void tg3_irq_quiesce(struct tg3 *tp)
5435 {
5436         int i;
5437
5438         BUG_ON(tp->irq_sync);
5439
5440         tp->irq_sync = 1;
5441         smp_mb();
5442
5443         for (i = 0; i < tp->irq_cnt; i++)
5444                 synchronize_irq(tp->napi[i].irq_vec);
5445 }
5446
5447 /* Fully shut down all tg3 driver activity elsewhere in the system.
5448  * If irq_sync is non-zero, the IRQ handlers must be quiesced as
5449  * well.  Most of the time, this is not necessary except when
5450  * shutting down the device.
5451  */
5452 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5453 {
5454         spin_lock_bh(&tp->lock);
5455         if (irq_sync)
5456                 tg3_irq_quiesce(tp);
5457 }
5458
5459 static inline void tg3_full_unlock(struct tg3 *tp)
5460 {
5461         spin_unlock_bh(&tp->lock);
5462 }
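
/* Typical use of the helpers above (illustrative sketch modeled on the
 * driver's reset paths, not a verbatim excerpt):
 *
 *	tg3_full_lock(tp, 1);	(irq_sync != 0 also quiesces the IRQ
 *				 handlers via tg3_irq_quiesce())
 *	... halt or reconfigure the chip ...
 *	tg3_full_unlock(tp);
 */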
5463
5464 /* One-shot MSI handler - Chip automatically disables interrupt
5465  * after sending MSI so driver doesn't have to do it.
5466  */
5467 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5468 {
5469         struct tg3_napi *tnapi = dev_id;
5470         struct tg3 *tp = tnapi->tp;
5471
5472         prefetch(tnapi->hw_status);
5473         if (tnapi->rx_rcb)
5474                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5475
5476         if (likely(!tg3_irq_sync(tp)))
5477                 napi_schedule(&tnapi->napi);
5478
5479         return IRQ_HANDLED;
5480 }
5481
5482 /* MSI ISR - No need to check for interrupt sharing and no need to
5483  * flush status block and interrupt mailbox. PCI ordering rules
5484  * guarantee that MSI will arrive after the status block.
5485  */
5486 static irqreturn_t tg3_msi(int irq, void *dev_id)
5487 {
5488         struct tg3_napi *tnapi = dev_id;
5489         struct tg3 *tp = tnapi->tp;
5490
5491         prefetch(tnapi->hw_status);
5492         if (tnapi->rx_rcb)
5493                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5494         /*
5495          * Writing any value to intr-mbox-0 clears PCI INTA# and
5496          * chip-internal interrupt pending events.
5497          * Writing non-zero to intr-mbox-0 additionally tells the
5498          * NIC to stop sending us irqs, engaging "in-intr-handler"
5499          * event coalescing.
5500          */
5501         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5502         if (likely(!tg3_irq_sync(tp)))
5503                 napi_schedule(&tnapi->napi);
5504
5505         return IRQ_RETVAL(1);
5506 }
5507
5508 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5509 {
5510         struct tg3_napi *tnapi = dev_id;
5511         struct tg3 *tp = tnapi->tp;
5512         struct tg3_hw_status *sblk = tnapi->hw_status;
5513         unsigned int handled = 1;
5514
5515         /* In INTx mode, the interrupt can arrive at the CPU before the
5516          * status block write posted just prior to it is visible.
5517          * Reading the PCI State register will confirm whether the
5518          * interrupt is ours and will flush the status block.
5519          */
5520         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5521                 if (tg3_flag(tp, CHIP_RESETTING) ||
5522                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5523                         handled = 0;
5524                         goto out;
5525                 }
5526         }
5527
5528         /*
5529          * Writing any value to intr-mbox-0 clears PCI INTA# and
5530          * chip-internal interrupt pending events.
5531          * Writing non-zero to intr-mbox-0 additionally tells the
5532          * NIC to stop sending us irqs, engaging "in-intr-handler"
5533          * event coalescing.
5534          *
5535          * Flush the mailbox to de-assert the IRQ immediately to prevent
5536          * spurious interrupts.  The flush impacts performance but
5537          * excessive spurious interrupts can be worse in some cases.
5538          */
5539         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5540         if (tg3_irq_sync(tp))
5541                 goto out;
5542         sblk->status &= ~SD_STATUS_UPDATED;
5543         if (likely(tg3_has_work(tnapi))) {
5544                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5545                 napi_schedule(&tnapi->napi);
5546         } else {
5547                 /* No work, shared interrupt perhaps?  Re-enable
5548                  * interrupts, and flush that PCI write.
5549                  */
5550                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5551                                0x00000000);
5552         }
5553 out:
5554         return IRQ_RETVAL(handled);
5555 }
5556
5557 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5558 {
5559         struct tg3_napi *tnapi = dev_id;
5560         struct tg3 *tp = tnapi->tp;
5561         struct tg3_hw_status *sblk = tnapi->hw_status;
5562         unsigned int handled = 1;
5563
5564         /* In INTx mode, it is possible for the interrupt to arrive at
5565          * the CPU before the previously posted status block is visible.
5566          * Reading the PCI State register will confirm whether the
5567          * interrupt is ours and will flush the status block.
5568          */
5569         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5570                 if (tg3_flag(tp, CHIP_RESETTING) ||
5571                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5572                         handled = 0;
5573                         goto out;
5574                 }
5575         }
5576
5577         /*
5578          * Writing any value to intr-mbox-0 clears PCI INTA# and
5579          * chip-internal interrupt pending events.
5580          * Writing non-zero to intr-mbox-0 additionally tells the
5581          * NIC to stop sending us irqs, engaging "in-intr-handler"
5582          * event coalescing.
5583          *
5584          * Flush the mailbox to de-assert the IRQ immediately to prevent
5585          * spurious interrupts.  The flush impacts performance but
5586          * excessive spurious interrupts can be worse in some cases.
5587          */
5588         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5589
5590         /*
5591          * In a shared interrupt configuration, sometimes other devices'
5592          * interrupts will scream.  We record the current status tag here
5593          * so that the above check can report that the screaming interrupts
5594          * are unhandled.  Eventually they will be silenced.
5595          */
5596         tnapi->last_irq_tag = sblk->status_tag;
5597
5598         if (tg3_irq_sync(tp))
5599                 goto out;
5600
5601         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5602
5603         napi_schedule(&tnapi->napi);
5604
5605 out:
5606         return IRQ_RETVAL(handled);
5607 }
5608
5609 /* ISR for interrupt test */
5610 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5611 {
5612         struct tg3_napi *tnapi = dev_id;
5613         struct tg3 *tp = tnapi->tp;
5614         struct tg3_hw_status *sblk = tnapi->hw_status;
5615
5616         if ((sblk->status & SD_STATUS_UPDATED) ||
5617             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5618                 tg3_disable_ints(tp);
5619                 return IRQ_RETVAL(1);
5620         }
5621         return IRQ_RETVAL(0);
5622 }
5623
5624 static int tg3_init_hw(struct tg3 *, int);
5625 static int tg3_halt(struct tg3 *, int, int);
5626
5627 /* Restart hardware after configuration changes, self-test, etc.
5628  * Invoked with tp->lock held.
5629  */
5630 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5631         __releases(tp->lock)
5632         __acquires(tp->lock)
5633 {
5634         int err;
5635
5636         err = tg3_init_hw(tp, reset_phy);
5637         if (err) {
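                     /* Re-init failed: shut the chip back down and close
                      * the device.  dev_close() must run without tp->lock
                      * held, hence the unlock/re-lock around it (see the
                      * sparse annotations above).
                      */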
5638                 netdev_err(tp->dev,
5639                            "Failed to re-initialize device, aborting\n");
5640                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5641                 tg3_full_unlock(tp);
5642                 del_timer_sync(&tp->timer);
5643                 tp->irq_sync = 0;
5644                 tg3_napi_enable(tp);
5645                 dev_close(tp->dev);
5646                 tg3_full_lock(tp, 0);
5647         }
5648         return err;
5649 }
5650
5651 #ifdef CONFIG_NET_POLL_CONTROLLER
5652 static void tg3_poll_controller(struct net_device *dev)
5653 {
5654         int i;
5655         struct tg3 *tp = netdev_priv(dev);
5656
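             /* Netpoll path: invoke the INTx handler directly for each
              * vector, since normal interrupt delivery may be unavailable
              * in this context.
              */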
5657         for (i = 0; i < tp->irq_cnt; i++)
5658                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5659 }
5660 #endif
5661
5662 static void tg3_reset_task(struct work_struct *work)
5663 {
5664         struct tg3 *tp = container_of(work, struct tg3, reset_task);
5665         int err;
5666         unsigned int restart_timer;
5667
5668         tg3_full_lock(tp, 0);
5669
5670         if (!netif_running(tp->dev)) {
5671                 tg3_full_unlock(tp);
5672                 return;
5673         }
5674
5675         tg3_full_unlock(tp);
5676
5677         tg3_phy_stop(tp);
5678
5679         tg3_netif_stop(tp);
5680
5681         tg3_full_lock(tp, 1);
5682
5683         restart_timer = tg3_flag(tp, RESTART_TIMER);
5684         tg3_flag_clear(tp, RESTART_TIMER);
5685
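             /* A pending tx recovery suggests that mailbox writes were
              * reordered on their way to the chip; fall back to the
              * flushing mailbox accessors as a workaround.
              */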
5686         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
5687                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5688                 tp->write32_rx_mbox = tg3_write_flush_reg32;
5689                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5690                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
5691         }
5692
5693         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5694         err = tg3_init_hw(tp, 1);
5695         if (err)
5696                 goto out;
5697
5698         tg3_netif_start(tp);
5699
5700         if (restart_timer)
5701                 mod_timer(&tp->timer, jiffies + 1);
5702
5703 out:
5704         tg3_full_unlock(tp);
5705
5706         if (!err)
5707                 tg3_phy_start(tp);
5708 }
5709
5710 static void tg3_tx_timeout(struct net_device *dev)
5711 {
5712         struct tg3 *tp = netdev_priv(dev);
5713
5714         if (netif_msg_tx_err(tp)) {
5715                 netdev_err(dev, "transmit timed out, resetting\n");
5716                 tg3_dump_state(tp);
5717         }
5718
5719         schedule_work(&tp->reset_task);
5720 }
5721
5722 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
5723 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5724 {
5725         u32 base = (u32) mapping & 0xffffffff;
5726
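             /* The second clause below detects 32-bit wraparound of the
              * buffer end (base + len + 8 guard bytes).  The first clause
              * is a cheap filter: only a base within 0x2340 bytes of a
              * 4 GB boundary can possibly wrap for a maximum-sized frame.
              */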
5727         return (base > 0xffffdcc0) && (base + len + 8 < base);
5728 }
5729
5730 /* Test for DMA addresses > 40-bit */
5731 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5732                                           int len)
5733 {
5734 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
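             /* DMA addresses above 40 bits are only reachable in this
              * configuration; elsewhere the test compiles away to 0.
              */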
5735         if (tg3_flag(tp, 40BIT_DMA_BUG))
5736                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5737         return 0;
5738 #else
5739         return 0;
5740 #endif
5741 }
5742
5743 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5744                         dma_addr_t mapping, int len, u32 flags,
5745                         u32 mss_and_is_end)
5746 {
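             /* mss_and_is_end packs the end-of-packet flag into bit 0 and
              * the MSS into the upper bits; unpack them here and merge
              * them into the descriptor fields.
              */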
5747         struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5748         int is_end = (mss_and_is_end & 0x1);
5749         u32 mss = (mss_and_is_end >> 1);
5750         u32 vlan_tag = 0;
5751
5752         if (is_end)
5753                 flags |= TXD_FLAG_END;
5754         if (flags & TXD_FLAG_VLAN) {
5755                 vlan_tag = flags >> 16;
5756                 flags &= 0xffff;
5757         }
5758         vlan_tag |= (mss << TXD_MSS_SHIFT);
5759
5760         txd->addr_hi = ((u64) mapping >> 32);
5761         txd->addr_lo = ((u64) mapping & 0xffffffff);
5762         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5763         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
5764 }
5765
5766 static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
5767                                 struct sk_buff *skb, int last)
5768 {
5769         int i;
5770         u32 entry = tnapi->tx_prod;
5771         struct ring_info *txb = &tnapi->tx_buffers[entry];
5772
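             /* Unmap the linear part at tx_prod first, then walk forward
              * and unmap each of the fragment pages that were queued.
              */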
5773         pci_unmap_single(tnapi->tp->pdev,
5774                          dma_unmap_addr(txb, mapping),
5775                          skb_headlen(skb),
5776                          PCI_DMA_TODEVICE);
5777         for (i = 0; i < last; i++) {
5778                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5779
5780                 entry = NEXT_TX(entry);
5781                 txb = &tnapi->tx_buffers[entry];
5782
5783                 pci_unmap_page(tnapi->tp->pdev,
5784                                dma_unmap_addr(txb, mapping),
5785                                frag->size, PCI_DMA_TODEVICE);
5786         }
5787 }
5788
5789 /* Workaround 4GB and 40-bit hardware DMA bugs. */
5790 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5791                                        struct sk_buff *skb,
5792                                        u32 base_flags, u32 mss)
5793 {
5794         struct tg3 *tp = tnapi->tp;
5795         struct sk_buff *new_skb;
5796         dma_addr_t new_addr = 0;
5797         u32 entry = tnapi->tx_prod;
5798         int ret = 0;
5799
5800         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5801                 new_skb = skb_copy(skb, GFP_ATOMIC);
5802         else {
5803                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
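                     /* Expand headroom so the copied data can start on a
                      * 4-byte boundary; the 5701 apparently cannot handle
                      * unaligned tx buffers.
                      */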
5804
5805                 new_skb = skb_copy_expand(skb,
5806                                           skb_headroom(skb) + more_headroom,
5807                                           skb_tailroom(skb), GFP_ATOMIC);
5808         }
5809
5810         if (!new_skb) {
5811                 ret = -1;
5812         } else {
5813                 /* New SKB is guaranteed to be linear. */
5814                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5815                                           PCI_DMA_TODEVICE);
5816                 /* Make sure the mapping succeeded */
5817                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5818                         ret = -1;
5819                         dev_kfree_skb(new_skb);
5820
5821                 /* Make sure new skb does not cross any 4G boundaries.
5822                  * Drop the packet if it does.
5823                  */
5824                 } else if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
5825                            tg3_4g_overflow_test(new_addr, new_skb->len)) {
5826                         pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5827                                          PCI_DMA_TODEVICE);
5828                         ret = -1;
5829                         dev_kfree_skb(new_skb);
5830                 } else {
5831                         tnapi->tx_buffers[entry].skb = new_skb;
5832                         dma_unmap_addr_set(&tnapi->tx_buffers[entry],
5833                                            mapping, new_addr);
5834
5835                         tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5836                                     base_flags, 1 | (mss << 1));
5837                 }
5838         }
5839
5840         dev_kfree_skb(skb);
5841
5842         return ret;
5843 }
5844
5845 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
5846
5847 /* Use GSO to work around a rare TSO bug that may be triggered when the
5848  * TSO header is greater than 80 bytes.
5849  */
5850 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5851 {
5852         struct sk_buff *segs, *nskb;
5853         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5854
5855         /* Estimate the number of fragments in the worst case */
5856         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5857                 netif_stop_queue(tp->dev);
5858
5859                 /* netif_tx_stop_queue() must be done before checking
5860                  * tx index in tg3_tx_avail() below, because in
5861                  * tg3_tx(), we update tx index before checking for
5862                  * netif_tx_queue_stopped().
5863                  */
5864                 smp_mb();
5865                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5866                         return NETDEV_TX_BUSY;
5867
5868                 netif_wake_queue(tp->dev);
5869         }
5870
5871         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5872         if (IS_ERR(segs))
5873                 goto tg3_tso_bug_end;
5874
5875         do {
5876                 nskb = segs;
5877                 segs = segs->next;
5878                 nskb->next = NULL;
5879                 tg3_start_xmit(nskb, tp->dev);
5880         } while (segs);
5881
5882 tg3_tso_bug_end:
5883         dev_kfree_skb(skb);
5884
5885         return NETDEV_TX_OK;
5886 }
5887
5888 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5889  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
5890  */
5891 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
5892 {
5893         struct tg3 *tp = netdev_priv(dev);
5894         u32 len, entry, base_flags, mss;
5895         int i = -1, would_hit_hwbug;
5896         dma_addr_t mapping;
5897         struct tg3_napi *tnapi;
5898         struct netdev_queue *txq;
5899         unsigned int last;
5900
5901         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5902         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5903         if (tg3_flag(tp, ENABLE_TSS))
5904                 tnapi++;
5905
5906         /* We are running in BH disabled context with netif_tx_lock
5907          * and TX reclaim runs via tp->napi.poll inside of a software
5908          * interrupt.  Furthermore, IRQ processing runs lockless so we have
5909          * no IRQ context deadlocks to worry about either.  Rejoice!
5910          */
5911         if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5912                 if (!netif_tx_queue_stopped(txq)) {
5913                         netif_tx_stop_queue(txq);
5914
5915                         /* This is a hard error, log it. */
5916                         netdev_err(dev,
5917                                    "BUG! Tx Ring full when queue awake!\n");
5918                 }
5919                 return NETDEV_TX_BUSY;
5920         }
5921
5922         entry = tnapi->tx_prod;
5923         base_flags = 0;
5924         if (skb->ip_summed == CHECKSUM_PARTIAL)
5925                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5926
5927         mss = skb_shinfo(skb)->gso_size;
5928         if (mss) {
5929                 struct iphdr *iph;
5930                 u32 tcp_opt_len, hdr_len;
5931
5932                 if (skb_header_cloned(skb) &&
5933                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5934                         dev_kfree_skb(skb);
5935                         goto out_unlock;
5936                 }
5937
5938                 iph = ip_hdr(skb);
5939                 tcp_opt_len = tcp_optlen(skb);
5940
5941                 if (skb_is_gso_v6(skb)) {
5942                         hdr_len = skb_headlen(skb) - ETH_HLEN;
5943                 } else {
5944                         u32 ip_tcp_len;
5945
5946                         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5947                         hdr_len = ip_tcp_len + tcp_opt_len;
5948
5949                         iph->check = 0;
5950                         iph->tot_len = htons(mss + hdr_len);
5951                 }
5952
5953                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5954                     tg3_flag(tp, TSO_BUG))
5955                         return tg3_tso_bug(tp, skb);
5956
5957                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5958                                TXD_FLAG_CPU_POST_DMA);
5959
5960                 if (tg3_flag(tp, HW_TSO_1) ||
5961                     tg3_flag(tp, HW_TSO_2) ||
5962                     tg3_flag(tp, HW_TSO_3)) {
5963                         tcp_hdr(skb)->check = 0;
5964                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5965                 } else
5966                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5967                                                                  iph->daddr, 0,
5968                                                                  IPPROTO_TCP,
5969                                                                  0);
5970
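                     /* HW_TSO_3 chips have no single field wide enough for
                      * the full header length, so its bits are scattered
                      * across the mss word and base_flags below.
                      */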
5971                 if (tg3_flag(tp, HW_TSO_3)) {
5972                         mss |= (hdr_len & 0xc) << 12;
5973                         if (hdr_len & 0x10)
5974                                 base_flags |= 0x00000010;
5975                         base_flags |= (hdr_len & 0x3e0) << 5;
5976                 } else if (tg3_flag(tp, HW_TSO_2))
5977                         mss |= hdr_len << 9;
5978                 else if (tg3_flag(tp, HW_TSO_1) ||
5979                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5980                         if (tcp_opt_len || iph->ihl > 5) {
5981                                 int tsflags;
5982
5983                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5984                                 mss |= (tsflags << 11);
5985                         }
5986                 } else {
5987                         if (tcp_opt_len || iph->ihl > 5) {
5988                                 int tsflags;
5989
5990                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5991                                 base_flags |= tsflags << 12;
5992                         }
5993                 }
5994         }
5995
5996         if (vlan_tx_tag_present(skb))
5997                 base_flags |= (TXD_FLAG_VLAN |
5998                                (vlan_tx_tag_get(skb) << 16));
5999
6000         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6001             !mss && skb->len > VLAN_ETH_FRAME_LEN)
6002                 base_flags |= TXD_FLAG_JMB_PKT;
6003
6004         len = skb_headlen(skb);
6005
6006         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6007         if (pci_dma_mapping_error(tp->pdev, mapping)) {
6008                 dev_kfree_skb(skb);
6009                 goto out_unlock;
6010         }
6011
6012         tnapi->tx_buffers[entry].skb = skb;
6013         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6014
6015         would_hit_hwbug = 0;
6016
6017         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6018                 would_hit_hwbug = 1;
6019
6020         if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
6021             tg3_4g_overflow_test(mapping, len))
6022                 would_hit_hwbug = 1;
6023
6024         if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
6025             tg3_40bit_overflow_test(tp, mapping, len))
6026                 would_hit_hwbug = 1;
6027
6028         if (tg3_flag(tp, 5701_DMA_BUG))
6029                 would_hit_hwbug = 1;
6030
6031         tg3_set_txd(tnapi, entry, mapping, len, base_flags,
6032                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
6033
6034         entry = NEXT_TX(entry);
6035
6036         /* Now loop through additional data fragments, and queue them. */
6037         if (skb_shinfo(skb)->nr_frags > 0) {
6038                 last = skb_shinfo(skb)->nr_frags - 1;
6039                 for (i = 0; i <= last; i++) {
6040                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6041
6042                         len = frag->size;
6043                         mapping = pci_map_page(tp->pdev,
6044                                                frag->page,
6045                                                frag->page_offset,
6046                                                len, PCI_DMA_TODEVICE);
6047
6048                         tnapi->tx_buffers[entry].skb = NULL;
6049                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6050                                            mapping);
6051                         if (pci_dma_mapping_error(tp->pdev, mapping))
6052                                 goto dma_error;
6053
6054                         if (tg3_flag(tp, SHORT_DMA_BUG) &&
6055                             len <= 8)
6056                                 would_hit_hwbug = 1;
6057
6058                         if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
6059                             tg3_4g_overflow_test(mapping, len))
6060                                 would_hit_hwbug = 1;
6061
6062                         if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
6063                             tg3_40bit_overflow_test(tp, mapping, len))
6064                                 would_hit_hwbug = 1;
6065
6066                         if (tg3_flag(tp, HW_TSO_1) ||
6067                             tg3_flag(tp, HW_TSO_2) ||
6068                             tg3_flag(tp, HW_TSO_3))
6069                                 tg3_set_txd(tnapi, entry, mapping, len,
6070                                             base_flags, (i == last)|(mss << 1));
6071                         else
6072                                 tg3_set_txd(tnapi, entry, mapping, len,
6073                                             base_flags, (i == last));
6074
6075                         entry = NEXT_TX(entry);
6076                 }
6077         }
6078
6079         if (would_hit_hwbug) {
6080                 tg3_skb_error_unmap(tnapi, skb, i);
6081
6082                 /* If the workaround fails due to memory/mapping
6083                  * failure, silently drop this packet.
6084                  */
6085                 if (tigon3_dma_hwbug_workaround(tnapi, skb, base_flags, mss))
6086                         goto out_unlock;
6087
6088                 entry = NEXT_TX(tnapi->tx_prod);
6089         }
6090
6091         /* Packets are ready, update Tx producer idx local and on card. */
6092         tw32_tx_mbox(tnapi->prodmbox, entry);
6093
6094         tnapi->tx_prod = entry;
6095         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6096                 netif_tx_stop_queue(txq);
6097
6098                 /* netif_tx_stop_queue() must be done before checking
6099                  * tx index in tg3_tx_avail() below, because in
6100                  * tg3_tx(), we update tx index before checking for
6101                  * netif_tx_queue_stopped().
6102                  */
6103                 smp_mb();
6104                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6105                         netif_tx_wake_queue(txq);
6106         }
6107
6108 out_unlock:
6109         mmiowb();
6110
6111         return NETDEV_TX_OK;
6112
6113 dma_error:
6114         tg3_skb_error_unmap(tnapi, skb, i);
6115         dev_kfree_skb(skb);
6116         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6117         return NETDEV_TX_OK;
6118 }
6119
6120 static void tg3_set_loopback(struct net_device *dev, u32 features)
6121 {
6122         struct tg3 *tp = netdev_priv(dev);
6123
6124         if (features & NETIF_F_LOOPBACK) {
6125                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6126                         return;
6127
6128                 /*
6129                  * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
6130                  * loopback mode if Half-Duplex mode was negotiated earlier.
6131                  */
6132                 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
6133
6134                 /* Enable internal MAC loopback mode */
6135                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6136                 spin_lock_bh(&tp->lock);
6137                 tw32(MAC_MODE, tp->mac_mode);
6138                 netif_carrier_on(tp->dev);
6139                 spin_unlock_bh(&tp->lock);
6140                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6141         } else {
6142                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6143                         return;
6144
6145                 /* Disable internal MAC loopback mode */
6146                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6147                 spin_lock_bh(&tp->lock);
6148                 tw32(MAC_MODE, tp->mac_mode);
6149                 /* Force link status check */
6150                 tg3_setup_phy(tp, 1);
6151                 spin_unlock_bh(&tp->lock);
6152                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6153         }
6154 }
6155
6156 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6157 {
6158         struct tg3 *tp = netdev_priv(dev);
6159
6160         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6161                 features &= ~NETIF_F_ALL_TSO;
6162
6163         return features;
6164 }
6165
6166 static int tg3_set_features(struct net_device *dev, u32 features)
6167 {
6168         u32 changed = dev->features ^ features;
6169
6170         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6171                 tg3_set_loopback(dev, features);
6172
6173         return 0;
6174 }
6175
6176 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6177                                int new_mtu)
6178 {
6179         dev->mtu = new_mtu;
6180
6181         if (new_mtu > ETH_DATA_LEN) {
6182                 if (tg3_flag(tp, 5780_CLASS)) {
6183                         netdev_update_features(dev);
6184                         tg3_flag_clear(tp, TSO_CAPABLE);
6185                 } else {
6186                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
6187                 }
6188         } else {
6189                 if (tg3_flag(tp, 5780_CLASS)) {
6190                         tg3_flag_set(tp, TSO_CAPABLE);
6191                         netdev_update_features(dev);
6192                 }
6193                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
6194         }
6195 }
6196
6197 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6198 {
6199         struct tg3 *tp = netdev_priv(dev);
6200         int err;
6201
6202         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6203                 return -EINVAL;
6204
6205         if (!netif_running(dev)) {
6206                 /* We'll just catch it later when the
6207                  * device is brought up.
6208                  */
6209                 tg3_set_mtu(dev, tp, new_mtu);
6210                 return 0;
6211         }
6212
6213         tg3_phy_stop(tp);
6214
6215         tg3_netif_stop(tp);
6216
6217         tg3_full_lock(tp, 1);
6218
6219         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6220
6221         tg3_set_mtu(dev, tp, new_mtu);
6222
6223         err = tg3_restart_hw(tp, 0);
6224
6225         if (!err)
6226                 tg3_netif_start(tp);
6227
6228         tg3_full_unlock(tp);
6229
6230         if (!err)
6231                 tg3_phy_start(tp);
6232
6233         return err;
6234 }
6235
6236 static void tg3_rx_prodring_free(struct tg3 *tp,
6237                                  struct tg3_rx_prodring_set *tpr)
6238 {
6239         int i;
6240
6241         if (tpr != &tp->napi[0].prodring) {
6242                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6243                      i = (i + 1) & tp->rx_std_ring_mask)
6244                         tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6245                                         tp->rx_pkt_map_sz);
6246
6247                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
6248                         for (i = tpr->rx_jmb_cons_idx;
6249                              i != tpr->rx_jmb_prod_idx;
6250                              i = (i + 1) & tp->rx_jmb_ring_mask) {
6251                                 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6252                                                 TG3_RX_JMB_MAP_SZ);
6253                         }
6254                 }
6255
6256                 return;
6257         }
6258
6259         for (i = 0; i <= tp->rx_std_ring_mask; i++)
6260                 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6261                                 tp->rx_pkt_map_sz);
6262
6263         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6264                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6265                         tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6266                                         TG3_RX_JMB_MAP_SZ);
6267         }
6268 }
6269
6270 /* Initialize rx rings for packet processing.
6271  *
6272  * The chip has been shut down and the driver detached from
6273  * the networking stack, so no interrupts or new tx packets will
6274  * end up in the driver.  tp->{tx,}lock are held and thus
6275  * we may not sleep.
6276  */
6277 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6278                                  struct tg3_rx_prodring_set *tpr)
6279 {
6280         u32 i, rx_pkt_dma_sz;
6281
6282         tpr->rx_std_cons_idx = 0;
6283         tpr->rx_std_prod_idx = 0;
6284         tpr->rx_jmb_cons_idx = 0;
6285         tpr->rx_jmb_prod_idx = 0;
6286
6287         if (tpr != &tp->napi[0].prodring) {
6288                 memset(&tpr->rx_std_buffers[0], 0,
6289                        TG3_RX_STD_BUFF_RING_SIZE(tp));
6290                 if (tpr->rx_jmb_buffers)
6291                         memset(&tpr->rx_jmb_buffers[0], 0,
6292                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
6293                 goto done;
6294         }
6295
6296         /* Zero out all descriptors. */
6297         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6298
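             /* 5780-class chips have no separate jumbo ring; with a jumbo
              * MTU they simply use larger standard-ring buffers instead.
              */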
6299         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6300         if (tg3_flag(tp, 5780_CLASS) &&
6301             tp->dev->mtu > ETH_DATA_LEN)
6302                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6303         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6304
6305         /* Initialize invariants of the rings; we only set this
6306          * stuff once.  This works because the card does not
6307          * write into the rx buffer posting rings.
6308          */
6309         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6310                 struct tg3_rx_buffer_desc *rxd;
6311
6312                 rxd = &tpr->rx_std[i];
6313                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6314                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6315                 rxd->opaque = (RXD_OPAQUE_RING_STD |
6316                                (i << RXD_OPAQUE_INDEX_SHIFT));
6317         }
6318
6319         /* Now allocate fresh SKBs for each rx ring. */
6320         for (i = 0; i < tp->rx_pending; i++) {
6321                 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6322                         netdev_warn(tp->dev,
6323                                     "Using a smaller RX standard ring. Only "
6324                                     "%d out of %d buffers were allocated "
6325                                     "successfully\n", i, tp->rx_pending);
6326                         if (i == 0)
6327                                 goto initfail;
6328                         tp->rx_pending = i;
6329                         break;
6330                 }
6331         }
6332
6333         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
6334                 goto done;
6335
6336         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6337
6338         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
6339                 goto done;
6340
6341         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6342                 struct tg3_rx_buffer_desc *rxd;
6343
6344                 rxd = &tpr->rx_jmb[i].std;
6345                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6346                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6347                                   RXD_FLAG_JUMBO;
6348                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6349                        (i << RXD_OPAQUE_INDEX_SHIFT));
6350         }
6351
6352         for (i = 0; i < tp->rx_jumbo_pending; i++) {
6353                 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6354                         netdev_warn(tp->dev,
6355                                     "Using a smaller RX jumbo ring. Only %d "
6356                                     "out of %d buffers were allocated "
6357                                     "successfully\n", i, tp->rx_jumbo_pending);
6358                         if (i == 0)
6359                                 goto initfail;
6360                         tp->rx_jumbo_pending = i;
6361                         break;
6362                 }
6363         }
6364
6365 done:
6366         return 0;
6367
6368 initfail:
6369         tg3_rx_prodring_free(tp, tpr);
6370         return -ENOMEM;
6371 }
6372
6373 static void tg3_rx_prodring_fini(struct tg3 *tp,
6374                                  struct tg3_rx_prodring_set *tpr)
6375 {
6376         kfree(tpr->rx_std_buffers);
6377         tpr->rx_std_buffers = NULL;
6378         kfree(tpr->rx_jmb_buffers);
6379         tpr->rx_jmb_buffers = NULL;
6380         if (tpr->rx_std) {
6381                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6382                                   tpr->rx_std, tpr->rx_std_mapping);
6383                 tpr->rx_std = NULL;
6384         }
6385         if (tpr->rx_jmb) {
6386                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6387                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
6388                 tpr->rx_jmb = NULL;
6389         }
6390 }
6391
6392 static int tg3_rx_prodring_init(struct tg3 *tp,
6393                                 struct tg3_rx_prodring_set *tpr)
6394 {
6395         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6396                                       GFP_KERNEL);
6397         if (!tpr->rx_std_buffers)
6398                 return -ENOMEM;
6399
6400         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6401                                          TG3_RX_STD_RING_BYTES(tp),
6402                                          &tpr->rx_std_mapping,
6403                                          GFP_KERNEL);
6404         if (!tpr->rx_std)
6405                 goto err_out;
6406
6407         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6408                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6409                                               GFP_KERNEL);
6410                 if (!tpr->rx_jmb_buffers)
6411                         goto err_out;
6412
6413                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6414                                                  TG3_RX_JMB_RING_BYTES(tp),
6415                                                  &tpr->rx_jmb_mapping,
6416                                                  GFP_KERNEL);
6417                 if (!tpr->rx_jmb)
6418                         goto err_out;
6419         }
6420
6421         return 0;
6422
6423 err_out:
6424         tg3_rx_prodring_fini(tp, tpr);
6425         return -ENOMEM;
6426 }
6427
6428 /* Free up pending packets in all rx/tx rings.
6429  *
6430  * The chip has been shut down and the driver detached from
6431  * the networking stack, so no interrupts or new tx packets will
6432  * end up in the driver.  tp->{tx,}lock is not held and we are not
6433  * in an interrupt context and thus may sleep.
6434  */
6435 static void tg3_free_rings(struct tg3 *tp)
6436 {
6437         int i, j;
6438
6439         for (j = 0; j < tp->irq_cnt; j++) {
6440                 struct tg3_napi *tnapi = &tp->napi[j];
6441
6442                 tg3_rx_prodring_free(tp, &tnapi->prodring);
6443
6444                 if (!tnapi->tx_buffers)
6445                         continue;
6446
6447                 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6448                         struct ring_info *txp;
6449                         struct sk_buff *skb;
6450                         unsigned int k;
6451
6452                         txp = &tnapi->tx_buffers[i];
6453                         skb = txp->skb;
6454
6455                         if (skb == NULL) {
6456                                 i++;
6457                                 continue;
6458                         }
6459
6460                         pci_unmap_single(tp->pdev,
6461                                          dma_unmap_addr(txp, mapping),
6462                                          skb_headlen(skb),
6463                                          PCI_DMA_TODEVICE);
6464                         txp->skb = NULL;
6465
6466                         i++;
6467
6468                         for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6469                                 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6470                                 pci_unmap_page(tp->pdev,
6471                                                dma_unmap_addr(txp, mapping),
6472                                                skb_shinfo(skb)->frags[k].size,
6473                                                PCI_DMA_TODEVICE);
6474                                 i++;
6475                         }
6476
6477                         dev_kfree_skb_any(skb);
6478                 }
6479         }
6480 }
6481
6482 /* Initialize tx/rx rings for packet processing.
6483  *
6484  * The chip has been shut down and the driver detached from
6485  * the networking stack, so no interrupts or new tx packets will
6486  * end up in the driver.  tp->{tx,}lock are held and thus
6487  * we may not sleep.
6488  */
6489 static int tg3_init_rings(struct tg3 *tp)
6490 {
6491         int i;
6492
6493         /* Free up all the SKBs. */
6494         tg3_free_rings(tp);
6495
6496         for (i = 0; i < tp->irq_cnt; i++) {
6497                 struct tg3_napi *tnapi = &tp->napi[i];
6498
6499                 tnapi->last_tag = 0;
6500                 tnapi->last_irq_tag = 0;
6501                 tnapi->hw_status->status = 0;
6502                 tnapi->hw_status->status_tag = 0;
6503                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6504
6505                 tnapi->tx_prod = 0;
6506                 tnapi->tx_cons = 0;
6507                 if (tnapi->tx_ring)
6508                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6509
6510                 tnapi->rx_rcb_ptr = 0;
6511                 if (tnapi->rx_rcb)
6512                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6513
6514                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6515                         tg3_free_rings(tp);
6516                         return -ENOMEM;
6517                 }
6518         }
6519
6520         return 0;
6521 }
6522
6523 /*
6524  * Must not be invoked with interrupt sources disabled and
6525  * the hardware shut down.
6526  */
6527 static void tg3_free_consistent(struct tg3 *tp)
6528 {
6529         int i;
6530
6531         for (i = 0; i < tp->irq_cnt; i++) {
6532                 struct tg3_napi *tnapi = &tp->napi[i];
6533
6534                 if (tnapi->tx_ring) {
6535                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6536                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
6537                         tnapi->tx_ring = NULL;
6538                 }
6539
6540                 kfree(tnapi->tx_buffers);
6541                 tnapi->tx_buffers = NULL;
6542
6543                 if (tnapi->rx_rcb) {
6544                         dma_free_coherent(&tp->pdev->dev,
6545                                           TG3_RX_RCB_RING_BYTES(tp),
6546                                           tnapi->rx_rcb,
6547                                           tnapi->rx_rcb_mapping);
6548                         tnapi->rx_rcb = NULL;
6549                 }
6550
6551                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6552
6553                 if (tnapi->hw_status) {
6554                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6555                                           tnapi->hw_status,
6556                                           tnapi->status_mapping);
6557                         tnapi->hw_status = NULL;
6558                 }
6559         }
6560
6561         if (tp->hw_stats) {
6562                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6563                                   tp->hw_stats, tp->stats_mapping);
6564                 tp->hw_stats = NULL;
6565         }
6566 }
6567
6568 /*
6569  * Must not be invoked with interrupt sources disabled and
6570  * the hardware shut down.  Can sleep.
6571  */
6572 static int tg3_alloc_consistent(struct tg3 *tp)
6573 {
6574         int i;
6575
6576         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6577                                           sizeof(struct tg3_hw_stats),
6578                                           &tp->stats_mapping,
6579                                           GFP_KERNEL);
6580         if (!tp->hw_stats)
6581                 goto err_out;
6582
6583         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6584
6585         for (i = 0; i < tp->irq_cnt; i++) {
6586                 struct tg3_napi *tnapi = &tp->napi[i];
6587                 struct tg3_hw_status *sblk;
6588
6589                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6590                                                       TG3_HW_STATUS_SIZE,
6591                                                       &tnapi->status_mapping,
6592                                                       GFP_KERNEL);
6593                 if (!tnapi->hw_status)
6594                         goto err_out;
6595
6596                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6597                 sblk = tnapi->hw_status;
6598
6599                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6600                         goto err_out;
6601
6602                 /* If multivector TSS is enabled, vector 0 does not handle
6603                  * tx interrupts.  Don't allocate any resources for it.
6604                  */
6605                 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
6606                     (i && tg3_flag(tp, ENABLE_TSS))) {
6607                         tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6608                                                     TG3_TX_RING_SIZE,
6609                                                     GFP_KERNEL);
6610                         if (!tnapi->tx_buffers)
6611                                 goto err_out;
6612
6613                         tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6614                                                             TG3_TX_RING_BYTES,
6615                                                         &tnapi->tx_desc_mapping,
6616                                                             GFP_KERNEL);
6617                         if (!tnapi->tx_ring)
6618                                 goto err_out;
6619                 }
6620
6621                 /*
6622                  * When RSS is enabled, the status block format changes
6623                  * slightly.  The "rx_jumbo_consumer", "reserved",
6624                  * and "rx_mini_consumer" members get mapped to the
6625                  * other three rx return ring producer indexes.
6626                  */
6627                 switch (i) {
6628                 default:
6629                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6630                         break;
6631                 case 2:
6632                         tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6633                         break;
6634                 case 3:
6635                         tnapi->rx_rcb_prod_idx = &sblk->reserved;
6636                         break;
6637                 case 4:
6638                         tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6639                         break;
6640                 }
6641
6642                 /*
6643                  * If multivector RSS is enabled, vector 0 does not handle
6644                  * rx or tx interrupts.  Don't allocate any resources for it.
6645                  */
6646                 if (!i && tg3_flag(tp, ENABLE_RSS))
6647                         continue;
6648
6649                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6650                                                    TG3_RX_RCB_RING_BYTES(tp),
6651                                                    &tnapi->rx_rcb_mapping,
6652                                                    GFP_KERNEL);
6653                 if (!tnapi->rx_rcb)
6654                         goto err_out;
6655
6656                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6657         }
6658
6659         return 0;
6660
6661 err_out:
6662         tg3_free_consistent(tp);
6663         return -ENOMEM;
6664 }
6665
6666 #define MAX_WAIT_CNT 1000
6667
6668 /* To stop a block, clear the enable bit and poll until it
6669  * clears.  tp->lock is held.
6670  */
6671 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6672 {
6673         unsigned int i;
6674         u32 val;
6675
6676         if (tg3_flag(tp, 5705_PLUS)) {
6677                 switch (ofs) {
6678                 case RCVLSC_MODE:
6679                 case DMAC_MODE:
6680                 case MBFREE_MODE:
6681                 case BUFMGR_MODE:
6682                 case MEMARB_MODE:
6683                         /* We can't enable/disable these bits of the
6684                          * 5705/5750; just report success.
6685                          */
6686                         return 0;
6687
6688                 default:
6689                         break;
6690                 }
6691         }
6692
6693         val = tr32(ofs);
6694         val &= ~enable_bit;
6695         tw32_f(ofs, val);
6696
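             /* Poll for up to 100 ms (MAX_WAIT_CNT * 100 us) for the
              * enable bit to clear.
              */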
6697         for (i = 0; i < MAX_WAIT_CNT; i++) {
6698                 udelay(100);
6699                 val = tr32(ofs);
6700                 if ((val & enable_bit) == 0)
6701                         break;
6702         }
6703
6704         if (i == MAX_WAIT_CNT && !silent) {
6705                 dev_err(&tp->pdev->dev,
6706                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6707                         ofs, enable_bit);
6708                 return -ENODEV;
6709         }
6710
6711         return 0;
6712 }
6713
6714 /* tp->lock is held. */
6715 static int tg3_abort_hw(struct tg3 *tp, int silent)
6716 {
6717         int i, err;
6718
6719         tg3_disable_ints(tp);
6720
6721         tp->rx_mode &= ~RX_MODE_ENABLE;
6722         tw32_f(MAC_RX_MODE, tp->rx_mode);
6723         udelay(10);
6724
6725         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6726         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6727         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6728         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6729         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6730         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6731
6732         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6733         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6734         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6735         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6736         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6737         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6738         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6739
6740         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6741         tw32_f(MAC_MODE, tp->mac_mode);
6742         udelay(40);
6743
6744         tp->tx_mode &= ~TX_MODE_ENABLE;
6745         tw32_f(MAC_TX_MODE, tp->tx_mode);
6746
6747         for (i = 0; i < MAX_WAIT_CNT; i++) {
6748                 udelay(100);
6749                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6750                         break;
6751         }
6752         if (i >= MAX_WAIT_CNT) {
6753                 dev_err(&tp->pdev->dev,
6754                         "%s timed out, TX_MODE_ENABLE will not clear "
6755                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6756                 err |= -ENODEV;
6757         }
6758
6759         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6760         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6761         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6762
6763         tw32(FTQ_RESET, 0xffffffff);
6764         tw32(FTQ_RESET, 0x00000000);
6765
6766         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6767         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6768
6769         for (i = 0; i < tp->irq_cnt; i++) {
6770                 struct tg3_napi *tnapi = &tp->napi[i];
6771                 if (tnapi->hw_status)
6772                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6773         }
6774         if (tp->hw_stats)
6775                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6776
6777         return err;
6778 }
6779
6780 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6781 {
6782         int i;
6783         u32 apedata;
6784
6785         /* NCSI does not support APE events */
6786         if (tg3_flag(tp, APE_HAS_NCSI))
6787                 return;
6788
6789         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6790         if (apedata != APE_SEG_SIG_MAGIC)
6791                 return;
6792
6793         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6794         if (!(apedata & APE_FW_STATUS_READY))
6795                 return;
6796
6797         /* Wait for up to 1 millisecond for APE to service previous event. */
6798         for (i = 0; i < 10; i++) {
6799                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6800                         return;
6801
6802                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6803
6804                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6805                         tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6806                                         event | APE_EVENT_STATUS_EVENT_PENDING);
6807
6808                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6809
6810                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6811                         break;
6812
6813                 udelay(100);
6814         }
6815
6816         if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6817                 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6818 }
6819
6820 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6821 {
6822         u32 event;
6823         u32 apedata;
6824
6825         if (!tg3_flag(tp, ENABLE_APE))
6826                 return;
6827
6828         switch (kind) {
6829         case RESET_KIND_INIT:
6830                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6831                                 APE_HOST_SEG_SIG_MAGIC);
6832                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6833                                 APE_HOST_SEG_LEN_MAGIC);
6834                 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6835                 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6836                 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6837                         APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6838                 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6839                                 APE_HOST_BEHAV_NO_PHYLOCK);
6840                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
6841                                     TG3_APE_HOST_DRVR_STATE_START);
6842
6843                 event = APE_EVENT_STATUS_STATE_START;
6844                 break;
6845         case RESET_KIND_SHUTDOWN:
6846                 /* With the interface we are currently using,
6847                  * APE does not track driver state.  Wiping
6848                  * out the HOST SEGMENT SIGNATURE forces
6849                  * the APE to assume OS absent status.
6850                  */
6851                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6852
6853                 if (device_may_wakeup(&tp->pdev->dev) &&
6854                     tg3_flag(tp, WOL_ENABLE)) {
6855                         tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
6856                                             TG3_APE_HOST_WOL_SPEED_AUTO);
6857                         apedata = TG3_APE_HOST_DRVR_STATE_WOL;
6858                 } else
6859                         apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
6860
6861                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
6862
6863                 event = APE_EVENT_STATUS_STATE_UNLOAD;
6864                 break;
6865         case RESET_KIND_SUSPEND:
6866                 event = APE_EVENT_STATUS_STATE_SUSPEND;
6867                 break;
6868         default:
6869                 return;
6870         }
6871
6872         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
6873
6874         tg3_ape_send_event(tp, event);
6875 }
6876
6877 /* tp->lock is held. */
6878 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6879 {
6880         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6881                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6882
6883         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6884                 switch (kind) {
6885                 case RESET_KIND_INIT:
6886                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6887                                       DRV_STATE_START);
6888                         break;
6889
6890                 case RESET_KIND_SHUTDOWN:
6891                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6892                                       DRV_STATE_UNLOAD);
6893                         break;
6894
6895                 case RESET_KIND_SUSPEND:
6896                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6897                                       DRV_STATE_SUSPEND);
6898                         break;
6899
6900                 default:
6901                         break;
6902                 }
6903         }
6904
6905         if (kind == RESET_KIND_INIT ||
6906             kind == RESET_KIND_SUSPEND)
6907                 tg3_ape_driver_state_change(tp, kind);
6908 }
6909
6910 /* tp->lock is held. */
6911 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6912 {
6913         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6914                 switch (kind) {
6915                 case RESET_KIND_INIT:
6916                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6917                                       DRV_STATE_START_DONE);
6918                         break;
6919
6920                 case RESET_KIND_SHUTDOWN:
6921                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6922                                       DRV_STATE_UNLOAD_DONE);
6923                         break;
6924
6925                 default:
6926                         break;
6927                 }
6928         }
6929
6930         if (kind == RESET_KIND_SHUTDOWN)
6931                 tg3_ape_driver_state_change(tp, kind);
6932 }
6933
6934 /* tp->lock is held. */
6935 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6936 {
6937         if (tg3_flag(tp, ENABLE_ASF)) {
6938                 switch (kind) {
6939                 case RESET_KIND_INIT:
6940                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6941                                       DRV_STATE_START);
6942                         break;
6943
6944                 case RESET_KIND_SHUTDOWN:
6945                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6946                                       DRV_STATE_UNLOAD);
6947                         break;
6948
6949                 case RESET_KIND_SUSPEND:
6950                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6951                                       DRV_STATE_SUSPEND);
6952                         break;
6953
6954                 default:
6955                         break;
6956                 }
6957         }
6958 }
6959
6960 static int tg3_poll_fw(struct tg3 *tp)
6961 {
6962         int i;
6963         u32 val;
6964
6965         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6966                 /* Wait up to 20ms for init done. */
6967                 for (i = 0; i < 200; i++) {
6968                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
6969                                 return 0;
6970                         udelay(100);
6971                 }
6972                 return -ENODEV;
6973         }
6974
6975         /* Wait up to one second for firmware initialization to complete. */
6976         for (i = 0; i < 100000; i++) {
6977                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
6978                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
6979                         break;
6980                 udelay(10);
6981         }
6982
6983         /* The chip might not be fitted with firmware.  Some Sun onboard
6984          * parts are configured that way.  So don't signal the timeout
6985          * of the above loop as an error, but do report the lack of
6986          * running firmware once.
6987          */
6988         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
6989                 tg3_flag_set(tp, NO_FWARE_REPORTED);
6990
6991                 netdev_info(tp->dev, "No firmware running\n");
6992         }
6993
6994         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
6995                 /* The 57765 A0 needs a little more
6996                  * time to do some important work.
6997                  */
6998                 mdelay(10);
6999         }
7000
7001         return 0;
7002 }
7003
7004 /* Save PCI command register before chip reset */
7005 static void tg3_save_pci_state(struct tg3 *tp)
7006 {
7007         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7008 }
7009
7010 /* Restore PCI state after chip reset */
7011 static void tg3_restore_pci_state(struct tg3 *tp)
7012 {
7013         u32 val;
7014
7015         /* Re-enable indirect register accesses. */
7016         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7017                                tp->misc_host_ctrl);
7018
7019         /* Set MAX PCI retry to zero. */
7020         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7021         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7022             tg3_flag(tp, PCIX_MODE))
7023                 val |= PCISTATE_RETRY_SAME_DMA;
7024         /* Allow reads and writes to the APE register and memory space. */
7025         if (tg3_flag(tp, ENABLE_APE))
7026                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7027                        PCISTATE_ALLOW_APE_SHMEM_WR |
7028                        PCISTATE_ALLOW_APE_PSPACE_WR;
7029         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7030
7031         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7032
7033         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7034                 if (tg3_flag(tp, PCI_EXPRESS))
7035                         pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7036                 else {
7037                         pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7038                                               tp->pci_cacheline_sz);
7039                         pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7040                                               tp->pci_lat_timer);
7041                 }
7042         }
7043
7044         /* Make sure PCI-X relaxed ordering bit is clear. */
7045         if (tg3_flag(tp, PCIX_MODE)) {
7046                 u16 pcix_cmd;
7047
7048                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7049                                      &pcix_cmd);
7050                 pcix_cmd &= ~PCI_X_CMD_ERO;
7051                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7052                                       pcix_cmd);
7053         }
7054
7055         if (tg3_flag(tp, 5780_CLASS)) {
7057                 /* Chip reset on 5780 will reset MSI enable bit,
7058                  * so we need to restore it.
7059                  */
7060                 if (tg3_flag(tp, USING_MSI)) {
7061                         u16 ctrl;
7062
7063                         pci_read_config_word(tp->pdev,
7064                                              tp->msi_cap + PCI_MSI_FLAGS,
7065                                              &ctrl);
7066                         pci_write_config_word(tp->pdev,
7067                                               tp->msi_cap + PCI_MSI_FLAGS,
7068                                               ctrl | PCI_MSI_FLAGS_ENABLE);
7069                         val = tr32(MSGINT_MODE);
7070                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7071                 }
7072         }
7073 }
7074
7075 static void tg3_stop_fw(struct tg3 *);
7076
7077 /* tp->lock is held. */
7078 static int tg3_chip_reset(struct tg3 *tp)
7079 {
7080         u32 val;
7081         void (*write_op)(struct tg3 *, u32, u32);
7082         int i, err;
7083
7084         tg3_nvram_lock(tp);
7085
7086         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7087
7088         /* No matching tg3_nvram_unlock() after this because
7089          * chip reset below will undo the nvram lock.
7090          */
7091         tp->nvram_lock_cnt = 0;
7092
7093         /* GRC_MISC_CFG core clock reset will clear the memory
7094          * enable bit in PCI register 4 and the MSI enable bit
7095          * on some chips, so we save relevant registers here.
7096          */
7097         tg3_save_pci_state(tp);
7098
7099         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7100             tg3_flag(tp, 5755_PLUS))
7101                 tw32(GRC_FASTBOOT_PC, 0);
7102
7103         /*
7104          * We must avoid the readl() that normally takes place.
7105          * It locks up machines, causes machine checks, and does
7106          * other fun things.  So, temporarily disable the 5701
7107          * hardware workaround while we do the reset.
7108          */
7109         write_op = tp->write32;
7110         if (write_op == tg3_write_flush_reg32)
7111                 tp->write32 = tg3_write32;
7112
7113         /* Prevent the irq handler from reading or writing PCI registers
7114          * during chip reset when the memory enable bit in the PCI command
7115          * register may be cleared.  The chip does not generate interrupts
7116          * at this time, but the irq handler may still be called due to irq
7117          * sharing or irqpoll.
7118          */
7119         tg3_flag_set(tp, CHIP_RESETTING);
7120         for (i = 0; i < tp->irq_cnt; i++) {
7121                 struct tg3_napi *tnapi = &tp->napi[i];
7122                 if (tnapi->hw_status) {
7123                         tnapi->hw_status->status = 0;
7124                         tnapi->hw_status->status_tag = 0;
7125                 }
7126                 tnapi->last_tag = 0;
7127                 tnapi->last_irq_tag = 0;
7128         }
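             /* Make the cleared status blocks and tags visible to all
              * CPUs before waiting out any in-flight irq handlers below.
              */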
7129         smp_mb();
7130
7131         for (i = 0; i < tp->irq_cnt; i++)
7132                 synchronize_irq(tp->napi[i].irq_vec);
7133
7134         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7135                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7136                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7137         }
7138
7139         /* do the reset */
7140         val = GRC_MISC_CFG_CORECLK_RESET;
7141
7142         if (tg3_flag(tp, PCI_EXPRESS)) {
7143                 /* Force PCIe 1.0a mode */
7144                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7145                     !tg3_flag(tp, 57765_PLUS) &&
7146                     tr32(TG3_PCIE_PHY_TSTCTL) ==
7147                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7148                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7149
7150                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7151                         tw32(GRC_MISC_CFG, (1 << 29));
7152                         val |= (1 << 29);
7153                 }
7154         }
7155
7156         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7157                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7158                 tw32(GRC_VCPU_EXT_CTRL,
7159                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7160         }
7161
7162         /* Manage gphy power for all CPMU-absent PCIe devices. */
7163         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7164                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7165
7166         tw32(GRC_MISC_CFG, val);
7167
7168         /* restore 5701 hardware bug workaround write method */
7169         tp->write32 = write_op;
7170
7171         /* Unfortunately, we have to delay before the PCI read back.
7172          * Some 575X chips will not even respond to a PCI cfg access
7173          * when the reset command is given to the chip.
7174          *
7175          * How do these hardware designers expect things to work
7176          * properly if the PCI write is posted for a long period
7177          * of time?  It is always necessary to have some method by
7178          * which a register read-back can occur to push out the
7179          * write that performs the reset.
7180          *
7181          * For most tg3 variants the trick below works.
7182          * Ho hum...
7183          */
7184         udelay(120);
7185
7186         /* Flush PCI posted writes.  The normal MMIO registers
7187          * are inaccessible at this time, so this is the only
7188          * way to do this reliably (actually, this is no longer
7189          * the case; see above).  I tried to use indirect
7190          * register read/write, but this upset some 5701 variants.
7191          */
7192         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7193
7194         udelay(120);
7195
7196         if (tg3_flag(tp, PCI_EXPRESS) && tp->pcie_cap) {
7197                 u16 val16;
7198
7199                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7200                         int i;
7201                         u32 cfg_val;
7202
7203                         /* Wait for link training to complete.  */
7204                         for (i = 0; i < 5000; i++)
7205                                 udelay(100);
7206
7207                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7208                         pci_write_config_dword(tp->pdev, 0xc4,
7209                                                cfg_val | (1 << 15));
7210                 }
7211
7212                 /* Clear the "no snoop" and "relaxed ordering" bits. */
7213                 pci_read_config_word(tp->pdev,
7214                                      tp->pcie_cap + PCI_EXP_DEVCTL,
7215                                      &val16);
7216                 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7217                            PCI_EXP_DEVCTL_NOSNOOP_EN);
7218                 /*
7219                  * Older PCIe devices only support the 128-byte
7220                  * MPS setting.  Enforce the restriction.
7221                  */
7222                 if (!tg3_flag(tp, CPMU_PRESENT))
7223                         val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7224                 pci_write_config_word(tp->pdev,
7225                                       tp->pcie_cap + PCI_EXP_DEVCTL,
7226                                       val16);
7227
7228                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7229
7230                 /* Clear error status */
7231                 pci_write_config_word(tp->pdev,
7232                                       tp->pcie_cap + PCI_EXP_DEVSTA,
7233                                       PCI_EXP_DEVSTA_CED |
7234                                       PCI_EXP_DEVSTA_NFED |
7235                                       PCI_EXP_DEVSTA_FED |
7236                                       PCI_EXP_DEVSTA_URD);
7237         }
7238
7239         tg3_restore_pci_state(tp);
7240
7241         tg3_flag_clear(tp, CHIP_RESETTING);
7242         tg3_flag_clear(tp, ERROR_PROCESSED);
7243
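             /* Re-enable the memory arbiter; on 5780-class parts,
              * preserve whatever mode bits survived the reset.
              */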
7244         val = 0;
7245         if (tg3_flag(tp, 5780_CLASS))
7246                 val = tr32(MEMARB_MODE);
7247         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7248
7249         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7250                 tg3_stop_fw(tp);
7251                 tw32(0x5000, 0x400);
7252         }
7253
7254         tw32(GRC_MODE, tp->grc_mode);
7255
7256         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7257                 val = tr32(0xc4);
7258
7259                 tw32(0xc4, val | (1 << 15));
7260         }
7261
7262         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7263             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7264                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7265                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7266                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7267                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7268         }
7269
7270         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7271                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7272                 val = tp->mac_mode;
7273         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7274                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7275                 val = tp->mac_mode;
7276         } else
7277                 val = 0;
7278
7279         tw32_f(MAC_MODE, val);
7280         udelay(40);
7281
7282         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7283
7284         err = tg3_poll_fw(tp);
7285         if (err)
7286                 return err;
7287
7288         tg3_mdio_start(tp);
7289
7290         if (tg3_flag(tp, PCI_EXPRESS) &&
7291             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7292             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7293             !tg3_flag(tp, 57765_PLUS)) {
7294                 val = tr32(0x7c00);
7295
7296                 tw32(0x7c00, val | (1 << 25));
7297         }
7298
7299         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7300                 val = tr32(TG3_CPMU_CLCK_ORIDE);
7301                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7302         }
7303
7304         /* Reprobe ASF enable state.  */
7305         tg3_flag_clear(tp, ENABLE_ASF);
7306         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7307         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7308         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7309                 u32 nic_cfg;
7310
7311                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7312                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7313                         tg3_flag_set(tp, ENABLE_ASF);
7314                         tp->last_event_jiffies = jiffies;
7315                         if (tg3_flag(tp, 5750_PLUS))
7316                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7317                 }
7318         }
7319
7320         return 0;
7321 }
7322
7323 /* tp->lock is held. */
7324 static void tg3_stop_fw(struct tg3 *tp)
7325 {
7326         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
7327                 /* Wait for RX cpu to ACK the previous event. */
7328                 tg3_wait_for_event_ack(tp);
7329
7330                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7331
7332                 tg3_generate_fw_event(tp);
7333
7334                 /* Wait for RX cpu to ACK this event. */
7335                 tg3_wait_for_event_ack(tp);
7336         }
7337 }
7338
7339 /* tp->lock is held. */
7340 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7341 {
7342         int err;
7343
7344         tg3_stop_fw(tp);
7345
7346         tg3_write_sig_pre_reset(tp, kind);
7347
7348         tg3_abort_hw(tp, silent);
7349         err = tg3_chip_reset(tp);
7350
7351         __tg3_set_mac_addr(tp, 0);
7352
7353         tg3_write_sig_legacy(tp, kind);
7354         tg3_write_sig_post_reset(tp, kind);
7355
7356         if (err)
7357                 return err;
7358
7359         return 0;
7360 }
7361
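     /* Each on-chip CPU gets a 16 KB (0x4000-byte) scratch window in
      * NIC SRAM: the RX CPU at 0x30000 and the TX CPU at 0x34000.
      */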
7362 #define RX_CPU_SCRATCH_BASE     0x30000
7363 #define RX_CPU_SCRATCH_SIZE     0x04000
7364 #define TX_CPU_SCRATCH_BASE     0x34000
7365 #define TX_CPU_SCRATCH_SIZE     0x04000
7366
7367 /* tp->lock is held. */
7368 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7369 {
7370         int i;
7371
7372         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
7373
7374         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7375                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7376
7377                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7378                 return 0;
7379         }
7380         if (offset == RX_CPU_BASE) {
7381                 for (i = 0; i < 10000; i++) {
7382                         tw32(offset + CPU_STATE, 0xffffffff);
7383                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
7384                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7385                                 break;
7386                 }
7387
7388                 tw32(offset + CPU_STATE, 0xffffffff);
7389                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
7390                 udelay(10);
7391         } else {
7392                 for (i = 0; i < 10000; i++) {
7393                         tw32(offset + CPU_STATE, 0xffffffff);
7394                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
7395                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7396                                 break;
7397                 }
7398         }
7399
7400         if (i >= 10000) {
7401                 netdev_err(tp->dev, "%s: timed out halting %s CPU\n",
7402                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7403                 return -ENODEV;
7404         }
7405
7406         /* Clear firmware's nvram arbitration. */
7407         if (tg3_flag(tp, NVRAM))
7408                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7409         return 0;
7410 }
7411
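     /* Loader parameters parsed from the firmware blob header: fw_base
      * is the load/entry address, fw_len the payload size in bytes, and
      * fw_data points at the payload words (stored big-endian).
      */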
7412 struct fw_info {
7413         unsigned int fw_base;
7414         unsigned int fw_len;
7415         const __be32 *fw_data;
7416 };
7417
7418 /* tp->lock is held. */
7419 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7420                                  int cpu_scratch_size, struct fw_info *info)
7421 {
7422         int err, lock_err, i;
7423         void (*write_op)(struct tg3 *, u32, u32);
7424
7425         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
7426                 netdev_err(tp->dev,
7427                            "%s: trying to load TX cpu firmware on a 5705-plus chip\n",
7428                            __func__);
7429                 return -EINVAL;
7430         }
7431
7432         if (tg3_flag(tp, 5705_PLUS))
7433                 write_op = tg3_write_mem;
7434         else
7435                 write_op = tg3_write_indirect_reg32;
7436
7437         /* It is possible that bootcode is still loading at this point.
7438          * Get the nvram lock before halting the cpu.
7439          */
7440         lock_err = tg3_nvram_lock(tp);
7441         err = tg3_halt_cpu(tp, cpu_base);
7442         if (!lock_err)
7443                 tg3_nvram_unlock(tp);
7444         if (err)
7445                 goto out;
7446
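             /* With the CPU halted, clear the whole scratch window and
              * copy the image in at its link offset (the low 16 bits of
              * fw_base).
              */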
7447         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7448                 write_op(tp, cpu_scratch_base + i, 0);
7449         tw32(cpu_base + CPU_STATE, 0xffffffff);
7450         tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
7451         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7452                 write_op(tp, (cpu_scratch_base +
7453                               (info->fw_base & 0xffff) +
7454                               (i * sizeof(u32))),
7455                               be32_to_cpu(info->fw_data[i]));
7456
7457         err = 0;
7458
7459 out:
7460         return err;
7461 }
7462
7463 /* tp->lock is held. */
7464 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7465 {
7466         struct fw_info info;
7467         const __be32 *fw_data;
7468         int err, i;
7469
7470         fw_data = (void *)tp->fw->data;
7471
7472         /* The firmware blob starts with version numbers, followed by
7473          * the start address and length.  We set the complete length:
7474          * length = end_address_of_bss - start_address_of_text.
7475          * The remainder is the blob, loaded contiguously from the
7476          * start address. */
7477
7478         info.fw_base = be32_to_cpu(fw_data[1]);
7479         info.fw_len = tp->fw->size - 12;
7480         info.fw_data = &fw_data[3];
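             /* Concretely: fw_data[0] is the version word, fw_data[1]
              * the start address and fw_data[2] the length; the payload
              * begins at fw_data[3], hence the 12-byte (3 * sizeof(u32))
              * header adjustment above.
              */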
7481
7482         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7483                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7484                                     &info);
7485         if (err)
7486                 return err;
7487
7488         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7489                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7490                                     &info);
7491         if (err)
7492                 return err;
7493
7494         /* Now start up only the RX cpu. */
7495         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7496         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7497
7498         for (i = 0; i < 5; i++) {
7499                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7500                         break;
7501                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7502                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
7503                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7504                 udelay(1000);
7505         }
7506         if (i >= 5) {
7507                 netdev_err(tp->dev, "%s failed to set RX CPU PC: is %08x, "
7508                            "should be %08x\n", __func__,
7509                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7510                 return -ENODEV;
7511         }
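             /* PC verified; release the RX CPU by taking it out of halt. */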
7512         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7513         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
7514
7515         return 0;
7516 }
7517
7518 /* tp->lock is held. */
7519 static int tg3_load_tso_firmware(struct tg3 *tp)
7520 {
7521         struct fw_info info;
7522         const __be32 *fw_data;
7523         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7524         int err, i;
7525
7526         if (tg3_flag(tp, HW_TSO_1) ||
7527             tg3_flag(tp, HW_TSO_2) ||
7528             tg3_flag(tp, HW_TSO_3))
7529                 return 0;
7530
7531         fw_data = (void *)tp->fw->data;
7532
7533         /* The firmware blob starts with version numbers, followed by
7534          * the start address and length.  We set the complete length:
7535          * length = end_address_of_bss - start_address_of_text.
7536          * The remainder is the blob, loaded contiguously from the
7537          * start address. */
7538
7539         info.fw_base = be32_to_cpu(fw_data[1]);
7540         cpu_scratch_size = tp->fw_len;
7541         info.fw_len = tp->fw->size - 12;
7542         info.fw_data = &fw_data[3];
7543
7544         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
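                     /* The 5705 runs the TSO firmware on its RX CPU,
                      * staged in mbuf-pool SRAM; the buffer-manager
                      * setup in tg3_reset_hw() reserves that region.
                      */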
7545                 cpu_base = RX_CPU_BASE;
7546                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7547         } else {
7548                 cpu_base = TX_CPU_BASE;
7549                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7550                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7551         }
7552
7553         err = tg3_load_firmware_cpu(tp, cpu_base,
7554                                     cpu_scratch_base, cpu_scratch_size,
7555                                     &info);
7556         if (err)
7557                 return err;
7558
7559         /* Now start up the cpu. */
7560         tw32(cpu_base + CPU_STATE, 0xffffffff);
7561         tw32_f(cpu_base + CPU_PC, info.fw_base);
7562
7563         for (i = 0; i < 5; i++) {
7564                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7565                         break;
7566                 tw32(cpu_base + CPU_STATE, 0xffffffff);
7567                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
7568                 tw32_f(cpu_base + CPU_PC, info.fw_base);
7569                 udelay(1000);
7570         }
7571         if (i >= 5) {
7572                 netdev_err(tp->dev,
7573                            "%s failed to set CPU PC: is %08x, should be %08x\n",
7574                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7575                 return -ENODEV;
7576         }
7577         tw32(cpu_base + CPU_STATE, 0xffffffff);
7578         tw32_f(cpu_base + CPU_MODE,  0x00000000);
7579         return 0;
7580 }
7581
7582
7583 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7584 {
7585         struct tg3 *tp = netdev_priv(dev);
7586         struct sockaddr *addr = p;
7587         int err = 0, skip_mac_1 = 0;
7588
7589         if (!is_valid_ether_addr(addr->sa_data))
7590                 return -EINVAL;
7591
7592         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7593
7594         if (!netif_running(dev))
7595                 return 0;
7596
7597         if (tg3_flag(tp, ENABLE_ASF)) {
7598                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7599
7600                 addr0_high = tr32(MAC_ADDR_0_HIGH);
7601                 addr0_low = tr32(MAC_ADDR_0_LOW);
7602                 addr1_high = tr32(MAC_ADDR_1_HIGH);
7603                 addr1_low = tr32(MAC_ADDR_1_LOW);
7604
7605                 /* Skip MAC addr 1 if ASF is using it. */
7606                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7607                     !(addr1_high == 0 && addr1_low == 0))
7608                         skip_mac_1 = 1;
7609         }
7610         spin_lock_bh(&tp->lock);
7611         __tg3_set_mac_addr(tp, skip_mac_1);
7612         spin_unlock_bh(&tp->lock);
7613
7614         return err;
7615 }
7616
7617 /* tp->lock is held. */
7618 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7619                            dma_addr_t mapping, u32 maxlen_flags,
7620                            u32 nic_addr)
7621 {
7622         tg3_write_mem(tp,
7623                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7624                       ((u64) mapping >> 32));
7625         tg3_write_mem(tp,
7626                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7627                       ((u64) mapping & 0xffffffff));
7628         tg3_write_mem(tp,
7629                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7630                        maxlen_flags);
7631
7632         if (!tg3_flag(tp, 5705_PLUS))
7633                 tg3_write_mem(tp,
7634                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7635                               nic_addr);
7636 }
7637
7638 static void __tg3_set_rx_mode(struct net_device *);
7639 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7640 {
7641         int i;
7642
7643         if (!tg3_flag(tp, ENABLE_TSS)) {
7644                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7645                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7646                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7647         } else {
7648                 tw32(HOSTCC_TXCOL_TICKS, 0);
7649                 tw32(HOSTCC_TXMAX_FRAMES, 0);
7650                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7651         }
7652
7653         if (!tg3_flag(tp, ENABLE_RSS)) {
7654                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7655                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7656                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7657         } else {
7658                 tw32(HOSTCC_RXCOL_TICKS, 0);
7659                 tw32(HOSTCC_RXMAX_FRAMES, 0);
7660                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7661         }
7662
7663         if (!tg3_flag(tp, 5705_PLUS)) {
7664                 u32 val = ec->stats_block_coalesce_usecs;
7665
7666                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7667                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7668
7669                 if (!netif_carrier_ok(tp->dev))
7670                         val = 0;
7671
7672                 tw32(HOSTCC_STAT_COAL_TICKS, val);
7673         }
7674
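             /* Each additional interrupt vector owns its own bank of
              * coalescing registers, spaced 0x18 bytes apart.
              */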
7675         for (i = 0; i < tp->irq_cnt - 1; i++) {
7676                 u32 reg;
7677
7678                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7679                 tw32(reg, ec->rx_coalesce_usecs);
7680                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7681                 tw32(reg, ec->rx_max_coalesced_frames);
7682                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7683                 tw32(reg, ec->rx_max_coalesced_frames_irq);
7684
7685                 if (tg3_flag(tp, ENABLE_TSS)) {
7686                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7687                         tw32(reg, ec->tx_coalesce_usecs);
7688                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7689                         tw32(reg, ec->tx_max_coalesced_frames);
7690                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7691                         tw32(reg, ec->tx_max_coalesced_frames_irq);
7692                 }
7693         }
7694
7695         for (; i < tp->irq_max - 1; i++) {
7696                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7697                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7698                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7699
7700                 if (tg3_flag(tp, ENABLE_TSS)) {
7701                         tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7702                         tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7703                         tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7704                 }
7705         }
7706 }
7707
7708 /* tp->lock is held. */
7709 static void tg3_rings_reset(struct tg3 *tp)
7710 {
7711         int i;
7712         u32 stblk, txrcb, rxrcb, limit;
7713         struct tg3_napi *tnapi = &tp->napi[0];
7714
7715         /* Disable all transmit rings but the first. */
7716         if (!tg3_flag(tp, 5705_PLUS))
7717                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7718         else if (tg3_flag(tp, 5717_PLUS))
7719                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7720         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7721                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7722         else
7723                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7724
7725         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7726              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7727                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7728                               BDINFO_FLAGS_DISABLED);
7729
7731         /* Disable all receive return rings but the first. */
7732         if (tg3_flag(tp, 5717_PLUS))
7733                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7734         else if (!tg3_flag(tp, 5705_PLUS))
7735                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7736         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7737                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7738                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7739         else
7740                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7741
7742         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7743              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7744                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7745                               BDINFO_FLAGS_DISABLED);
7746
7747         /* Disable interrupts */
7748         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7749
7750         /* Zero mailbox registers. */
7751         if (tg3_flag(tp, SUPPORT_MSIX)) {
7752                 for (i = 1; i < tp->irq_max; i++) {
7753                         tp->napi[i].tx_prod = 0;
7754                         tp->napi[i].tx_cons = 0;
7755                         if (tg3_flag(tp, ENABLE_TSS))
7756                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
7757                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
7758                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7759                 }
7760                 if (!tg3_flag(tp, ENABLE_TSS))
7761                         tw32_mailbox(tp->napi[0].prodmbox, 0);
7762         } else {
7763                 tp->napi[0].tx_prod = 0;
7764                 tp->napi[0].tx_cons = 0;
7765                 tw32_mailbox(tp->napi[0].prodmbox, 0);
7766                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7767         }
7768
7769         /* Make sure the NIC-based send BD rings are disabled. */
7770         if (!tg3_flag(tp, 5705_PLUS)) {
7771                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7772                 for (i = 0; i < 16; i++)
7773                         tw32_tx_mbox(mbox + i * 8, 0);
7774         }
7775
7776         txrcb = NIC_SRAM_SEND_RCB;
7777         rxrcb = NIC_SRAM_RCV_RET_RCB;
7778
7779         /* Clear status block in ram. */
7780         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7781
7782         /* Set status block DMA address */
7783         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7784              ((u64) tnapi->status_mapping >> 32));
7785         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7786              ((u64) tnapi->status_mapping & 0xffffffff));
7787
7788         if (tnapi->tx_ring) {
7789                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7790                                (TG3_TX_RING_SIZE <<
7791                                 BDINFO_FLAGS_MAXLEN_SHIFT),
7792                                NIC_SRAM_TX_BUFFER_DESC);
7793                 txrcb += TG3_BDINFO_SIZE;
7794         }
7795
7796         if (tnapi->rx_rcb) {
7797                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7798                                (tp->rx_ret_ring_mask + 1) <<
7799                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
7800                 rxrcb += TG3_BDINFO_SIZE;
7801         }
7802
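             /* Point the remaining vectors at their status blocks; each
              * 64-bit DMA address spans two 32-bit registers, hence the
              * 8-byte stride.
              */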
7803         stblk = HOSTCC_STATBLCK_RING1;
7804
7805         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7806                 u64 mapping = (u64)tnapi->status_mapping;
7807                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7808                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7809
7810                 /* Clear status block in ram. */
7811                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7812
7813                 if (tnapi->tx_ring) {
7814                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7815                                        (TG3_TX_RING_SIZE <<
7816                                         BDINFO_FLAGS_MAXLEN_SHIFT),
7817                                        NIC_SRAM_TX_BUFFER_DESC);
7818                         txrcb += TG3_BDINFO_SIZE;
7819                 }
7820
7821                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7822                                ((tp->rx_ret_ring_mask + 1) <<
7823                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7824
7825                 stblk += 8;
7826                 rxrcb += TG3_BDINFO_SIZE;
7827         }
7828 }
7829
7830 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
7831 {
7832         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
7833
7834         if (!tg3_flag(tp, 5750_PLUS) ||
7835             tg3_flag(tp, 5780_CLASS) ||
7836             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7837             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7838                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
7839         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7840                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
7841                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
7842         else
7843                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
7844
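             /* The standard-ring replenish threshold is the smaller of
              * half the BD cache (capped at rx_std_max_post) and an
              * eighth of the pending ring entries, with a floor of one.
              */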
7845         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
7846         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
7847
7848         val = min(nic_rep_thresh, host_rep_thresh);
7849         tw32(RCVBDI_STD_THRESH, val);
7850
7851         if (tg3_flag(tp, 57765_PLUS))
7852                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
7853
7854         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7855                 return;
7856
7857         if (!tg3_flag(tp, 5705_PLUS))
7858                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
7859         else
7860                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
7861
7862         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
7863
7864         val = min(bdcache_maxcnt / 2, host_rep_thresh);
7865         tw32(RCVBDI_JUMBO_THRESH, val);
7866
7867         if (tg3_flag(tp, 57765_PLUS))
7868                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
7869 }
7870
7871 /* tp->lock is held. */
7872 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7873 {
7874         u32 val, rdmac_mode;
7875         int i, err, limit;
7876         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
7877
7878         tg3_disable_ints(tp);
7879
7880         tg3_stop_fw(tp);
7881
7882         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7883
7884         if (tg3_flag(tp, INIT_COMPLETE))
7885                 tg3_abort_hw(tp, 1);
7886
7887         /* Enable MAC control of LPI */
7888         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
7889                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
7890                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
7891                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
7892
7893                 tw32_f(TG3_CPMU_EEE_CTRL,
7894                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
7895
7896                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
7897                       TG3_CPMU_EEEMD_LPI_IN_TX |
7898                       TG3_CPMU_EEEMD_LPI_IN_RX |
7899                       TG3_CPMU_EEEMD_EEE_ENABLE;
7900
7901                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
7902                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
7903
7904                 if (tg3_flag(tp, ENABLE_APE))
7905                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
7906
7907                 tw32_f(TG3_CPMU_EEE_MODE, val);
7908
7909                 tw32_f(TG3_CPMU_EEE_DBTMR1,
7910                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
7911                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
7912
7913                 tw32_f(TG3_CPMU_EEE_DBTMR2,
7914                        TG3_CPMU_DBTMR2_APE_TX_2047US |
7915                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
7916         }
7917
7918         if (reset_phy)
7919                 tg3_phy_reset(tp);
7920
7921         err = tg3_chip_reset(tp);
7922         if (err)
7923                 return err;
7924
7925         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7926
7927         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7928                 val = tr32(TG3_CPMU_CTRL);
7929                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7930                 tw32(TG3_CPMU_CTRL, val);
7931
7932                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7933                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7934                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7935                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7936
7937                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7938                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7939                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7940                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7941
7942                 val = tr32(TG3_CPMU_HST_ACC);
7943                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7944                 val |= CPMU_HST_ACC_MACCLK_6_25;
7945                 tw32(TG3_CPMU_HST_ACC, val);
7946         }
7947
7948         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7949                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
7950                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
7951                        PCIE_PWR_MGMT_L1_THRESH_4MS;
7952                 tw32(PCIE_PWR_MGMT_THRESH, val);
7953
7954                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
7955                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
7956
7957                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
7958
7959                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7960                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7961         }
7962
7963         if (tg3_flag(tp, L1PLLPD_EN)) {
7964                 u32 grc_mode = tr32(GRC_MODE);
7965
7966                 /* Access the lower 1K of PL PCIE block registers. */
7967                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7968                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7969
7970                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
7971                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
7972                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
7973
7974                 tw32(GRC_MODE, grc_mode);
7975         }
7976
7977         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
7978                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7979                         u32 grc_mode = tr32(GRC_MODE);
7980
7981                         /* Access the lower 1K of PL PCIE block registers. */
7982                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7983                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7984
7985                         val = tr32(TG3_PCIE_TLDLPL_PORT +
7986                                    TG3_PCIE_PL_LO_PHYCTL5);
7987                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
7988                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
7989
7990                         tw32(GRC_MODE, grc_mode);
7991                 }
7992
7993                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
7994                         u32 grc_mode = tr32(GRC_MODE);
7995
7996                         /* Access the lower 1K of DL PCIE block registers. */
7997                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7998                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
7999
8000                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8001                                    TG3_PCIE_DL_LO_FTSMAX);
8002                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8003                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8004                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8005
8006                         tw32(GRC_MODE, grc_mode);
8007                 }
8008
8009                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8010                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8011                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8012                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8013         }
8014
8015         /* This works around an issue with Athlon chipsets on
8016          * B3 tigon3 silicon.  This bit has no effect on any
8017          * other revision.  Do not set this bit on PCI Express
8018          * chips, and do not touch the clocks at all if a CPMU is present.
8019          */
8020         if (!tg3_flag(tp, CPMU_PRESENT)) {
8021                 if (!tg3_flag(tp, PCI_EXPRESS))
8022                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8023                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8024         }
8025
8026         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8027             tg3_flag(tp, PCIX_MODE)) {
8028                 val = tr32(TG3PCI_PCISTATE);
8029                 val |= PCISTATE_RETRY_SAME_DMA;
8030                 tw32(TG3PCI_PCISTATE, val);
8031         }
8032
8033         if (tg3_flag(tp, ENABLE_APE)) {
8034                 /* Allow reads and writes to the
8035                  * APE register and memory space.
8036                  */
8037                 val = tr32(TG3PCI_PCISTATE);
8038                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8039                        PCISTATE_ALLOW_APE_SHMEM_WR |
8040                        PCISTATE_ALLOW_APE_PSPACE_WR;
8041                 tw32(TG3PCI_PCISTATE, val);
8042         }
8043
8044         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8045                 /* Enable some hw fixes.  */
8046                 val = tr32(TG3PCI_MSI_DATA);
8047                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8048                 tw32(TG3PCI_MSI_DATA, val);
8049         }
8050
8051         /* Descriptor ring init may make accesses to the
8052          * NIC SRAM area to set up the TX descriptors, so we
8053          * can only do this after the hardware has been
8054          * successfully reset.
8055          */
8056         err = tg3_init_rings(tp);
8057         if (err)
8058                 return err;
8059
8060         if (tg3_flag(tp, 57765_PLUS)) {
8061                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8062                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8063                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8064                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8065                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8066                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8067                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8068                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8069         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8070                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8071                 /* This value is determined during the probe-time DMA
8072                  * engine test, tg3_test_dma.
8073                  */
8074                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8075         }
8076
8077         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8078                           GRC_MODE_4X_NIC_SEND_RINGS |
8079                           GRC_MODE_NO_TX_PHDR_CSUM |
8080                           GRC_MODE_NO_RX_PHDR_CSUM);
8081         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8082
8083         /* Pseudo-header checksum is done by hardware logic and not
8084          * the offload processors, so make the chip do the pseudo-
8085          * header checksums on receive.  For transmit it is more
8086          * convenient to do the pseudo-header checksum in software
8087          * as Linux does that on transmit for us in all cases.
8088          */
8089         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8090
8091         tw32(GRC_MODE,
8092              tp->grc_mode |
8093              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8094
8095         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
8096         val = tr32(GRC_MISC_CFG);
8097         val &= ~0xff;
8098         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
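             /* 66 MHz / (65 + 1) gives a 1 MHz, i.e. 1 usec, timer tick,
              * assuming the usual divide-by-(N + 1) prescaler.
              */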
8099         tw32(GRC_MISC_CFG, val);
8100
8101         /* Initialize MBUF/DESC pool. */
8102         if (tg3_flag(tp, 5750_PLUS)) {
8103                 /* Do nothing.  */
8104         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8105                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8106                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8107                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8108                 else
8109                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8110                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8111                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8112         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8113                 int fw_len;
8114
8115                 fw_len = tp->fw_len;
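                     /* Round the image up to a 128-byte (0x80) boundary. */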
8116                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8117                 tw32(BUFMGR_MB_POOL_ADDR,
8118                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8119                 tw32(BUFMGR_MB_POOL_SIZE,
8120                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8121         }
8122
8123         if (tp->dev->mtu <= ETH_DATA_LEN) {
8124                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8125                      tp->bufmgr_config.mbuf_read_dma_low_water);
8126                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8127                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8128                 tw32(BUFMGR_MB_HIGH_WATER,
8129                      tp->bufmgr_config.mbuf_high_water);
8130         } else {
8131                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8132                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8133                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8134                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8135                 tw32(BUFMGR_MB_HIGH_WATER,
8136                      tp->bufmgr_config.mbuf_high_water_jumbo);
8137         }
8138         tw32(BUFMGR_DMA_LOW_WATER,
8139              tp->bufmgr_config.dma_low_water);
8140         tw32(BUFMGR_DMA_HIGH_WATER,
8141              tp->bufmgr_config.dma_high_water);
8142
8143         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8144         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8145                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8146         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8147             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8148             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8149                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8150         tw32(BUFMGR_MODE, val);
8151         for (i = 0; i < 2000; i++) {
8152                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8153                         break;
8154                 udelay(10);
8155         }
8156         if (i >= 2000) {
8157                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8158                 return -ENODEV;
8159         }
8160
8161         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8162                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8163
8164         tg3_setup_rxbd_thresholds(tp);
8165
8166         /* Initialize TG3_BDINFO's at:
8167          *  RCVDBDI_STD_BD:     standard eth size rx ring
8168          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8169          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8170          *
8171          * like so:
8172          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8173          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8174          *                              ring attribute flags
8175          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8176          *
8177          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8178          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8179          *
8180          * The size of each ring is fixed in the firmware, but the location is
8181          * configurable.
8182          */
8183         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8184              ((u64) tpr->rx_std_mapping >> 32));
8185         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8186              ((u64) tpr->rx_std_mapping & 0xffffffff));
8187         if (!tg3_flag(tp, 5717_PLUS))
8188                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8189                      NIC_SRAM_RX_BUFFER_DESC);
8190
8191         /* Disable the mini ring */
8192         if (!tg3_flag(tp, 5705_PLUS))
8193                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8194                      BDINFO_FLAGS_DISABLED);
8195
8196         /* Program the jumbo buffer descriptor ring control
8197          * blocks on those devices that have them.
8198          */
8199         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8200             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8202                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8203                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8204                              ((u64) tpr->rx_jmb_mapping >> 32));
8205                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8206                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8207                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8208                               BDINFO_FLAGS_MAXLEN_SHIFT;
8209                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8210                              val | BDINFO_FLAGS_USE_EXT_RECV);
8211                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8212                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8213                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8214                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8215                 } else {
8216                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8217                              BDINFO_FLAGS_DISABLED);
8218                 }
8219
8220                 if (tg3_flag(tp, 57765_PLUS)) {
8221                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8222                                 val = TG3_RX_STD_MAX_SIZE_5700;
8223                         else
8224                                 val = TG3_RX_STD_MAX_SIZE_5717;
8225                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8226                         val |= (TG3_RX_STD_DMA_SZ << 2);
8227                 } else
8228                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8229         } else
8230                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8231
8232         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8233
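             /* Publish the initial producer indices so the NIC sees a
              * fully stocked standard ring (and jumbo ring, if enabled).
              */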
8234         tpr->rx_std_prod_idx = tp->rx_pending;
8235         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8236
8237         tpr->rx_jmb_prod_idx =
8238                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8239         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8240
8241         tg3_rings_reset(tp);
8242
8243         /* Initialize MAC address and backoff seed. */
8244         __tg3_set_mac_addr(tp, 0);
8245
8246         /* MTU + ethernet header + FCS + optional VLAN tag */
8247         tw32(MAC_RX_MTU_SIZE,
8248              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8249
8250         /* The slot time is changed by tg3_setup_phy if we
8251          * run at gigabit with half duplex.
8252          */
8253         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8254               (6 << TX_LENGTHS_IPG_SHIFT) |
8255               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8256
8257         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8258                 val |= tr32(MAC_TX_LENGTHS) &
8259                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8260                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8261
8262         tw32(MAC_TX_LENGTHS, val);
8263
8264         /* Receive rules. */
8265         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8266         tw32(RCVLPC_CONFIG, 0x0181);
8267
8268         /* Calculate the RDMAC_MODE setting early; we need it to determine
8269          * the RCVLPC_STATE_ENABLE mask.
8270          */
8271         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8272                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8273                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8274                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8275                       RDMAC_MODE_LNGREAD_ENAB);
8276
8277         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8278                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8279
8280         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8281             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8282             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8283                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8284                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8285                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8286
8287         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8288             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8289                 if (tg3_flag(tp, TSO_CAPABLE) &&
8290                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8291                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8292                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8293                            !tg3_flag(tp, IS_5788)) {
8294                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8295                 }
8296         }
8297
8298         if (tg3_flag(tp, PCI_EXPRESS))
8299                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8300
8301         if (tg3_flag(tp, HW_TSO_1) ||
8302             tg3_flag(tp, HW_TSO_2) ||
8303             tg3_flag(tp, HW_TSO_3))
8304                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8305
8306         if (tg3_flag(tp, 57765_PLUS) ||
8307             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8308             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8309                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8310
8311         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8312                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8313
8314         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8315             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8316             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8317             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8318             tg3_flag(tp, 57765_PLUS)) {
8319                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8320                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8321                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8322                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8323                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8324                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8325                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8326                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8327                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8328                 }
8329                 tw32(TG3_RDMA_RSRVCTRL_REG,
8330                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8331         }
8332
8333         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8334             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8335                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8336                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8337                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8338                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8339         }
8340
8341         /* Receive/send statistics. */
8342         if (tg3_flag(tp, 5750_PLUS)) {
8343                 val = tr32(RCVLPC_STATS_ENABLE);
8344                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8345                 tw32(RCVLPC_STATS_ENABLE, val);
8346         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8347                    tg3_flag(tp, TSO_CAPABLE)) {
8348                 val = tr32(RCVLPC_STATS_ENABLE);
8349                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8350                 tw32(RCVLPC_STATS_ENABLE, val);
8351         } else {
8352                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8353         }
8354         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8355         tw32(SNDDATAI_STATSENAB, 0xffffff);
8356         tw32(SNDDATAI_STATSCTRL,
8357              (SNDDATAI_SCTRL_ENABLE |
8358               SNDDATAI_SCTRL_FASTUPD));
8359
8360         /* Setup host coalescing engine. */
8361         tw32(HOSTCC_MODE, 0);
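8361         /* Writing 0 disables the engine; poll (up to 2000 * 10 us = 20 ms)
8361          * until HOSTCC_MODE_ENABLE reads back clear before the coalescing
8361          * parameters are reprogrammed below.
8361          */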
8362         for (i = 0; i < 2000; i++) {
8363                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8364                         break;
8365                 udelay(10);
8366         }
8367
8368         __tg3_set_coalesce(tp, &tp->coal);
8369
8370         if (!tg3_flag(tp, 5705_PLUS)) {
8371                 /* Status/statistics block address.  See tg3_timer,
8372                  * the tg3_periodic_fetch_stats call there, and
8373                  * tg3_get_stats to see how this works for 5705/5750 chips.
8374                  */
8375                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8376                      ((u64) tp->stats_mapping >> 32));
8377                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8378                      ((u64) tp->stats_mapping & 0xffffffff));
8379                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8380
8381                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8382
8383                 /* Clear statistics and status block memory areas */
8384                 for (i = NIC_SRAM_STATS_BLK;
8385                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8386                      i += sizeof(u32)) {
8387                         tg3_write_mem(tp, i, 0);
8388                         udelay(40);
8389                 }
8390         }
8391
8392         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8393
8394         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8395         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8396         if (!tg3_flag(tp, 5705_PLUS))
8397                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8398
8399         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8400                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8401                 /* reset to prevent losing 1st rx packet intermittently */
8402                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8403                 udelay(10);
8404         }
8405
8406         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8407                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
8408                         MAC_MODE_FHDE_ENABLE;
8409         if (tg3_flag(tp, ENABLE_APE))
8410                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8411         if (!tg3_flag(tp, 5705_PLUS) &&
8412             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8413             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8414                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8415         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR |
8415                MAC_MODE_TXSTAT_CLEAR);
8416         udelay(40);
8417
8418         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8419          * If TG3_FLAG_IS_NIC is zero, we should read the
8420          * register to preserve the GPIO settings for LOMs. The GPIOs,
8421          * whether used as inputs or outputs, are set by boot code after
8422          * reset.
8423          */
8424         if (!tg3_flag(tp, IS_NIC)) {
8425                 u32 gpio_mask;
8426
8427                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8428                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8429                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8430
8431                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8432                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8433                                      GRC_LCLCTRL_GPIO_OUTPUT3;
8434
8435                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8436                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8437
8438                 tp->grc_local_ctrl &= ~gpio_mask;
8439                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8440
8441                 /* GPIO1 must be driven high for eeprom write protect */
8442                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8443                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8444                                                GRC_LCLCTRL_GPIO_OUTPUT1);
8445         }
8446         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8447         udelay(100);
8448
8449         if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8450                 val = tr32(MSGINT_MODE);
8451                 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8452                 tw32(MSGINT_MODE, val);
8453         }
8454
8455         if (!tg3_flag(tp, 5705_PLUS)) {
8456                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8457                 udelay(40);
8458         }
8459
8460         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8461                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8462                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8463                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8464                WDMAC_MODE_LNGREAD_ENAB);
8465
8466         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8467             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8468                 if (tg3_flag(tp, TSO_CAPABLE) &&
8469                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8470                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8471                         /* nothing */
8472                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8473                            !tg3_flag(tp, IS_5788)) {
8474                         val |= WDMAC_MODE_RX_ACCEL;
8475                 }
8476         }
8477
8478         /* Enable host coalescing bug fix */
8479         if (tg3_flag(tp, 5755_PLUS))
8480                 val |= WDMAC_MODE_STATUS_TAG_FIX;
8481
8482         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8483                 val |= WDMAC_MODE_BURST_ALL_DATA;
8484
8485         tw32_f(WDMAC_MODE, val);
8486         udelay(40);
8487
8488         if (tg3_flag(tp, PCIX_MODE)) {
8489                 u16 pcix_cmd;
8490
8491                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8492                                      &pcix_cmd);
8493                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8494                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8495                         pcix_cmd |= PCI_X_CMD_READ_2K;
8496                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8497                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8498                         pcix_cmd |= PCI_X_CMD_READ_2K;
8499                 }
8500                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8501                                       pcix_cmd);
8502         }
8503
8504         tw32_f(RDMAC_MODE, rdmac_mode);
8505         udelay(40);
8506
8507         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8508         if (!tg3_flag(tp, 5705_PLUS))
8509                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8510
8511         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8512                 tw32(SNDDATAC_MODE,
8513                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8514         else
8515                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8516
8517         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8518         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8519         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8520         if (tg3_flag(tp, LRG_PROD_RING_CAP))
8521                 val |= RCVDBDI_MODE_LRG_RING_SZ;
8522         tw32(RCVDBDI_MODE, val);
8523         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8524         if (tg3_flag(tp, HW_TSO_1) ||
8525             tg3_flag(tp, HW_TSO_2) ||
8526             tg3_flag(tp, HW_TSO_3))
8527                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8528         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8529         if (tg3_flag(tp, ENABLE_TSS))
8530                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8531         tw32(SNDBDI_MODE, val);
8532         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8533
8534         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8535                 err = tg3_load_5701_a0_firmware_fix(tp);
8536                 if (err)
8537                         return err;
8538         }
8539
8540         if (tg3_flag(tp, TSO_CAPABLE)) {
8541                 err = tg3_load_tso_firmware(tp);
8542                 if (err)
8543                         return err;
8544         }
8545
8546         tp->tx_mode = TX_MODE_ENABLE;
8547
8548         if (tg3_flag(tp, 5755_PLUS) ||
8549             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8550                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8551
8552         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8553                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8554                 tp->tx_mode &= ~val;
8555                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8556         }
8557
8558         tw32_f(MAC_TX_MODE, tp->tx_mode);
8559         udelay(100);
8560
8561         if (tg3_flag(tp, ENABLE_RSS)) {
8562                 u32 reg = MAC_RSS_INDIR_TBL_0;
8563                 u8 *ent = (u8 *)&val;
8564
8565                 /* Setup the indirection table */
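8565                 /* Each 32-bit table register packs four one-byte ring
8565                  * indices.  Entries are distributed round-robin over the
8565                  * rx rings; vector 0 handles link interrupts only, hence
8565                  * the "irq_cnt - 1" modulus.  val is flushed to a
8565                  * register every sizeof(val) == 4 entries.
8565                  */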
8566                 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8567                         int idx = i % sizeof(val);
8568
8569                         ent[idx] = i % (tp->irq_cnt - 1);
8570                         if (idx == sizeof(val) - 1) {
8571                                 tw32(reg, val);
8572                                 reg += 4;
8573                         }
8574                 }
8575
8576                 /* Setup the "secret" hash key. */
8577                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8578                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8579                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8580                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8581                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8582                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8583                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8584                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8585                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8586                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8587         }
8588
8589         tp->rx_mode = RX_MODE_ENABLE;
8590         if (tg3_flag(tp, 5755_PLUS))
8591                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8592
8593         if (tg3_flag(tp, ENABLE_RSS))
8594                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8595                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
8596                                RX_MODE_RSS_IPV6_HASH_EN |
8597                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
8598                                RX_MODE_RSS_IPV4_HASH_EN |
8599                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
8600
8601         tw32_f(MAC_RX_MODE, tp->rx_mode);
8602         udelay(10);
8603
8604         tw32(MAC_LED_CTRL, tp->led_ctrl);
8605
8606         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8607         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8608                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8609                 udelay(10);
8610         }
8611         tw32_f(MAC_RX_MODE, tp->rx_mode);
8612         udelay(10);
8613
8614         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8615                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8616                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8617                         /* Set drive transmission level to 1.2V  */
8618                         /* only if the signal pre-emphasis bit is not set  */
8619                         val = tr32(MAC_SERDES_CFG);
8620                         val &= 0xfffff000;
8621                         val |= 0x880;
8622                         tw32(MAC_SERDES_CFG, val);
8623                 }
8624                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8625                         tw32(MAC_SERDES_CFG, 0x616000);
8626         }
8627
8628         /* Prevent chip from dropping frames when flow control
8629          * is enabled.
8630          */
8631         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8632                 val = 1;
8633         else
8634                 val = 2;
8635         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8636
8637         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8638             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8639                 /* Use hardware link auto-negotiation */
8640                 tg3_flag_set(tp, HW_AUTONEG);
8641         }
8642
8643         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8644             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
8645                 u32 tmp;
8646
8647                 tmp = tr32(SERDES_RX_CTRL);
8648                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8649                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8650                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8651                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8652         }
8653
8654         if (!tg3_flag(tp, USE_PHYLIB)) {
8655                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8656                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8657                         tp->link_config.speed = tp->link_config.orig_speed;
8658                         tp->link_config.duplex = tp->link_config.orig_duplex;
8659                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
8660                 }
8661
8662                 err = tg3_setup_phy(tp, 0);
8663                 if (err)
8664                         return err;
8665
8666                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8667                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8668                         u32 tmp;
8669
8670                         /* Clear CRC stats. */
8671                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8672                                 tg3_writephy(tp, MII_TG3_TEST1,
8673                                              tmp | MII_TG3_TEST1_CRC_EN);
8674                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8675                         }
8676                 }
8677         }
8678
8679         __tg3_set_rx_mode(tp->dev);
8680
8681         /* Initialize receive rules. */
8682         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
8683         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8684         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
8685         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8686
8687         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
8688                 limit = 8;
8689         else
8690                 limit = 16;
8691         if (tg3_flag(tp, ENABLE_ASF))
8692                 limit -= 4;
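8692         /* The cases below fall through deliberately: rule slots limit-1
8692          * down to 4 are cleared, slots at or above the limit are left
8692          * alone (reserved for ASF firmware when it is enabled), and
8692          * rules 0-3 belong to the driver (0 and 1 were programmed above).
8692          */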
8693         switch (limit) {
8694         case 16:
8695                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
8696         case 15:
8697                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
8698         case 14:
8699                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
8700         case 13:
8701                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
8702         case 12:
8703                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
8704         case 11:
8705                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
8706         case 10:
8707                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
8708         case 9:
8709                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
8710         case 8:
8711                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
8712         case 7:
8713                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
8714         case 6:
8715                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
8716         case 5:
8717                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
8718         case 4:
8719                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
8720         case 3:
8721                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
8722         case 2:
8723         case 1:
8724
8725         default:
8726                 break;
8727         }
8728
8729         if (tg3_flag(tp, ENABLE_APE))
8730                 /* Write our heartbeat update interval to APE. */
8731                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8732                                 APE_HOST_HEARTBEAT_INT_DISABLE);
8733
8734         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8735
8736         return 0;
8737 }
8738
8739 /* Called at device open time to get the chip ready for
8740  * packet processing.  Invoked with tp->lock held.
8741  */
8742 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8743 {
8744         tg3_switch_clocks(tp);
8745
8746         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8747
8748         return tg3_reset_hw(tp, reset_phy);
8749 }
8750
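8750 /* Accumulate a 32-bit hardware counter into a 64-bit high/low software
8750  * counter.  If the updated low half is smaller than the value just
8750  * added, the addition wrapped, so carry into the high half.
8750  */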
8751 #define TG3_STAT_ADD32(PSTAT, REG) \
8752 do {    u32 __val = tr32(REG); \
8753         (PSTAT)->low += __val; \
8754         if ((PSTAT)->low < __val) \
8755                 (PSTAT)->high += 1; \
8756 } while (0)
8757
8758 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8759 {
8760         struct tg3_hw_stats *sp = tp->hw_stats;
8761
8762         if (!netif_carrier_ok(tp->dev))
8763                 return;
8764
8765         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8766         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8767         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8768         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8769         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8770         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8771         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8772         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8773         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8774         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8775         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8776         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8777         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8778
8779         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8780         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8781         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8782         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8783         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8784         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8785         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8786         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8787         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8788         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8789         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8790         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8791         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8792         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8793
8794         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
8795         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
8796             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
8797             tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
8798                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8799         } else {
8800                 u32 val = tr32(HOSTCC_FLOW_ATTN);
8801                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
8802                 if (val) {
8803                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
8804                         sp->rx_discards.low += val;
8805                         if (sp->rx_discards.low < val)
8806                                 sp->rx_discards.high += 1;
8807                 }
8808                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
8809         }
8810         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
8811 }
8812
8813 static void tg3_timer(unsigned long __opaque)
8814 {
8815         struct tg3 *tp = (struct tg3 *) __opaque;
8816
8817         if (tp->irq_sync)
8818                 goto restart_timer;
8819
8820         spin_lock(&tp->lock);
8821
8822         if (!tg3_flag(tp, TAGGED_STATUS)) {
8823                 /* All of this garbage is needed because, when using
8824                  * non-tagged IRQ status, the mailbox/status_block protocol
8825                  * the chip uses with the cpu is race prone.
8826                  */
8827                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
8828                         tw32(GRC_LOCAL_CTRL,
8829                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
8830                 } else {
8831                         tw32(HOSTCC_MODE, tp->coalesce_mode |
8832                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
8833                 }
8834
8835                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8836                         tg3_flag_set(tp, RESTART_TIMER);
8837                         spin_unlock(&tp->lock);
8838                         schedule_work(&tp->reset_task);
8839                         return;
8840                 }
8841         }
8842
8843         /* This part only runs once per second. */
8844         if (!--tp->timer_counter) {
8845                 if (tg3_flag(tp, 5705_PLUS))
8846                         tg3_periodic_fetch_stats(tp);
8847
8848                 if (tp->setlpicnt && !--tp->setlpicnt)
8849                         tg3_phy_eee_enable(tp);
8850
8851                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
8852                         u32 mac_stat;
8853                         int phy_event;
8854
8855                         mac_stat = tr32(MAC_STATUS);
8856
8857                         phy_event = 0;
8858                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
8859                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
8860                                         phy_event = 1;
8861                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
8862                                 phy_event = 1;
8863
8864                         if (phy_event)
8865                                 tg3_setup_phy(tp, 0);
8866                 } else if (tg3_flag(tp, POLL_SERDES)) {
8867                         u32 mac_stat = tr32(MAC_STATUS);
8868                         int need_setup = 0;
8869
8870                         if (netif_carrier_ok(tp->dev) &&
8871                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
8872                                 need_setup = 1;
8873                         }
8874                         if (!netif_carrier_ok(tp->dev) &&
8875                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
8876                                          MAC_STATUS_SIGNAL_DET))) {
8877                                 need_setup = 1;
8878                         }
8879                         if (need_setup) {
8880                                 if (!tp->serdes_counter) {
8881                                         tw32_f(MAC_MODE,
8882                                              (tp->mac_mode &
8883                                               ~MAC_MODE_PORT_MODE_MASK));
8884                                         udelay(40);
8885                                         tw32_f(MAC_MODE, tp->mac_mode);
8886                                         udelay(40);
8887                                 }
8888                                 tg3_setup_phy(tp, 0);
8889                         }
8890                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8891                            tg3_flag(tp, 5780_CLASS)) {
8892                         tg3_serdes_parallel_detect(tp);
8893                 }
8894
8895                 tp->timer_counter = tp->timer_multiplier;
8896         }
8897
8898         /* Heartbeat is only sent once every 2 seconds.
8899          *
8900          * The heartbeat is to tell the ASF firmware that the host
8901          * driver is still alive.  In the event that the OS crashes,
8902          * ASF needs to reset the hardware to free up the FIFO space
8903          * that may be filled with rx packets destined for the host.
8904          * If the FIFO is full, ASF will no longer function properly.
8905          *
8906          * Unintended resets have been reported on real-time kernels
8907          * where the timer doesn't run on time.  Netpoll will also have
8908          * the same problem.
8909          *
8910          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
8911          * to check the ring condition when the heartbeat is expiring
8912          * before doing the reset.  This will prevent most unintended
8913          * resets.
8914          */
8915         if (!--tp->asf_counter) {
8916                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
8917                         tg3_wait_for_event_ack(tp);
8918
8919                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
8920                                       FWCMD_NICDRV_ALIVE3);
8921                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
8922                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
8923                                       TG3_FW_UPDATE_TIMEOUT_SEC);
8924
8925                         tg3_generate_fw_event(tp);
8926                 }
8927                 tp->asf_counter = tp->asf_multiplier;
8928         }
8929
8930         spin_unlock(&tp->lock);
8931
8932 restart_timer:
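8932         /* Always re-arm: even when irq_sync makes us skip the body
8932          * above, the timer keeps ticking every timer_offset jiffies.
8932          */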
8933         tp->timer.expires = jiffies + tp->timer_offset;
8934         add_timer(&tp->timer);
8935 }
8936
8937 static int tg3_request_irq(struct tg3 *tp, int irq_num)
8938 {
8939         irq_handler_t fn;
8940         unsigned long flags;
8941         char *name;
8942         struct tg3_napi *tnapi = &tp->napi[irq_num];
8943
8944         if (tp->irq_cnt == 1)
8945                 name = tp->dev->name;
8946         else {
8947                 name = &tnapi->irq_lbl[0];
8948                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
8949                 name[IFNAMSIZ-1] = 0;
8950         }
8951
8952         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
8953                 fn = tg3_msi;
8954                 if (tg3_flag(tp, 1SHOT_MSI))
8955                         fn = tg3_msi_1shot;
8956                 flags = 0;
8957         } else {
8958                 fn = tg3_interrupt;
8959                 if (tg3_flag(tp, TAGGED_STATUS))
8960                         fn = tg3_interrupt_tagged;
8961                 flags = IRQF_SHARED;
8962         }
8963
8964         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
8965 }
8966
8967 static int tg3_test_interrupt(struct tg3 *tp)
8968 {
8969         struct tg3_napi *tnapi = &tp->napi[0];
8970         struct net_device *dev = tp->dev;
8971         int err, i, intr_ok = 0;
8972         u32 val;
8973
8974         if (!netif_running(dev))
8975                 return -ENODEV;
8976
8977         tg3_disable_ints(tp);
8978
8979         free_irq(tnapi->irq_vec, tnapi);
8980
8981         /*
8982          * Turn off MSI one shot mode.  Otherwise this test has no
8983          * observable way to know whether the interrupt was delivered.
8984          */
8985         if (tg3_flag(tp, 57765_PLUS)) {
8986                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
8987                 tw32(MSGINT_MODE, val);
8988         }
8989
8990         err = request_irq(tnapi->irq_vec, tg3_test_isr,
8991                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
8992         if (err)
8993                 return err;
8994
8995         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
8996         tg3_enable_ints(tp);
8997
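8997         /* HOSTCC_MODE_NOW kicks the coalescing engine so an interrupt
8997          * fires immediately instead of waiting for real traffic.
8997          */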
8998         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8999                tnapi->coal_now);
9000
9001         for (i = 0; i < 5; i++) {
9002                 u32 int_mbox, misc_host_ctrl;
9003
9004                 int_mbox = tr32_mailbox(tnapi->int_mbox);
9005                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9006
9007                 if ((int_mbox != 0) ||
9008                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9009                         intr_ok = 1;
9010                         break;
9011                 }
9012
9013                 if (tg3_flag(tp, 57765_PLUS) &&
9014                     tnapi->hw_status->status_tag != tnapi->last_tag)
9015                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9016
9017                 msleep(10);
9018         }
9019
9020         tg3_disable_ints(tp);
9021
9022         free_irq(tnapi->irq_vec, tnapi);
9023
9024         err = tg3_request_irq(tp, 0);
9025
9026         if (err)
9027                 return err;
9028
9029         if (intr_ok) {
9030                 /* Reenable MSI one shot mode. */
9031                 if (tg3_flag(tp, 57765_PLUS)) {
9032                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9033                         tw32(MSGINT_MODE, val);
9034                 }
9035                 return 0;
9036         }
9037
9038         return -EIO;
9039 }
9040
9041 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
9042  * INTx mode is successfully restored.
9043  */
9044 static int tg3_test_msi(struct tg3 *tp)
9045 {
9046         int err;
9047         u16 pci_cmd;
9048
9049         if (!tg3_flag(tp, USING_MSI))
9050                 return 0;
9051
9052         /* Turn off SERR reporting in case MSI terminates with Master
9053          * Abort.
9054          */
9055         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9056         pci_write_config_word(tp->pdev, PCI_COMMAND,
9057                               pci_cmd & ~PCI_COMMAND_SERR);
9058
9059         err = tg3_test_interrupt(tp);
9060
9061         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9062
9063         if (!err)
9064                 return 0;
9065
9066         /* other failures */
9067         if (err != -EIO)
9068                 return err;
9069
9070         /* MSI test failed, go back to INTx mode */
9071         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9072                     "to INTx mode. Please report this failure to the PCI "
9073                     "maintainer and include system chipset information\n");
9074
9075         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9076
9077         pci_disable_msi(tp->pdev);
9078
9079         tg3_flag_clear(tp, USING_MSI);
9080         tp->napi[0].irq_vec = tp->pdev->irq;
9081
9082         err = tg3_request_irq(tp, 0);
9083         if (err)
9084                 return err;
9085
9086         /* Need to reset the chip because the MSI cycle may have terminated
9087          * with Master Abort.
9088          */
9089         tg3_full_lock(tp, 1);
9090
9091         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9092         err = tg3_init_hw(tp, 1);
9093
9094         tg3_full_unlock(tp);
9095
9096         if (err)
9097                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9098
9099         return err;
9100 }
9101
9102 static int tg3_request_firmware(struct tg3 *tp)
9103 {
9104         const __be32 *fw_data;
9105
9106         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9107                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9108                            tp->fw_needed);
9109                 return -ENOENT;
9110         }
9111
9112         fw_data = (void *)tp->fw->data;
9113
9114         /* Firmware blob starts with version numbers, followed by
9115          * start address and _full_ length including BSS sections
9116          * (which must be longer than the actual data, of course).
9117          */
9118
9119         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
9120         if (tp->fw_len < (tp->fw->size - 12)) {
9121                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9122                            tp->fw_len, tp->fw_needed);
9123                 release_firmware(tp->fw);
9124                 tp->fw = NULL;
9125                 return -EINVAL;
9126         }
9127
9128         /* We no longer need firmware; we have it. */
9129         tp->fw_needed = NULL;
9130         return 0;
9131 }
9132
9133 static bool tg3_enable_msix(struct tg3 *tp)
9134 {
9135         int i, rc, cpus = num_online_cpus();
9136         struct msix_entry msix_ent[tp->irq_max];
9137
9138         if (cpus == 1)
9139                 /* Just fall back to the simpler MSI mode. */
9140                 return false;
9141
9142         /*
9143          * We want as many rx rings enabled as there are cpus.
9144          * The first MSIX vector only deals with link interrupts, etc,
9145          * so we add one to the number of vectors we are requesting.
9146          */
9147         tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
9148
9149         for (i = 0; i < tp->irq_max; i++) {
9150                 msix_ent[i].entry  = i;
9151                 msix_ent[i].vector = 0;
9152         }
9153
9154         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9155         if (rc < 0) {
9156                 return false;
9157         } else if (rc != 0) {
9158                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9159                         return false;
9160                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9161                               tp->irq_cnt, rc);
9162                 tp->irq_cnt = rc;
9163         }
9164
9165         for (i = 0; i < tp->irq_max; i++)
9166                 tp->napi[i].irq_vec = msix_ent[i].vector;
9167
9168         netif_set_real_num_tx_queues(tp->dev, 1);
9169         rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9170         if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9171                 pci_disable_msix(tp->pdev);
9172                 return false;
9173         }
9174
9175         if (tp->irq_cnt > 1) {
9176                 tg3_flag_set(tp, ENABLE_RSS);
9177
9178                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9179                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9180                         tg3_flag_set(tp, ENABLE_TSS);
9181                         netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9182                 }
9183         }
9184
9185         return true;
9186 }
9187
9188 static void tg3_ints_init(struct tg3 *tp)
9189 {
9190         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9191             !tg3_flag(tp, TAGGED_STATUS)) {
9192                 /* All MSI-supporting chips should support tagged
9193                  * status.  Warn and fall back to legacy INTx if not.
9194                  */
9195                 netdev_warn(tp->dev,
9196                             "MSI without TAGGED_STATUS? Not using MSI\n");
9197                 goto defcfg;
9198         }
9199
9200         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9201                 tg3_flag_set(tp, USING_MSIX);
9202         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9203                 tg3_flag_set(tp, USING_MSI);
9204
9205         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9206                 u32 msi_mode = tr32(MSGINT_MODE);
9207                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9208                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9209                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9210         }
9211 defcfg:
9212         if (!tg3_flag(tp, USING_MSIX)) {
9213                 tp->irq_cnt = 1;
9214                 tp->napi[0].irq_vec = tp->pdev->irq;
9215                 netif_set_real_num_tx_queues(tp->dev, 1);
9216                 netif_set_real_num_rx_queues(tp->dev, 1);
9217         }
9218 }
9219
9220 static void tg3_ints_fini(struct tg3 *tp)
9221 {
9222         if (tg3_flag(tp, USING_MSIX))
9223                 pci_disable_msix(tp->pdev);
9224         else if (tg3_flag(tp, USING_MSI))
9225                 pci_disable_msi(tp->pdev);
9226         tg3_flag_clear(tp, USING_MSI);
9227         tg3_flag_clear(tp, USING_MSIX);
9228         tg3_flag_clear(tp, ENABLE_RSS);
9229         tg3_flag_clear(tp, ENABLE_TSS);
9230 }
9231
9232 static int tg3_open(struct net_device *dev)
9233 {
9234         struct tg3 *tp = netdev_priv(dev);
9235         int i, err;
9236
9237         if (tp->fw_needed) {
9238                 err = tg3_request_firmware(tp);
9239                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9240                         if (err)
9241                                 return err;
9242                 } else if (err) {
9243                         netdev_warn(tp->dev, "TSO capability disabled\n");
9244                         tg3_flag_clear(tp, TSO_CAPABLE);
9245                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9246                         netdev_notice(tp->dev, "TSO capability restored\n");
9247                         tg3_flag_set(tp, TSO_CAPABLE);
9248                 }
9249         }
9250
9251         netif_carrier_off(tp->dev);
9252
9253         err = tg3_power_up(tp);
9254         if (err)
9255                 return err;
9256
9257         tg3_full_lock(tp, 0);
9258
9259         tg3_disable_ints(tp);
9260         tg3_flag_clear(tp, INIT_COMPLETE);
9261
9262         tg3_full_unlock(tp);
9263
9264         /*
9265          * Setup interrupts first so we know how
9266          * many NAPI resources to allocate
9267          */
9268         tg3_ints_init(tp);
9269
9270         /* The placement of this call is tied
9271          * to the setup and use of Host TX descriptors.
9272          */
9273         err = tg3_alloc_consistent(tp);
9274         if (err)
9275                 goto err_out1;
9276
9277         tg3_napi_init(tp);
9278
9279         tg3_napi_enable(tp);
9280
9281         for (i = 0; i < tp->irq_cnt; i++) {
9282                 struct tg3_napi *tnapi = &tp->napi[i];
9283                 err = tg3_request_irq(tp, i);
9284                 if (err) {
9285                         /* Unwind only the vectors already requested;
9285                          * tnapi still points at the entry that failed,
9285                          * so re-point it at each earlier instance.
9285                          */
9285                         for (i--; i >= 0; i--) {
9286                                 tnapi = &tp->napi[i];
9286                                 free_irq(tnapi->irq_vec, tnapi);
9286                         }
9287                         break;
9288                 }
9289         }
9290
9291         if (err)
9292                 goto err_out2;
9293
9294         tg3_full_lock(tp, 0);
9295
9296         err = tg3_init_hw(tp, 1);
9297         if (err) {
9298                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9299                 tg3_free_rings(tp);
9300         } else {
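9300                 /* With tagged status, the once-per-second work is all
9300                  * the timer does, so it runs at 1 Hz.  Otherwise it must
9300                  * also poll for the status-block race (see tg3_timer)
9300                  * and runs at 10 Hz.
9300                  */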
9301                 if (tg3_flag(tp, TAGGED_STATUS))
9302                         tp->timer_offset = HZ;
9303                 else
9304                         tp->timer_offset = HZ / 10;
9305
9306                 BUG_ON(tp->timer_offset > HZ);
9307                 tp->timer_counter = tp->timer_multiplier =
9308                         (HZ / tp->timer_offset);
9309                 tp->asf_counter = tp->asf_multiplier =
9310                         ((HZ / tp->timer_offset) * 2);
9311
9312                 init_timer(&tp->timer);
9313                 tp->timer.expires = jiffies + tp->timer_offset;
9314                 tp->timer.data = (unsigned long) tp;
9315                 tp->timer.function = tg3_timer;
9316         }
9317
9318         tg3_full_unlock(tp);
9319
9320         if (err)
9321                 goto err_out3;
9322
9323         if (tg3_flag(tp, USING_MSI)) {
9324                 err = tg3_test_msi(tp);
9325
9326                 if (err) {
9327                         tg3_full_lock(tp, 0);
9328                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9329                         tg3_free_rings(tp);
9330                         tg3_full_unlock(tp);
9331
9332                         goto err_out2;
9333                 }
9334
9335                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9336                         u32 val = tr32(PCIE_TRANSACTION_CFG);
9337
9338                         tw32(PCIE_TRANSACTION_CFG,
9339                              val | PCIE_TRANS_CFG_1SHOT_MSI);
9340                 }
9341         }
9342
9343         tg3_phy_start(tp);
9344
9345         tg3_full_lock(tp, 0);
9346
9347         add_timer(&tp->timer);
9348         tg3_flag_set(tp, INIT_COMPLETE);
9349         tg3_enable_ints(tp);
9350
9351         tg3_full_unlock(tp);
9352
9353         netif_tx_start_all_queues(dev);
9354
9355         /*
9356          * Reset the loopback feature if it was turned on while the device
9357          * was down, to make sure that it is installed properly now.
9358          */
9359         if (dev->features & NETIF_F_LOOPBACK)
9360                 tg3_set_loopback(dev, dev->features);
9361
9362         return 0;
9363
9364 err_out3:
9365         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9366                 struct tg3_napi *tnapi = &tp->napi[i];
9367                 free_irq(tnapi->irq_vec, tnapi);
9368         }
9369
9370 err_out2:
9371         tg3_napi_disable(tp);
9372         tg3_napi_fini(tp);
9373         tg3_free_consistent(tp);
9374
9375 err_out1:
9376         tg3_ints_fini(tp);
9377         return err;
9378 }
9379
9380 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9381                                                  struct rtnl_link_stats64 *);
9382 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9383
9384 static int tg3_close(struct net_device *dev)
9385 {
9386         int i;
9387         struct tg3 *tp = netdev_priv(dev);
9388
9389         tg3_napi_disable(tp);
9390         cancel_work_sync(&tp->reset_task);
9391
9392         netif_tx_stop_all_queues(dev);
9393
9394         del_timer_sync(&tp->timer);
9395
9396         tg3_phy_stop(tp);
9397
9398         tg3_full_lock(tp, 1);
9399
9400         tg3_disable_ints(tp);
9401
9402         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9403         tg3_free_rings(tp);
9404         tg3_flag_clear(tp, INIT_COMPLETE);
9405
9406         tg3_full_unlock(tp);
9407
9408         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9409                 struct tg3_napi *tnapi = &tp->napi[i];
9410                 free_irq(tnapi->irq_vec, tnapi);
9411         }
9412
9413         tg3_ints_fini(tp);
9414
9415         tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9416
9417         memcpy(&tp->estats_prev, tg3_get_estats(tp),
9418                sizeof(tp->estats_prev));
9419
9420         tg3_napi_fini(tp);
9421
9422         tg3_free_consistent(tp);
9423
9424         tg3_power_down(tp);
9425
9426         netif_carrier_off(tp->dev);
9427
9428         return 0;
9429 }
9430
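9430 /* Combine the high/low 32-bit halves of a tg3 64-bit statistic into
9430  * a single u64.
9430  */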
9431 static inline u64 get_stat64(tg3_stat64_t *val)
9432 {
9433         return ((u64)val->high << 32) | ((u64)val->low);
9434 }
9435
9436 static u64 calc_crc_errors(struct tg3 *tp)
9437 {
9438         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9439
9440         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9441             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9442              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9443                 u32 val;
9444
9445                 spin_lock_bh(&tp->lock);
9446                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9447                         tg3_writephy(tp, MII_TG3_TEST1,
9448                                      val | MII_TG3_TEST1_CRC_EN);
9449                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9450                 } else
9451                         val = 0;
9452                 spin_unlock_bh(&tp->lock);
9453
9454                 tp->phy_crc_errors += val;
9455
9456                 return tp->phy_crc_errors;
9457         }
9458
9459         return get_stat64(&hw_stats->rx_fcs_errors);
9460 }
9461
9462 #define ESTAT_ADD(member) \
9463         estats->member =        old_estats->member + \
9464                                 get_stat64(&hw_stats->member)
9465
9466 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9467 {
9468         struct tg3_ethtool_stats *estats = &tp->estats;
9469         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9470         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9471
9472         if (!hw_stats)
9473                 return old_estats;
9474
9475         ESTAT_ADD(rx_octets);
9476         ESTAT_ADD(rx_fragments);
9477         ESTAT_ADD(rx_ucast_packets);
9478         ESTAT_ADD(rx_mcast_packets);
9479         ESTAT_ADD(rx_bcast_packets);
9480         ESTAT_ADD(rx_fcs_errors);
9481         ESTAT_ADD(rx_align_errors);
9482         ESTAT_ADD(rx_xon_pause_rcvd);
9483         ESTAT_ADD(rx_xoff_pause_rcvd);
9484         ESTAT_ADD(rx_mac_ctrl_rcvd);
9485         ESTAT_ADD(rx_xoff_entered);
9486         ESTAT_ADD(rx_frame_too_long_errors);
9487         ESTAT_ADD(rx_jabbers);
9488         ESTAT_ADD(rx_undersize_packets);
9489         ESTAT_ADD(rx_in_length_errors);
9490         ESTAT_ADD(rx_out_length_errors);
9491         ESTAT_ADD(rx_64_or_less_octet_packets);
9492         ESTAT_ADD(rx_65_to_127_octet_packets);
9493         ESTAT_ADD(rx_128_to_255_octet_packets);
9494         ESTAT_ADD(rx_256_to_511_octet_packets);
9495         ESTAT_ADD(rx_512_to_1023_octet_packets);
9496         ESTAT_ADD(rx_1024_to_1522_octet_packets);
9497         ESTAT_ADD(rx_1523_to_2047_octet_packets);
9498         ESTAT_ADD(rx_2048_to_4095_octet_packets);
9499         ESTAT_ADD(rx_4096_to_8191_octet_packets);
9500         ESTAT_ADD(rx_8192_to_9022_octet_packets);
9501
9502         ESTAT_ADD(tx_octets);
9503         ESTAT_ADD(tx_collisions);
9504         ESTAT_ADD(tx_xon_sent);
9505         ESTAT_ADD(tx_xoff_sent);
9506         ESTAT_ADD(tx_flow_control);
9507         ESTAT_ADD(tx_mac_errors);
9508         ESTAT_ADD(tx_single_collisions);
9509         ESTAT_ADD(tx_mult_collisions);
9510         ESTAT_ADD(tx_deferred);
9511         ESTAT_ADD(tx_excessive_collisions);
9512         ESTAT_ADD(tx_late_collisions);
9513         ESTAT_ADD(tx_collide_2times);
9514         ESTAT_ADD(tx_collide_3times);
9515         ESTAT_ADD(tx_collide_4times);
9516         ESTAT_ADD(tx_collide_5times);
9517         ESTAT_ADD(tx_collide_6times);
9518         ESTAT_ADD(tx_collide_7times);
9519         ESTAT_ADD(tx_collide_8times);
9520         ESTAT_ADD(tx_collide_9times);
9521         ESTAT_ADD(tx_collide_10times);
9522         ESTAT_ADD(tx_collide_11times);
9523         ESTAT_ADD(tx_collide_12times);
9524         ESTAT_ADD(tx_collide_13times);
9525         ESTAT_ADD(tx_collide_14times);
9526         ESTAT_ADD(tx_collide_15times);
9527         ESTAT_ADD(tx_ucast_packets);
9528         ESTAT_ADD(tx_mcast_packets);
9529         ESTAT_ADD(tx_bcast_packets);
9530         ESTAT_ADD(tx_carrier_sense_errors);
9531         ESTAT_ADD(tx_discards);
9532         ESTAT_ADD(tx_errors);
9533
9534         ESTAT_ADD(dma_writeq_full);
9535         ESTAT_ADD(dma_write_prioq_full);
9536         ESTAT_ADD(rxbds_empty);
9537         ESTAT_ADD(rx_discards);
9538         ESTAT_ADD(rx_errors);
9539         ESTAT_ADD(rx_threshold_hit);
9540
9541         ESTAT_ADD(dma_readq_full);
9542         ESTAT_ADD(dma_read_prioq_full);
9543         ESTAT_ADD(tx_comp_queue_full);
9544
9545         ESTAT_ADD(ring_set_send_prod_index);
9546         ESTAT_ADD(ring_status_update);
9547         ESTAT_ADD(nic_irqs);
9548         ESTAT_ADD(nic_avoided_irqs);
9549         ESTAT_ADD(nic_tx_threshold_hit);
9550
9551         ESTAT_ADD(mbuf_lwm_thresh_hit);
9552
9553         return estats;
9554 }
9555
9556 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9557                                                  struct rtnl_link_stats64 *stats)
9558 {
9559         struct tg3 *tp = netdev_priv(dev);
9560         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9561         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9562
9563         if (!hw_stats)
9564                 return old_stats;
9565
9566         stats->rx_packets = old_stats->rx_packets +
9567                 get_stat64(&hw_stats->rx_ucast_packets) +
9568                 get_stat64(&hw_stats->rx_mcast_packets) +
9569                 get_stat64(&hw_stats->rx_bcast_packets);
9570
9571         stats->tx_packets = old_stats->tx_packets +
9572                 get_stat64(&hw_stats->tx_ucast_packets) +
9573                 get_stat64(&hw_stats->tx_mcast_packets) +
9574                 get_stat64(&hw_stats->tx_bcast_packets);
9575
9576         stats->rx_bytes = old_stats->rx_bytes +
9577                 get_stat64(&hw_stats->rx_octets);
9578         stats->tx_bytes = old_stats->tx_bytes +
9579                 get_stat64(&hw_stats->tx_octets);
9580
9581         stats->rx_errors = old_stats->rx_errors +
9582                 get_stat64(&hw_stats->rx_errors);
9583         stats->tx_errors = old_stats->tx_errors +
9584                 get_stat64(&hw_stats->tx_errors) +
9585                 get_stat64(&hw_stats->tx_mac_errors) +
9586                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9587                 get_stat64(&hw_stats->tx_discards);
9588
9589         stats->multicast = old_stats->multicast +
9590                 get_stat64(&hw_stats->rx_mcast_packets);
9591         stats->collisions = old_stats->collisions +
9592                 get_stat64(&hw_stats->tx_collisions);
9593
9594         stats->rx_length_errors = old_stats->rx_length_errors +
9595                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9596                 get_stat64(&hw_stats->rx_undersize_packets);
9597
9598         stats->rx_over_errors = old_stats->rx_over_errors +
9599                 get_stat64(&hw_stats->rxbds_empty);
9600         stats->rx_frame_errors = old_stats->rx_frame_errors +
9601                 get_stat64(&hw_stats->rx_align_errors);
9602         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9603                 get_stat64(&hw_stats->tx_discards);
9604         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9605                 get_stat64(&hw_stats->tx_carrier_sense_errors);
9606
9607         stats->rx_crc_errors = old_stats->rx_crc_errors +
9608                 calc_crc_errors(tp);
9609
9610         stats->rx_missed_errors = old_stats->rx_missed_errors +
9611                 get_stat64(&hw_stats->rx_discards);
9612
9613         stats->rx_dropped = tp->rx_dropped;
9614
9615         return stats;
9616 }
9617
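9617 /* Bit-reflected CRC-32 over the buffer (Ethernet polynomial,
9617  * 0xedb88320), computed bit by bit, LSB first.  Used below to hash
9617  * multicast addresses into the 128-bit MAC hash filter.
9617  */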
9618 static inline u32 calc_crc(unsigned char *buf, int len)
9619 {
9620         u32 reg;
9621         u32 tmp;
9622         int j, k;
9623
9624         reg = 0xffffffff;
9625
9626         for (j = 0; j < len; j++) {
9627                 reg ^= buf[j];
9628
9629                 for (k = 0; k < 8; k++) {
9630                         tmp = reg & 0x01;
9631
9632                         reg >>= 1;
9633
9634                         if (tmp)
9635                                 reg ^= 0xedb88320;
9636                 }
9637         }
9638
9639         return ~reg;
9640 }
9641
9642 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9643 {
9644         /* accept or reject all multicast frames */
9645         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9646         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9647         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9648         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9649 }
9650
9651 static void __tg3_set_rx_mode(struct net_device *dev)
9652 {
9653         struct tg3 *tp = netdev_priv(dev);
9654         u32 rx_mode;
9655
9656         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9657                                   RX_MODE_KEEP_VLAN_TAG);
9658
9659 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9660         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9661          * flag clear.
9662          */
9663         if (!tg3_flag(tp, ENABLE_ASF))
9664                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9665 #endif
9666
9667         if (dev->flags & IFF_PROMISC) {
9668                 /* Promiscuous mode. */
9669                 rx_mode |= RX_MODE_PROMISC;
9670         } else if (dev->flags & IFF_ALLMULTI) {
9671                 /* Accept all multicast. */
9672                 tg3_set_multi(tp, 1);
9673         } else if (netdev_mc_empty(dev)) {
9674                 /* Reject all multicast. */
9675                 tg3_set_multi(tp, 0);
9676         } else {
9677                 /* Accept one or more multicast(s). */
9678                 struct netdev_hw_addr *ha;
9679                 u32 mc_filter[4] = { 0, };
9680                 u32 regidx;
9681                 u32 bit;
9682                 u32 crc;
9683
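9683                 /* The low 7 bits of the inverted CRC pick one of 128
9683                  * filter bits: bits 6:5 select one of the four 32-bit
9683                  * MAC_HASH registers, bits 4:0 the bit within it.
9683                  */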
9684                 netdev_for_each_mc_addr(ha, dev) {
9685                         crc = calc_crc(ha->addr, ETH_ALEN);
9686                         bit = ~crc & 0x7f;
9687                         regidx = (bit & 0x60) >> 5;
9688                         bit &= 0x1f;
9689                         mc_filter[regidx] |= (1 << bit);
9690                 }
9691
9692                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9693                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9694                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9695                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9696         }
9697
9698         if (rx_mode != tp->rx_mode) {
9699                 tp->rx_mode = rx_mode;
9700                 tw32_f(MAC_RX_MODE, rx_mode);
9701                 udelay(10);
9702         }
9703 }
9704
9705 static void tg3_set_rx_mode(struct net_device *dev)
9706 {
9707         struct tg3 *tp = netdev_priv(dev);
9708
9709         if (!netif_running(dev))
9710                 return;
9711
9712         tg3_full_lock(tp, 0);
9713         __tg3_set_rx_mode(dev);
9714         tg3_full_unlock(tp);
9715 }
9716
9717 static int tg3_get_regs_len(struct net_device *dev)
9718 {
9719         return TG3_REG_BLK_SIZE;
9720 }
9721
9722 static void tg3_get_regs(struct net_device *dev,
9723                 struct ethtool_regs *regs, void *_p)
9724 {
9725         struct tg3 *tp = netdev_priv(dev);
9726
9727         regs->version = 0;
9728
9729         memset(_p, 0, TG3_REG_BLK_SIZE);
9730
9731         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9732                 return;
9733
9734         tg3_full_lock(tp, 0);
9735
9736         tg3_dump_legacy_regs(tp, (u32 *)_p);
9737
9738         tg3_full_unlock(tp);
9739 }
9740
9741 static int tg3_get_eeprom_len(struct net_device *dev)
9742 {
9743         struct tg3 *tp = netdev_priv(dev);
9744
9745         return tp->nvram_size;
9746 }
9747
9748 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9749 {
9750         struct tg3 *tp = netdev_priv(dev);
9751         int ret;
9752         u8  *pd;
9753         u32 i, offset, len, b_offset, b_count;
9754         __be32 val;
9755
9756         if (tg3_flag(tp, NO_NVRAM))
9757                 return -EINVAL;
9758
9759         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9760                 return -EAGAIN;
9761
9762         offset = eeprom->offset;
9763         len = eeprom->len;
9764         eeprom->len = 0;
9765
9766         eeprom->magic = TG3_EEPROM_MAGIC;
9767
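        /* NVRAM is accessed a 32-bit word at a time, so split the
         * request into an unaligned head, a run of whole words, and an
         * unaligned tail.  E.g. offset=5, len=10 reads the words at
         * offsets 4, 8 and 12 and copies out bytes 5..14.
         */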
9768         if (offset & 3) {
9769                 /* adjustments to start on the required 4-byte boundary */
9770                 b_offset = offset & 3;
9771                 b_count = 4 - b_offset;
9772                 if (b_count > len) {
9773                         /* e.g. offset=1, len=2: the request ends inside this word */
9774                         b_count = len;
9775                 }
9776                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9777                 if (ret)
9778                         return ret;
9779                 memcpy(data, ((char *)&val) + b_offset, b_count);
9780                 len -= b_count;
9781                 offset += b_count;
9782                 eeprom->len += b_count;
9783         }
9784
9785         /* read whole words up to the last 4-byte boundary */
9786         pd = &data[eeprom->len];
9787         for (i = 0; i < (len - (len & 3)); i += 4) {
9788                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9789                 if (ret) {
9790                         eeprom->len += i;
9791                         return ret;
9792                 }
9793                 memcpy(pd + i, &val, 4);
9794         }
9795         eeprom->len += i;
9796
9797         if (len & 3) {
9798                 /* read the final bytes that do not end on a 4-byte boundary */
9799                 pd = &data[eeprom->len];
9800                 b_count = len & 3;
9801                 b_offset = offset + len - b_count;
9802                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9803                 if (ret)
9804                         return ret;
9805                 memcpy(pd, &val, b_count);
9806                 eeprom->len += b_count;
9807         }
9808         return 0;
9809 }
9810
9811 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9812
9813 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9814 {
9815         struct tg3 *tp = netdev_priv(dev);
9816         int ret;
9817         u32 offset, len, b_offset, odd_len;
9818         u8 *buf;
9819         __be32 start, end;
9820
9821         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9822                 return -EAGAIN;
9823
9824         if (tg3_flag(tp, NO_NVRAM) ||
9825             eeprom->magic != TG3_EEPROM_MAGIC)
9826                 return -EINVAL;
9827
9828         offset = eeprom->offset;
9829         len = eeprom->len;
9830
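        /* Writes are word-based too: widen an unaligned request to whole
         * words by pre-reading the bordering words into start/end and
         * splicing the caller's bytes into a bounce buffer below.
         */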
9831         if ((b_offset = (offset & 3))) {
9832                 /* adjustments to start on the required 4-byte boundary */
9833                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
9834                 if (ret)
9835                         return ret;
9836                 len += b_offset;
9837                 offset &= ~3;
9838                 if (len < 4)
9839                         len = 4;
9840         }
9841
9842         odd_len = 0;
9843         if (len & 3) {
9844                 /* adjustments to end on the required 4-byte boundary */
9845                 odd_len = 1;
9846                 len = (len + 3) & ~3;
9847                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
9848                 if (ret)
9849                         return ret;
9850         }
9851
9852         buf = data;
9853         if (b_offset || odd_len) {
9854                 buf = kmalloc(len, GFP_KERNEL);
9855                 if (!buf)
9856                         return -ENOMEM;
9857                 if (b_offset)
9858                         memcpy(buf, &start, 4);
9859                 if (odd_len)
9860                         memcpy(buf+len-4, &end, 4);
9861                 memcpy(buf + b_offset, data, eeprom->len);
9862         }
9863
9864         ret = tg3_nvram_write_block(tp, offset, len, buf);
9865
9866         if (buf != data)
9867                 kfree(buf);
9868
9869         return ret;
9870 }
9871
9872 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9873 {
9874         struct tg3 *tp = netdev_priv(dev);
9875
9876         if (tg3_flag(tp, USE_PHYLIB)) {
9877                 struct phy_device *phydev;
9878                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9879                         return -EAGAIN;
9880                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9881                 return phy_ethtool_gset(phydev, cmd);
9882         }
9883
9884         cmd->supported = (SUPPORTED_Autoneg);
9885
9886         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9887                 cmd->supported |= (SUPPORTED_1000baseT_Half |
9888                                    SUPPORTED_1000baseT_Full);
9889
9890         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
9891                 cmd->supported |= (SUPPORTED_100baseT_Half |
9892                                   SUPPORTED_100baseT_Full |
9893                                   SUPPORTED_10baseT_Half |
9894                                   SUPPORTED_10baseT_Full |
9895                                   SUPPORTED_TP);
9896                 cmd->port = PORT_TP;
9897         } else {
9898                 cmd->supported |= SUPPORTED_FIBRE;
9899                 cmd->port = PORT_FIBRE;
9900         }
9901
9902         cmd->advertising = tp->link_config.advertising;
9903         if (netif_running(dev)) {
9904                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
9905                 cmd->duplex = tp->link_config.active_duplex;
9906         } else {
9907                 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
9908                 cmd->duplex = DUPLEX_INVALID;
9909         }
9910         cmd->phy_address = tp->phy_addr;
9911         cmd->transceiver = XCVR_INTERNAL;
9912         cmd->autoneg = tp->link_config.autoneg;
9913         cmd->maxtxpkt = 0;
9914         cmd->maxrxpkt = 0;
9915         return 0;
9916 }
9917
9918 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9919 {
9920         struct tg3 *tp = netdev_priv(dev);
9921         u32 speed = ethtool_cmd_speed(cmd);
9922
9923         if (tg3_flag(tp, USE_PHYLIB)) {
9924                 struct phy_device *phydev;
9925                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9926                         return -EAGAIN;
9927                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9928                 return phy_ethtool_sset(phydev, cmd);
9929         }
9930
9931         if (cmd->autoneg != AUTONEG_ENABLE &&
9932             cmd->autoneg != AUTONEG_DISABLE)
9933                 return -EINVAL;
9934
9935         if (cmd->autoneg == AUTONEG_DISABLE &&
9936             cmd->duplex != DUPLEX_FULL &&
9937             cmd->duplex != DUPLEX_HALF)
9938                 return -EINVAL;
9939
9940         if (cmd->autoneg == AUTONEG_ENABLE) {
9941                 u32 mask = ADVERTISED_Autoneg |
9942                            ADVERTISED_Pause |
9943                            ADVERTISED_Asym_Pause;
9944
9945                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9946                         mask |= ADVERTISED_1000baseT_Half |
9947                                 ADVERTISED_1000baseT_Full;
9948
9949                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
9950                         mask |= ADVERTISED_100baseT_Half |
9951                                 ADVERTISED_100baseT_Full |
9952                                 ADVERTISED_10baseT_Half |
9953                                 ADVERTISED_10baseT_Full |
9954                                 ADVERTISED_TP;
9955                 else
9956                         mask |= ADVERTISED_FIBRE;
9957
9958                 if (cmd->advertising & ~mask)
9959                         return -EINVAL;
9960
9961                 mask &= (ADVERTISED_1000baseT_Half |
9962                          ADVERTISED_1000baseT_Full |
9963                          ADVERTISED_100baseT_Half |
9964                          ADVERTISED_100baseT_Full |
9965                          ADVERTISED_10baseT_Half |
9966                          ADVERTISED_10baseT_Full);
9967
9968                 cmd->advertising &= mask;
9969         } else {
9970                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
9971                         if (speed != SPEED_1000)
9972                                 return -EINVAL;
9973
9974                         if (cmd->duplex != DUPLEX_FULL)
9975                                 return -EINVAL;
9976                 } else {
9977                         if (speed != SPEED_100 &&
9978                             speed != SPEED_10)
9979                                 return -EINVAL;
9980                 }
9981         }
9982
9983         tg3_full_lock(tp, 0);
9984
9985         tp->link_config.autoneg = cmd->autoneg;
9986         if (cmd->autoneg == AUTONEG_ENABLE) {
9987                 tp->link_config.advertising = (cmd->advertising |
9988                                               ADVERTISED_Autoneg);
9989                 tp->link_config.speed = SPEED_INVALID;
9990                 tp->link_config.duplex = DUPLEX_INVALID;
9991         } else {
9992                 tp->link_config.advertising = 0;
9993                 tp->link_config.speed = speed;
9994                 tp->link_config.duplex = cmd->duplex;
9995         }
9996
9997         tp->link_config.orig_speed = tp->link_config.speed;
9998         tp->link_config.orig_duplex = tp->link_config.duplex;
9999         tp->link_config.orig_autoneg = tp->link_config.autoneg;
10000
10001         if (netif_running(dev))
10002                 tg3_setup_phy(tp, 1);
10003
10004         tg3_full_unlock(tp);
10005
10006         return 0;
10007 }
10008
10009 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10010 {
10011         struct tg3 *tp = netdev_priv(dev);
10012
10013         strcpy(info->driver, DRV_MODULE_NAME);
10014         strcpy(info->version, DRV_MODULE_VERSION);
10015         strcpy(info->fw_version, tp->fw_ver);
10016         strcpy(info->bus_info, pci_name(tp->pdev));
10017 }
10018
10019 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10020 {
10021         struct tg3 *tp = netdev_priv(dev);
10022
10023         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10024                 wol->supported = WAKE_MAGIC;
10025         else
10026                 wol->supported = 0;
10027         wol->wolopts = 0;
10028         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10029                 wol->wolopts = WAKE_MAGIC;
10030         memset(&wol->sopass, 0, sizeof(wol->sopass));
10031 }
10032
10033 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10034 {
10035         struct tg3 *tp = netdev_priv(dev);
10036         struct device *dp = &tp->pdev->dev;
10037
10038         if (wol->wolopts & ~WAKE_MAGIC)
10039                 return -EINVAL;
10040         if ((wol->wolopts & WAKE_MAGIC) &&
10041             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10042                 return -EINVAL;
10043
10044         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10045
10046         spin_lock_bh(&tp->lock);
10047         if (device_may_wakeup(dp))
10048                 tg3_flag_set(tp, WOL_ENABLE);
10049         else
10050                 tg3_flag_clear(tp, WOL_ENABLE);
10051         spin_unlock_bh(&tp->lock);
10052
10053         return 0;
10054 }
10055
10056 static u32 tg3_get_msglevel(struct net_device *dev)
10057 {
10058         struct tg3 *tp = netdev_priv(dev);
10059         return tp->msg_enable;
10060 }
10061
10062 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10063 {
10064         struct tg3 *tp = netdev_priv(dev);
10065         tp->msg_enable = value;
10066 }
10067
10068 static int tg3_nway_reset(struct net_device *dev)
10069 {
10070         struct tg3 *tp = netdev_priv(dev);
10071         int r;
10072
10073         if (!netif_running(dev))
10074                 return -EAGAIN;
10075
10076         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10077                 return -EINVAL;
10078
10079         if (tg3_flag(tp, USE_PHYLIB)) {
10080                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10081                         return -EAGAIN;
10082                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10083         } else {
10084                 u32 bmcr;
10085
10086                 spin_lock_bh(&tp->lock);
10087                 r = -EINVAL;
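                /* BMCR is read twice; the first result is discarded
                 * (presumably to flush stale data from the PHY).
                 */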
10088                 tg3_readphy(tp, MII_BMCR, &bmcr);
10089                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10090                     ((bmcr & BMCR_ANENABLE) ||
10091                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10092                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10093                                                    BMCR_ANENABLE);
10094                         r = 0;
10095                 }
10096                 spin_unlock_bh(&tp->lock);
10097         }
10098
10099         return r;
10100 }
10101
10102 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10103 {
10104         struct tg3 *tp = netdev_priv(dev);
10105
10106         ering->rx_max_pending = tp->rx_std_ring_mask;
10107         ering->rx_mini_max_pending = 0;
10108         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10109                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10110         else
10111                 ering->rx_jumbo_max_pending = 0;
10112
10113         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10114
10115         ering->rx_pending = tp->rx_pending;
10116         ering->rx_mini_pending = 0;
10117         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10118                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10119         else
10120                 ering->rx_jumbo_pending = 0;
10121
10122         ering->tx_pending = tp->napi[0].tx_pending;
10123 }
10124
10125 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10126 {
10127         struct tg3 *tp = netdev_priv(dev);
10128         int i, irq_sync = 0, err = 0;
10129
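        /* Reject sizes beyond the ring masks, and TX rings too small to
         * hold one maximally fragmented skb (MAX_SKB_FRAGS descriptors,
         * or three times that on chips with the TSO_BUG workaround).
         */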
10130         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10131             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10132             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10133             (ering->tx_pending <= MAX_SKB_FRAGS) ||
10134             (tg3_flag(tp, TSO_BUG) &&
10135              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10136                 return -EINVAL;
10137
10138         if (netif_running(dev)) {
10139                 tg3_phy_stop(tp);
10140                 tg3_netif_stop(tp);
10141                 irq_sync = 1;
10142         }
10143
10144         tg3_full_lock(tp, irq_sync);
10145
10146         tp->rx_pending = ering->rx_pending;
10147
10148         if (tg3_flag(tp, MAX_RXPEND_64) &&
10149             tp->rx_pending > 63)
10150                 tp->rx_pending = 63;
10151         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10152
10153         for (i = 0; i < tp->irq_max; i++)
10154                 tp->napi[i].tx_pending = ering->tx_pending;
10155
10156         if (netif_running(dev)) {
10157                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10158                 err = tg3_restart_hw(tp, 1);
10159                 if (!err)
10160                         tg3_netif_start(tp);
10161         }
10162
10163         tg3_full_unlock(tp);
10164
10165         if (irq_sync && !err)
10166                 tg3_phy_start(tp);
10167
10168         return err;
10169 }
10170
10171 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10172 {
10173         struct tg3 *tp = netdev_priv(dev);
10174
10175         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10176
10177         if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10178                 epause->rx_pause = 1;
10179         else
10180                 epause->rx_pause = 0;
10181
10182         if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10183                 epause->tx_pause = 1;
10184         else
10185                 epause->tx_pause = 0;
10186 }
10187
10188 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10189 {
10190         struct tg3 *tp = netdev_priv(dev);
10191         int err = 0;
10192
10193         if (tg3_flag(tp, USE_PHYLIB)) {
10194                 u32 newadv;
10195                 struct phy_device *phydev;
10196
10197                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10198
10199                 if (!(phydev->supported & SUPPORTED_Pause) ||
10200                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10201                      (epause->rx_pause != epause->tx_pause)))
10202                         return -EINVAL;
10203
10204                 tp->link_config.flowctrl = 0;
10205                 if (epause->rx_pause) {
10206                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10207
10208                         if (epause->tx_pause) {
10209                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10210                                 newadv = ADVERTISED_Pause;
10211                         } else
10212                                 newadv = ADVERTISED_Pause |
10213                                          ADVERTISED_Asym_Pause;
10214                 } else if (epause->tx_pause) {
10215                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10216                         newadv = ADVERTISED_Asym_Pause;
10217                 } else
10218                         newadv = 0;
10219
10220                 if (epause->autoneg)
10221                         tg3_flag_set(tp, PAUSE_AUTONEG);
10222                 else
10223                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10224
10225                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10226                         u32 oldadv = phydev->advertising &
10227                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10228                         if (oldadv != newadv) {
10229                                 phydev->advertising &=
10230                                         ~(ADVERTISED_Pause |
10231                                           ADVERTISED_Asym_Pause);
10232                                 phydev->advertising |= newadv;
10233                                 if (phydev->autoneg) {
10234                                         /*
10235                                          * Always renegotiate the link to
10236                                          * inform our link partner of our
10237                                          * flow control settings, even if the
10238                                          * flow control is forced.  Let
10239                                          * tg3_adjust_link() do the final
10240                                          * flow control setup.
10241                                          */
10242                                         return phy_start_aneg(phydev);
10243                                 }
10244                         }
10245
10246                         if (!epause->autoneg)
10247                                 tg3_setup_flow_control(tp, 0, 0);
10248                 } else {
10249                         tp->link_config.orig_advertising &=
10250                                         ~(ADVERTISED_Pause |
10251                                           ADVERTISED_Asym_Pause);
10252                         tp->link_config.orig_advertising |= newadv;
10253                 }
10254         } else {
10255                 int irq_sync = 0;
10256
10257                 if (netif_running(dev)) {
10258                         tg3_netif_stop(tp);
10259                         irq_sync = 1;
10260                 }
10261
10262                 tg3_full_lock(tp, irq_sync);
10263
10264                 if (epause->autoneg)
10265                         tg3_flag_set(tp, PAUSE_AUTONEG);
10266                 else
10267                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10268                 if (epause->rx_pause)
10269                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10270                 else
10271                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10272                 if (epause->tx_pause)
10273                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10274                 else
10275                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10276
10277                 if (netif_running(dev)) {
10278                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10279                         err = tg3_restart_hw(tp, 1);
10280                         if (!err)
10281                                 tg3_netif_start(tp);
10282                 }
10283
10284                 tg3_full_unlock(tp);
10285         }
10286
10287         return err;
10288 }
10289
10290 static int tg3_get_sset_count(struct net_device *dev, int sset)
10291 {
10292         switch (sset) {
10293         case ETH_SS_TEST:
10294                 return TG3_NUM_TEST;
10295         case ETH_SS_STATS:
10296                 return TG3_NUM_STATS;
10297         default:
10298                 return -EOPNOTSUPP;
10299         }
10300 }
10301
10302 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10303 {
10304         switch (stringset) {
10305         case ETH_SS_STATS:
10306                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10307                 break;
10308         case ETH_SS_TEST:
10309                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10310                 break;
10311         default:
10312                 WARN_ON(1);     /* we need a WARN() with a message here */
10313                 break;
10314         }
10315 }
10316
10317 static int tg3_set_phys_id(struct net_device *dev,
10318                             enum ethtool_phys_id_state state)
10319 {
10320         struct tg3 *tp = netdev_priv(dev);
10321
10322         if (!netif_running(tp->dev))
10323                 return -EAGAIN;
10324
10325         switch (state) {
10326         case ETHTOOL_ID_ACTIVE:
10327                 return 1;       /* cycle on/off once per second */
10328
10329         case ETHTOOL_ID_ON:
10330                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10331                      LED_CTRL_1000MBPS_ON |
10332                      LED_CTRL_100MBPS_ON |
10333                      LED_CTRL_10MBPS_ON |
10334                      LED_CTRL_TRAFFIC_OVERRIDE |
10335                      LED_CTRL_TRAFFIC_BLINK |
10336                      LED_CTRL_TRAFFIC_LED);
10337                 break;
10338
10339         case ETHTOOL_ID_OFF:
10340                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10341                      LED_CTRL_TRAFFIC_OVERRIDE);
10342                 break;
10343
10344         case ETHTOOL_ID_INACTIVE:
10345                 tw32(MAC_LED_CTRL, tp->led_ctrl);
10346                 break;
10347         }
10348
10349         return 0;
10350 }
10351
10352 static void tg3_get_ethtool_stats(struct net_device *dev,
10353                                    struct ethtool_stats *estats, u64 *tmp_stats)
10354 {
10355         struct tg3 *tp = netdev_priv(dev);
10356         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10357 }
10358
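/* Return a kmalloc'ed copy of the device's VPD block; the caller must
 * kfree() it.  Newer parts publish an extended-VPD pointer in the NVRAM
 * directory, otherwise the block sits at the fixed TG3_NVM_VPD_OFF.
 * When NVRAM lacks the EEPROM magic, fall back to reading through the
 * PCI VPD capability.
 */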
10359 static __be32 *tg3_vpd_readblock(struct tg3 *tp)
10360 {
10361         int i;
10362         __be32 *buf;
10363         u32 offset = 0, len = 0;
10364         u32 magic, val;
10365
10366         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10367                 return NULL;
10368
10369         if (magic == TG3_EEPROM_MAGIC) {
10370                 for (offset = TG3_NVM_DIR_START;
10371                      offset < TG3_NVM_DIR_END;
10372                      offset += TG3_NVM_DIRENT_SIZE) {
10373                         if (tg3_nvram_read(tp, offset, &val))
10374                                 return NULL;
10375
10376                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10377                             TG3_NVM_DIRTYPE_EXTVPD)
10378                                 break;
10379                 }
10380
10381                 if (offset != TG3_NVM_DIR_END) {
10382                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10383                         if (tg3_nvram_read(tp, offset + 4, &offset))
10384                                 return NULL;
10385
10386                         offset = tg3_nvram_logical_addr(tp, offset);
10387                 }
10388         }
10389
10390         if (!offset || !len) {
10391                 offset = TG3_NVM_VPD_OFF;
10392                 len = TG3_NVM_VPD_LEN;
10393         }
10394
10395         buf = kmalloc(len, GFP_KERNEL);
10396         if (buf == NULL)
10397                 return NULL;
10398
10399         if (magic == TG3_EEPROM_MAGIC) {
10400                 for (i = 0; i < len; i += 4) {
10401                         /* The data is in little-endian format in NVRAM.
10402                          * Use the big-endian read routines to preserve
10403                          * the byte order as it exists in NVRAM.
10404                          */
10405                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10406                                 goto error;
10407                 }
10408         } else {
10409                 u8 *ptr;
10410                 ssize_t cnt;
10411                 unsigned int pos = 0;
10412
10413                 ptr = (u8 *)&buf[0];
10414                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10415                         cnt = pci_read_vpd(tp->pdev, pos,
10416                                            len - pos, ptr);
10417                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
10418                                 cnt = 0;
10419                         else if (cnt < 0)
10420                                 goto error;
10421                 }
10422                 if (pos != len)
10423                         goto error;
10424         }
10425
10426         return buf;
10427
10428 error:
10429         kfree(buf);
10430         return NULL;
10431 }
10432
10433 #define NVRAM_TEST_SIZE 0x100
10434 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
10435 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
10436 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
10437 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10438 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10439
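/* Sanity-check NVRAM contents: selfboot images are verified with a byte
 * checksum or per-byte parity bits, legacy images with CRC-32 values at
 * fixed offsets, followed by the VPD checksum keyword if present.
 */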
10440 static int tg3_test_nvram(struct tg3 *tp)
10441 {
10442         u32 csum, magic;
10443         __be32 *buf;
10444         int i, j, k, err = 0, size;
10445
10446         if (tg3_flag(tp, NO_NVRAM))
10447                 return 0;
10448
10449         if (tg3_nvram_read(tp, 0, &magic) != 0)
10450                 return -EIO;
10451
10452         if (magic == TG3_EEPROM_MAGIC)
10453                 size = NVRAM_TEST_SIZE;
10454         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10455                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10456                     TG3_EEPROM_SB_FORMAT_1) {
10457                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10458                         case TG3_EEPROM_SB_REVISION_0:
10459                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10460                                 break;
10461                         case TG3_EEPROM_SB_REVISION_2:
10462                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10463                                 break;
10464                         case TG3_EEPROM_SB_REVISION_3:
10465                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10466                                 break;
10467                         default:
10468                                 return 0;
10469                         }
10470                 } else
10471                         return 0;
10472         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10473                 size = NVRAM_SELFBOOT_HW_SIZE;
10474         else
10475                 return -EIO;
10476
10477         buf = kmalloc(size, GFP_KERNEL);
10478         if (buf == NULL)
10479                 return -ENOMEM;
10480
10481         err = -EIO;
10482         for (i = 0, j = 0; i < size; i += 4, j++) {
10483                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10484                 if (err)
10485                         break;
10486         }
10487         if (i < size)
10488                 goto out;
10489
10490         /* Selfboot format */
10491         magic = be32_to_cpu(buf[0]);
10492         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10493             TG3_EEPROM_MAGIC_FW) {
10494                 u8 *buf8 = (u8 *) buf, csum8 = 0;
10495
10496                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10497                     TG3_EEPROM_SB_REVISION_2) {
10498                         /* For rev 2, the csum doesn't include the MBA. */
10499                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10500                                 csum8 += buf8[i];
10501                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10502                                 csum8 += buf8[i];
10503                 } else {
10504                         for (i = 0; i < size; i++)
10505                                 csum8 += buf8[i];
10506                 }
10507
10508                 if (csum8 == 0) {
10509                         err = 0;
10510                         goto out;
10511                 }
10512
10513                 err = -EIO;
10514                 goto out;
10515         }
10516
10517         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10518             TG3_EEPROM_MAGIC_HW) {
10519                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10520                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10521                 u8 *buf8 = (u8 *) buf;
10522
10523                 /* Separate the parity bits and the data bytes.  */
10524                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10525                         if ((i == 0) || (i == 8)) {
10526                                 int l;
10527                                 u8 msk;
10528
10529                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10530                                         parity[k++] = buf8[i] & msk;
10531                                 i++;
10532                         } else if (i == 16) {
10533                                 int l;
10534                                 u8 msk;
10535
10536                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10537                                         parity[k++] = buf8[i] & msk;
10538                                 i++;
10539
10540                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10541                                         parity[k++] = buf8[i] & msk;
10542                                 i++;
10543                         }
10544                         data[j++] = buf8[i];
10545                 }
10546
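                /* Enforce odd parity: a data byte with an odd number of
                 * set bits must have its parity bit clear, one with an
                 * even count must have it set.
                 */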
10547                 err = -EIO;
10548                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10549                         u8 hw8 = hweight8(data[i]);
10550
10551                         if ((hw8 & 0x1) && parity[i])
10552                                 goto out;
10553                         else if (!(hw8 & 0x1) && !parity[i])
10554                                 goto out;
10555                 }
10556                 err = 0;
10557                 goto out;
10558         }
10559
10560         err = -EIO;
10561
10562         /* Bootstrap checksum at offset 0x10 */
10563         csum = calc_crc((unsigned char *) buf, 0x10);
10564         if (csum != le32_to_cpu(buf[0x10/4]))
10565                 goto out;
10566
10567         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10568         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10569         if (csum != le32_to_cpu(buf[0xfc/4]))
10570                 goto out;
10571
10572         kfree(buf);
10573
10574         buf = tg3_vpd_readblock(tp);
10575         if (!buf)
10576                 return -ENOMEM;
10577
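        /* Note: the block returned by tg3_vpd_readblock() may be shorter
         * than TG3_NVM_VPD_LEN; the fixed length used below assumes the
         * default VPD block size.
         */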
10578         i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
10579                              PCI_VPD_LRDT_RO_DATA);
10580         if (i > 0) {
10581                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
10582                 if (j < 0)
10583                         goto out;
10584
10585                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN)
10586                         goto out;
10587
10588                 i += PCI_VPD_LRDT_TAG_SIZE;
10589                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
10590                                               PCI_VPD_RO_KEYWORD_CHKSUM);
10591                 if (j > 0) {
10592                         u8 csum8 = 0;
10593
10594                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
10595
10596                         for (i = 0; i <= j; i++)
10597                                 csum8 += ((u8 *)buf)[i];
10598
10599                         if (csum8)
10600                                 goto out;
10601                 }
10602         }
10603
10604         err = 0;
10605
10606 out:
10607         kfree(buf);
10608         return err;
10609 }
10610
10611 #define TG3_SERDES_TIMEOUT_SEC  2
10612 #define TG3_COPPER_TIMEOUT_SEC  6
10613
10614 static int tg3_test_link(struct tg3 *tp)
10615 {
10616         int i, max;
10617
10618         if (!netif_running(tp->dev))
10619                 return -ENODEV;
10620
10621         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10622                 max = TG3_SERDES_TIMEOUT_SEC;
10623         else
10624                 max = TG3_COPPER_TIMEOUT_SEC;
10625
10626         for (i = 0; i < max; i++) {
10627                 if (netif_carrier_ok(tp->dev))
10628                         return 0;
10629
10630                 if (msleep_interruptible(1000))
10631                         break;
10632         }
10633
10634         return -EIO;
10635 }
10636
10637 /* Only test the commonly used registers */
10638 static int tg3_test_registers(struct tg3 *tp)
10639 {
10640         int i, is_5705, is_5750;
10641         u32 offset, read_mask, write_mask, val, save_val, read_val;
10642         static struct {
10643                 u16 offset;
10644                 u16 flags;
10645 #define TG3_FL_5705     0x1
10646 #define TG3_FL_NOT_5705 0x2
10647 #define TG3_FL_NOT_5788 0x4
10648 #define TG3_FL_NOT_5750 0x8
10649                 u32 read_mask;
10650                 u32 write_mask;
10651         } reg_tbl[] = {
10652                 /* MAC Control Registers */
10653                 { MAC_MODE, TG3_FL_NOT_5705,
10654                         0x00000000, 0x00ef6f8c },
10655                 { MAC_MODE, TG3_FL_5705,
10656                         0x00000000, 0x01ef6b8c },
10657                 { MAC_STATUS, TG3_FL_NOT_5705,
10658                         0x03800107, 0x00000000 },
10659                 { MAC_STATUS, TG3_FL_5705,
10660                         0x03800100, 0x00000000 },
10661                 { MAC_ADDR_0_HIGH, 0x0000,
10662                         0x00000000, 0x0000ffff },
10663                 { MAC_ADDR_0_LOW, 0x0000,
10664                         0x00000000, 0xffffffff },
10665                 { MAC_RX_MTU_SIZE, 0x0000,
10666                         0x00000000, 0x0000ffff },
10667                 { MAC_TX_MODE, 0x0000,
10668                         0x00000000, 0x00000070 },
10669                 { MAC_TX_LENGTHS, 0x0000,
10670                         0x00000000, 0x00003fff },
10671                 { MAC_RX_MODE, TG3_FL_NOT_5705,
10672                         0x00000000, 0x000007fc },
10673                 { MAC_RX_MODE, TG3_FL_5705,
10674                         0x00000000, 0x000007dc },
10675                 { MAC_HASH_REG_0, 0x0000,
10676                         0x00000000, 0xffffffff },
10677                 { MAC_HASH_REG_1, 0x0000,
10678                         0x00000000, 0xffffffff },
10679                 { MAC_HASH_REG_2, 0x0000,
10680                         0x00000000, 0xffffffff },
10681                 { MAC_HASH_REG_3, 0x0000,
10682                         0x00000000, 0xffffffff },
10683
10684                 /* Receive Data and Receive BD Initiator Control Registers. */
10685                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10686                         0x00000000, 0xffffffff },
10687                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10688                         0x00000000, 0xffffffff },
10689                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10690                         0x00000000, 0x00000003 },
10691                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10692                         0x00000000, 0xffffffff },
10693                 { RCVDBDI_STD_BD+0, 0x0000,
10694                         0x00000000, 0xffffffff },
10695                 { RCVDBDI_STD_BD+4, 0x0000,
10696                         0x00000000, 0xffffffff },
10697                 { RCVDBDI_STD_BD+8, 0x0000,
10698                         0x00000000, 0xffff0002 },
10699                 { RCVDBDI_STD_BD+0xc, 0x0000,
10700                         0x00000000, 0xffffffff },
10701
10702                 /* Receive BD Initiator Control Registers. */
10703                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10704                         0x00000000, 0xffffffff },
10705                 { RCVBDI_STD_THRESH, TG3_FL_5705,
10706                         0x00000000, 0x000003ff },
10707                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10708                         0x00000000, 0xffffffff },
10709
10710                 /* Host Coalescing Control Registers. */
10711                 { HOSTCC_MODE, TG3_FL_NOT_5705,
10712                         0x00000000, 0x00000004 },
10713                 { HOSTCC_MODE, TG3_FL_5705,
10714                         0x00000000, 0x000000f6 },
10715                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10716                         0x00000000, 0xffffffff },
10717                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10718                         0x00000000, 0x000003ff },
10719                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10720                         0x00000000, 0xffffffff },
10721                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10722                         0x00000000, 0x000003ff },
10723                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10724                         0x00000000, 0xffffffff },
10725                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10726                         0x00000000, 0x000000ff },
10727                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10728                         0x00000000, 0xffffffff },
10729                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10730                         0x00000000, 0x000000ff },
10731                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10732                         0x00000000, 0xffffffff },
10733                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10734                         0x00000000, 0xffffffff },
10735                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10736                         0x00000000, 0xffffffff },
10737                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10738                         0x00000000, 0x000000ff },
10739                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10740                         0x00000000, 0xffffffff },
10741                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10742                         0x00000000, 0x000000ff },
10743                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10744                         0x00000000, 0xffffffff },
10745                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10746                         0x00000000, 0xffffffff },
10747                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10748                         0x00000000, 0xffffffff },
10749                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10750                         0x00000000, 0xffffffff },
10751                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10752                         0x00000000, 0xffffffff },
10753                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10754                         0xffffffff, 0x00000000 },
10755                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10756                         0xffffffff, 0x00000000 },
10757
10758                 /* Buffer Manager Control Registers. */
10759                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10760                         0x00000000, 0x007fff80 },
10761                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10762                         0x00000000, 0x007fffff },
10763                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10764                         0x00000000, 0x0000003f },
10765                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10766                         0x00000000, 0x000001ff },
10767                 { BUFMGR_MB_HIGH_WATER, 0x0000,
10768                         0x00000000, 0x000001ff },
10769                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10770                         0xffffffff, 0x00000000 },
10771                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10772                         0xffffffff, 0x00000000 },
10773
10774                 /* Mailbox Registers */
10775                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10776                         0x00000000, 0x000001ff },
10777                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10778                         0x00000000, 0x000001ff },
10779                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10780                         0x00000000, 0x000007ff },
10781                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10782                         0x00000000, 0x000001ff },
10783
10784                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10785         };
10786
10787         is_5705 = is_5750 = 0;
10788         if (tg3_flag(tp, 5705_PLUS)) {
10789                 is_5705 = 1;
10790                 if (tg3_flag(tp, 5750_PLUS))
10791                         is_5750 = 1;
10792         }
10793
10794         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
10795                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
10796                         continue;
10797
10798                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
10799                         continue;
10800
10801                 if (tg3_flag(tp, IS_5788) &&
10802                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
10803                         continue;
10804
10805                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
10806                         continue;
10807
10808                 offset = (u32) reg_tbl[i].offset;
10809                 read_mask = reg_tbl[i].read_mask;
10810                 write_mask = reg_tbl[i].write_mask;
10811
10812                 /* Save the original register content */
10813                 save_val = tr32(offset);
10814
10815                 /* Determine the read-only value. */
10816                 read_val = save_val & read_mask;
10817
10818                 /* Write zero to the register, then make sure the read-only bits
10819                  * are not changed and the read/write bits are all zeros.
10820                  */
10821                 tw32(offset, 0);
10822
10823                 val = tr32(offset);
10824
10825                 /* Test the read-only and read/write bits. */
10826                 if (((val & read_mask) != read_val) || (val & write_mask))
10827                         goto out;
10828
10829                 /* Write ones to all the bits defined by RdMask and WrMask, then
10830                  * make sure the read-only bits are not changed and the
10831                  * read/write bits are all ones.
10832                  */
10833                 tw32(offset, read_mask | write_mask);
10834
10835                 val = tr32(offset);
10836
10837                 /* Test the read-only bits. */
10838                 if ((val & read_mask) != read_val)
10839                         goto out;
10840
10841                 /* Test the read/write bits. */
10842                 if ((val & write_mask) != write_mask)
10843                         goto out;
10844
10845                 tw32(offset, save_val);
10846         }
10847
10848         return 0;
10849
10850 out:
10851         if (netif_msg_hw(tp))
10852                 netdev_err(tp->dev,
10853                            "Register test failed at offset %x\n", offset);
10854         tw32(offset, save_val);
10855         return -EIO;
10856 }
10857
10858 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10859 {
10860         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
10861         int i;
10862         u32 j;
10863
10864         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10865                 for (j = 0; j < len; j += 4) {
10866                         u32 val;
10867
10868                         tg3_write_mem(tp, offset + j, test_pattern[i]);
10869                         tg3_read_mem(tp, offset + j, &val);
10870                         if (val != test_pattern[i])
10871                                 return -EIO;
10872                 }
10873         }
10874         return 0;
10875 }
10876
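/* Pattern-test the chip's internal SRAM: walk a per-family table of
 * (offset, length) regions and run tg3_do_mem_test() on each.
 */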
10877 static int tg3_test_memory(struct tg3 *tp)
10878 {
10879         static struct mem_entry {
10880                 u32 offset;
10881                 u32 len;
10882         } mem_tbl_570x[] = {
10883                 { 0x00000000, 0x00b50},
10884                 { 0x00002000, 0x1c000},
10885                 { 0xffffffff, 0x00000}
10886         }, mem_tbl_5705[] = {
10887                 { 0x00000100, 0x0000c},
10888                 { 0x00000200, 0x00008},
10889                 { 0x00004000, 0x00800},
10890                 { 0x00006000, 0x01000},
10891                 { 0x00008000, 0x02000},
10892                 { 0x00010000, 0x0e000},
10893                 { 0xffffffff, 0x00000}
10894         }, mem_tbl_5755[] = {
10895                 { 0x00000200, 0x00008},
10896                 { 0x00004000, 0x00800},
10897                 { 0x00006000, 0x00800},
10898                 { 0x00008000, 0x02000},
10899                 { 0x00010000, 0x0c000},
10900                 { 0xffffffff, 0x00000}
10901         }, mem_tbl_5906[] = {
10902                 { 0x00000200, 0x00008},
10903                 { 0x00004000, 0x00400},
10904                 { 0x00006000, 0x00400},
10905                 { 0x00008000, 0x01000},
10906                 { 0x00010000, 0x01000},
10907                 { 0xffffffff, 0x00000}
10908         }, mem_tbl_5717[] = {
10909                 { 0x00000200, 0x00008},
10910                 { 0x00010000, 0x0a000},
10911                 { 0x00020000, 0x13c00},
10912                 { 0xffffffff, 0x00000}
10913         }, mem_tbl_57765[] = {
10914                 { 0x00000200, 0x00008},
10915                 { 0x00004000, 0x00800},
10916                 { 0x00006000, 0x09800},
10917                 { 0x00010000, 0x0a000},
10918                 { 0xffffffff, 0x00000}
10919         };
10920         struct mem_entry *mem_tbl;
10921         int err = 0;
10922         int i;
10923
10924         if (tg3_flag(tp, 5717_PLUS))
10925                 mem_tbl = mem_tbl_5717;
10926         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
10927                 mem_tbl = mem_tbl_57765;
10928         else if (tg3_flag(tp, 5755_PLUS))
10929                 mem_tbl = mem_tbl_5755;
10930         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10931                 mem_tbl = mem_tbl_5906;
10932         else if (tg3_flag(tp, 5705_PLUS))
10933                 mem_tbl = mem_tbl_5705;
10934         else
10935                 mem_tbl = mem_tbl_570x;
10936
10937         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10938                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
10939                 if (err)
10940                         break;
10941         }
10942
10943         return err;
10944 }
10945
10946 #define TG3_MAC_LOOPBACK        0
10947 #define TG3_PHY_LOOPBACK        1
10948 #define TG3_TSO_LOOPBACK        2
10949
10950 #define TG3_TSO_MSS             500
10951
10952 #define TG3_TSO_IP_HDR_LEN      20
10953 #define TG3_TSO_TCP_HDR_LEN     20
10954 #define TG3_TSO_TCP_OPT_LEN     12
10955
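/* Canned TSO test frame: EtherType 0x0800, a 20-byte IPv4 header
 * (10.0.0.1 -> 10.0.0.2, protocol TCP) and a 32-byte TCP header carrying
 * 12 bytes of options (two NOPs plus a timestamp).  The IP total length
 * is patched in at transmit time.
 */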
10956 static const u8 tg3_tso_header[] = {
10957 0x08, 0x00,
10958 0x45, 0x00, 0x00, 0x00,
10959 0x00, 0x00, 0x40, 0x00,
10960 0x40, 0x06, 0x00, 0x00,
10961 0x0a, 0x00, 0x00, 0x01,
10962 0x0a, 0x00, 0x00, 0x02,
10963 0x0d, 0x00, 0xe0, 0x00,
10964 0x00, 0x00, 0x01, 0x00,
10965 0x00, 0x00, 0x02, 0x00,
10966 0x80, 0x10, 0x10, 0x00,
10967 0x14, 0x09, 0x00, 0x00,
10968 0x01, 0x01, 0x08, 0x0a,
10969 0x11, 0x11, 0x11, 0x11,
10970 0x11, 0x11, 0x11, 0x11,
10971 };
10972
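/* Transmit a single pktsz-byte test frame with the hardware looped back
 * on itself (internally in the MAC, or in the PHY via BMCR_LOOPBACK) and
 * poll the rings to verify that the frame makes it back.
 */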
10973 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
10974 {
10975         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
10976         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
10977         struct sk_buff *skb, *rx_skb;
10978         u8 *tx_data;
10979         dma_addr_t map;
10980         int num_pkts, tx_len, rx_len, i, err;
10981         struct tg3_rx_buffer_desc *desc;
10982         struct tg3_napi *tnapi, *rnapi;
10983         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
10984
10985         tnapi = &tp->napi[0];
10986         rnapi = &tp->napi[0];
10987         if (tp->irq_cnt > 1) {
10988                 if (tg3_flag(tp, ENABLE_RSS))
10989                         rnapi = &tp->napi[1];
10990                 if (tg3_flag(tp, ENABLE_TSS))
10991                         tnapi = &tp->napi[1];
10992         }
10993         coal_now = tnapi->coal_now | rnapi->coal_now;
10994
10995         if (loopback_mode == TG3_MAC_LOOPBACK) {
10996                 /* HW errata - mac loopback fails in some cases on 5780.
10997                  * Normal traffic and PHY loopback are not affected by
10998                  * errata.  Also, the MAC loopback test is deprecated for
10999                  * all newer ASIC revisions.
11000                  */
11001                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11002                     tg3_flag(tp, CPMU_PRESENT))
11003                         return 0;
11004
11005                 mac_mode = tp->mac_mode &
11006                            ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11007                 mac_mode |= MAC_MODE_PORT_INT_LPBACK;
11008                 if (!tg3_flag(tp, 5705_PLUS))
11009                         mac_mode |= MAC_MODE_LINK_POLARITY;
11010                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
11011                         mac_mode |= MAC_MODE_PORT_MODE_MII;
11012                 else
11013                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
11014                 tw32(MAC_MODE, mac_mode);
11015         } else {
11016                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11017                         tg3_phy_fet_toggle_apd(tp, false);
11018                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
11019                 } else
11020                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
11021
11022                 tg3_phy_toggle_automdix(tp, 0);
11023
11024                 tg3_writephy(tp, MII_BMCR, val);
11025                 udelay(40);
11026
11027                 mac_mode = tp->mac_mode &
11028                            ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11029                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11030                         tg3_writephy(tp, MII_TG3_FET_PTEST,
11031                                      MII_TG3_FET_PTEST_FRC_TX_LINK |
11032                                      MII_TG3_FET_PTEST_FRC_TX_LOCK);
11033                         /* The write needs to be flushed for the AC131 */
11034                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11035                                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
11036                         mac_mode |= MAC_MODE_PORT_MODE_MII;
11037                 } else
11038                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
11039
11040                 /* Reset the RX MAC to avoid intermittently losing the first RX packet. */
11041                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
11042                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
11043                         udelay(10);
11044                         tw32_f(MAC_RX_MODE, tp->rx_mode);
11045                 }
11046                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
11047                         u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
11048                         if (masked_phy_id == TG3_PHY_ID_BCM5401)
11049                                 mac_mode &= ~MAC_MODE_LINK_POLARITY;
11050                         else if (masked_phy_id == TG3_PHY_ID_BCM5411)
11051                                 mac_mode |= MAC_MODE_LINK_POLARITY;
11052                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
11053                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
11054                 }
11055                 tw32(MAC_MODE, mac_mode);
11056
11057                 /* Wait for link */
11058                 for (i = 0; i < 100; i++) {
11059                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11060                                 break;
11061                         mdelay(1);
11062                 }
11063         }
11064
11065         err = -EIO;
11066
11067         tx_len = pktsz;
11068         skb = netdev_alloc_skb(tp->dev, tx_len);
11069         if (!skb)
11070                 return -ENOMEM;
11071
11072         tx_data = skb_put(skb, tx_len);
11073         memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
11074         memset(tx_data + 6, 0x0, 8);
11075
11076         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11077
11078         if (loopback_mode == TG3_TSO_LOOPBACK) {
11079                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11080
11081                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11082                               TG3_TSO_TCP_OPT_LEN;
11083
11084                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11085                        sizeof(tg3_tso_header));
11086                 mss = TG3_TSO_MSS;
11087
11088                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11089                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11090
11091                 /* Set the total length field in the IP header */
11092                 iph->tot_len = htons((u16)(mss + hdr_len));
11093
11094                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11095                               TXD_FLAG_CPU_POST_DMA);
11096
11097                 if (tg3_flag(tp, HW_TSO_1) ||
11098                     tg3_flag(tp, HW_TSO_2) ||
11099                     tg3_flag(tp, HW_TSO_3)) {
11100                         struct tcphdr *th;
11101                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11102                         th = (struct tcphdr *)&tx_data[val];
11103                         th->check = 0;
11104                 } else
11105                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
11106
11107                 if (tg3_flag(tp, HW_TSO_3)) {
11108                         mss |= (hdr_len & 0xc) << 12;
11109                         if (hdr_len & 0x10)
11110                                 base_flags |= 0x00000010;
11111                         base_flags |= (hdr_len & 0x3e0) << 5;
11112                 } else if (tg3_flag(tp, HW_TSO_2))
11113                         mss |= hdr_len << 9;
11114                 else if (tg3_flag(tp, HW_TSO_1) ||
11115                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11116                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11117                 } else {
11118                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11119                 }
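                /*
                 * Worked example of the packing above, assuming the canned
                 * tg3_tso_header template's 20-byte IP header, 20-byte TCP
                 * header and 12 bytes of TCP options, i.e. hdr_len = 52 = 0x34:
                 *
                 *   HW_TSO_3: mss        |= (0x34 & 0x00c) << 12 = 0x4000
                 *             base_flags |= 0x10  (since 0x34 & 0x10)
                 *             base_flags |= (0x34 & 0x3e0) <<  5 = 0x0400
                 *   HW_TSO_2: mss        |= 0x34 << 9            = 0x6800
                 */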
11120
11121                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11122         } else {
11123                 num_pkts = 1;
11124                 data_off = ETH_HLEN;
11125         }
11126
11127         for (i = data_off; i < tx_len; i++)
11128                 tx_data[i] = (u8) (i & 0xff);
11129
11130         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11131         if (pci_dma_mapping_error(tp->pdev, map)) {
11132                 dev_kfree_skb(skb);
11133                 return -EIO;
11134         }
11135
11136         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11137                rnapi->coal_now);
11138
11139         udelay(10);
11140
11141         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11142
11143         tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len,
11144                     base_flags, (mss << 1) | 1);
11145
11146         tnapi->tx_prod++;
11147
11148         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11149         tr32_mailbox(tnapi->prodmbox);
11150
11151         udelay(10);
11152
11153         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
11154         for (i = 0; i < 35; i++) {
11155                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11156                        coal_now);
11157
11158                 udelay(10);
11159
11160                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11161                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11162                 if ((tx_idx == tnapi->tx_prod) &&
11163                     (rx_idx == (rx_start_idx + num_pkts)))
11164                         break;
11165         }
11166
11167         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
11168         dev_kfree_skb(skb);
11169
11170         if (tx_idx != tnapi->tx_prod)
11171                 goto out;
11172
11173         if (rx_idx != rx_start_idx + num_pkts)
11174                 goto out;
11175
11176         val = data_off;
11177         while (rx_idx != rx_start_idx) {
11178                 desc = &rnapi->rx_rcb[rx_start_idx++];
11179                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11180                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11181
11182                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11183                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11184                         goto out;
11185
11186                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11187                          - ETH_FCS_LEN;
11188
11189                 if (loopback_mode != TG3_TSO_LOOPBACK) {
11190                         if (rx_len != tx_len)
11191                                 goto out;
11192
11193                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11194                                 if (opaque_key != RXD_OPAQUE_RING_STD)
11195                                         goto out;
11196                         } else {
11197                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11198                                         goto out;
11199                         }
11200                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11201                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11202                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
11203                         goto out;
11204                 }
11205
11206                 if (opaque_key == RXD_OPAQUE_RING_STD) {
11207                         rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11208                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11209                                              mapping);
11210                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11211                         rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11212                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11213                                              mapping);
11214                 } else
11215                         goto out;
11216
11217                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11218                                             PCI_DMA_FROMDEVICE);
11219
11220                 for (i = data_off; i < rx_len; i++, val++) {
11221                         if (*(rx_skb->data + i) != (u8) (val & 0xff))
11222                                 goto out;
11223                 }
11224         }
11225
11226         err = 0;
11227
11228         /* tg3_free_rings will unmap and free the rx_skb */
11229 out:
11230         return err;
11231 }
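
/*
 * A minimal, self-contained sketch of the payload check the test above
 * relies on: the transmit side fills the packet body with a rolling
 * 8-bit counter and the receive side verifies that byte i still equals
 * (i & 0xff).  These are hypothetical helpers for illustration only,
 * not part of the driver.
 */
static inline void tg3_loopback_fill_pattern(u8 *buf, u32 off, u32 len)
{
        u32 i;

        /* Same fill tg3_run_loopback() applies after the headers. */
        for (i = off; i < len; i++)
                buf[i] = (u8)(i & 0xff);
}

static inline int tg3_loopback_check_pattern(const u8 *buf, u32 off, u32 len)
{
        u32 i;

        /* Any mismatch means the frame was corrupted in loopback. */
        for (i = off; i < len; i++)
                if (buf[i] != (u8)(i & 0xff))
                        return -EIO;
        return 0;
}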
11232
11233 #define TG3_STD_LOOPBACK_FAILED         1
11234 #define TG3_JMB_LOOPBACK_FAILED         2
11235 #define TG3_TSO_LOOPBACK_FAILED         4
11236
11237 #define TG3_MAC_LOOPBACK_SHIFT          0
11238 #define TG3_PHY_LOOPBACK_SHIFT          4
11239 #define TG3_LOOPBACK_FAILED             0x00000077
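
/*
 * tg3_test_loopback() returns a bitmask built from the values above:
 * bits 0-2 carry the std/jumbo/TSO results for MAC loopback and bits
 * 4-6 the same three results for PHY loopback, hence 0x77 for "every
 * subtest failed".
 */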
11240
11241 static int tg3_test_loopback(struct tg3 *tp)
11242 {
11243         int err = 0;
11244         u32 eee_cap, cpmuctrl = 0;
11245
11246         if (!netif_running(tp->dev))
11247                 return TG3_LOOPBACK_FAILED;
11248
11249         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11250         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11251
11252         err = tg3_reset_hw(tp, 1);
11253         if (err) {
11254                 err = TG3_LOOPBACK_FAILED;
11255                 goto done;
11256         }
11257
11258         if (tg3_flag(tp, ENABLE_RSS)) {
11259                 int i;
11260
11261                 /* Reroute all rx packets to the 1st queue */
11262                 for (i = MAC_RSS_INDIR_TBL_0;
11263                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11264                         tw32(i, 0x0);
11265         }
11266
11267         /* Turn off gphy autopowerdown. */
11268         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11269                 tg3_phy_toggle_apd(tp, false);
11270
11271         if (tg3_flag(tp, CPMU_PRESENT)) {
11272                 int i;
11273                 u32 status;
11274
11275                 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
11276
11277                 /* Wait for up to 40 microseconds to acquire lock. */
11278                 for (i = 0; i < 4; i++) {
11279                         status = tr32(TG3_CPMU_MUTEX_GNT);
11280                         if (status == CPMU_MUTEX_GNT_DRIVER)
11281                                 break;
11282                         udelay(10);
11283                 }
11284
11285                 if (status != CPMU_MUTEX_GNT_DRIVER) {
11286                         err = TG3_LOOPBACK_FAILED;
11287                         goto done;
11288                 }
11289
11290                 /* Turn off link-based power management. */
11291                 cpmuctrl = tr32(TG3_CPMU_CTRL);
11292                 tw32(TG3_CPMU_CTRL,
11293                      cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
11294                                   CPMU_CTRL_LINK_AWARE_MODE));
11295         }
11296
11297         if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
11298                 err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11299
11300         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11301             tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
11302                 err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11303
11304         if (tg3_flag(tp, CPMU_PRESENT)) {
11305                 tw32(TG3_CPMU_CTRL, cpmuctrl);
11306
11307                 /* Release the mutex */
11308                 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
11309         }
11310
11311         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11312             !tg3_flag(tp, USE_PHYLIB)) {
11313                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
11314                         err |= TG3_STD_LOOPBACK_FAILED <<
11315                                TG3_PHY_LOOPBACK_SHIFT;
11316                 if (tg3_flag(tp, TSO_CAPABLE) &&
11317                     tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
11318                         err |= TG3_TSO_LOOPBACK_FAILED <<
11319                                TG3_PHY_LOOPBACK_SHIFT;
11320                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11321                     tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
11322                         err |= TG3_JMB_LOOPBACK_FAILED <<
11323                                TG3_PHY_LOOPBACK_SHIFT;
11324         }
11325
11326         /* Re-enable gphy autopowerdown. */
11327         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11328                 tg3_phy_toggle_apd(tp, true);
11329
11330 done:
11331         tp->phy_flags |= eee_cap;
11332
11333         return err;
11334 }
11335
11336 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11337                           u64 *data)
11338 {
11339         struct tg3 *tp = netdev_priv(dev);
11340
11341         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11342                 tg3_power_up(tp);
11343
11344         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11345
11346         if (tg3_test_nvram(tp) != 0) {
11347                 etest->flags |= ETH_TEST_FL_FAILED;
11348                 data[0] = 1;
11349         }
11350         if (tg3_test_link(tp) != 0) {
11351                 etest->flags |= ETH_TEST_FL_FAILED;
11352                 data[1] = 1;
11353         }
11354         if (etest->flags & ETH_TEST_FL_OFFLINE) {
11355                 int err, err2 = 0, irq_sync = 0;
11356
11357                 if (netif_running(dev)) {
11358                         tg3_phy_stop(tp);
11359                         tg3_netif_stop(tp);
11360                         irq_sync = 1;
11361                 }
11362
11363                 tg3_full_lock(tp, irq_sync);
11364
11365                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11366                 err = tg3_nvram_lock(tp);
11367                 tg3_halt_cpu(tp, RX_CPU_BASE);
11368                 if (!tg3_flag(tp, 5705_PLUS))
11369                         tg3_halt_cpu(tp, TX_CPU_BASE);
11370                 if (!err)
11371                         tg3_nvram_unlock(tp);
11372
11373                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11374                         tg3_phy_reset(tp);
11375
11376                 if (tg3_test_registers(tp) != 0) {
11377                         etest->flags |= ETH_TEST_FL_FAILED;
11378                         data[2] = 1;
11379                 }
11380                 if (tg3_test_memory(tp) != 0) {
11381                         etest->flags |= ETH_TEST_FL_FAILED;
11382                         data[3] = 1;
11383                 }
11384                 if ((data[4] = tg3_test_loopback(tp)) != 0)
11385                         etest->flags |= ETH_TEST_FL_FAILED;
11386
11387                 tg3_full_unlock(tp);
11388
11389                 if (tg3_test_interrupt(tp) != 0) {
11390                         etest->flags |= ETH_TEST_FL_FAILED;
11391                         data[5] = 1;
11392                 }
11393
11394                 tg3_full_lock(tp, 0);
11395
11396                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11397                 if (netif_running(dev)) {
11398                         tg3_flag_set(tp, INIT_COMPLETE);
11399                         err2 = tg3_restart_hw(tp, 1);
11400                         if (!err2)
11401                                 tg3_netif_start(tp);
11402                 }
11403
11404                 tg3_full_unlock(tp);
11405
11406                 if (irq_sync && !err2)
11407                         tg3_phy_start(tp);
11408         }
11409         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11410                 tg3_power_down(tp);
11411
11412 }
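
/*
 * Result layout produced by tg3_self_test() for ethtool -t; the
 * bracketed slots are only exercised for an offline test:
 *
 *   data[0] NVRAM checksum        data[3] [memory]
 *   data[1] link                  data[4] [loopback bitmask, see above]
 *   data[2] [registers]           data[5] [interrupt]
 *
 * Any non-zero slot also sets ETH_TEST_FL_FAILED in etest->flags.
 */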
11413
11414 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11415 {
11416         struct mii_ioctl_data *data = if_mii(ifr);
11417         struct tg3 *tp = netdev_priv(dev);
11418         int err;
11419
11420         if (tg3_flag(tp, USE_PHYLIB)) {
11421                 struct phy_device *phydev;
11422                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11423                         return -EAGAIN;
11424                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11425                 return phy_mii_ioctl(phydev, ifr, cmd);
11426         }
11427
11428         switch (cmd) {
11429         case SIOCGMIIPHY:
11430                 data->phy_id = tp->phy_addr;
11431
11432                 /* fallthru */
11433         case SIOCGMIIREG: {
11434                 u32 mii_regval;
11435
11436                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11437                         break;                  /* We have no PHY */
11438
11439                 if (!netif_running(dev))
11440                         return -EAGAIN;
11441
11442                 spin_lock_bh(&tp->lock);
11443                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11444                 spin_unlock_bh(&tp->lock);
11445
11446                 data->val_out = mii_regval;
11447
11448                 return err;
11449         }
11450
11451         case SIOCSMIIREG:
11452                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11453                         break;                  /* We have no PHY */
11454
11455                 if (!netif_running(dev))
11456                         return -EAGAIN;
11457
11458                 spin_lock_bh(&tp->lock);
11459                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11460                 spin_unlock_bh(&tp->lock);
11461
11462                 return err;
11463
11464         default:
11465                 /* do nothing */
11466                 break;
11467         }
11468         return -EOPNOTSUPP;
11469 }
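
/*
 * Sketch of how the SIOCGMIIPHY/SIOCGMIIREG path above is typically
 * driven from userland (the classic mii-tool idiom: the mii_ioctl_data
 * structure lives inline in the ifreq union).  Illustrative only and
 * not kernel code, hence the #if 0; "sock" is any AF_INET/SOCK_DGRAM
 * socket.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

static int read_bmsr(int sock, const char *ifname)
{
        struct ifreq ifr;
        struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

        if (ioctl(sock, SIOCGMIIPHY, &ifr) < 0)         /* fills mii->phy_id */
                return -1;

        mii->reg_num = MII_BMSR;                        /* basic status register */
        if (ioctl(sock, SIOCGMIIREG, &ifr) < 0)
                return -1;

        return mii->val_out;
}
#endif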
11470
11471 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11472 {
11473         struct tg3 *tp = netdev_priv(dev);
11474
11475         memcpy(ec, &tp->coal, sizeof(*ec));
11476         return 0;
11477 }
11478
11479 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11480 {
11481         struct tg3 *tp = netdev_priv(dev);
11482         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11483         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11484
11485         if (!tg3_flag(tp, 5705_PLUS)) {
11486                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11487                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11488                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11489                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11490         }
11491
11492         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11493             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11494             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11495             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11496             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11497             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11498             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11499             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11500             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11501             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11502                 return -EINVAL;
11503
11504         /* No rx interrupts will be generated if both are zero */
11505         if ((ec->rx_coalesce_usecs == 0) &&
11506             (ec->rx_max_coalesced_frames == 0))
11507                 return -EINVAL;
11508
11509         /* No tx interrupts will be generated if both are zero */
11510         if ((ec->tx_coalesce_usecs == 0) &&
11511             (ec->tx_max_coalesced_frames == 0))
11512                 return -EINVAL;
11513
11514         /* Only copy relevant parameters, ignore all others. */
11515         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11516         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11517         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11518         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11519         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11520         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11521         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11522         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11523         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11524
11525         if (netif_running(dev)) {
11526                 tg3_full_lock(tp, 0);
11527                 __tg3_set_coalesce(tp, &tp->coal);
11528                 tg3_full_unlock(tp);
11529         }
11530         return 0;
11531 }
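
/*
 * The parameters validated above map directly onto ethtool's coalescing
 * knobs, e.g. (illustrative values):
 *
 *     ethtool -C eth0 rx-usecs 20 rx-frames 5 tx-usecs 72 tx-frames 53
 *
 * Zeroing both the usecs and frames knob of one direction is rejected
 * with -EINVAL because the chip would then never raise an interrupt for
 * that direction.
 */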
11532
11533 static const struct ethtool_ops tg3_ethtool_ops = {
11534         .get_settings           = tg3_get_settings,
11535         .set_settings           = tg3_set_settings,
11536         .get_drvinfo            = tg3_get_drvinfo,
11537         .get_regs_len           = tg3_get_regs_len,
11538         .get_regs               = tg3_get_regs,
11539         .get_wol                = tg3_get_wol,
11540         .set_wol                = tg3_set_wol,
11541         .get_msglevel           = tg3_get_msglevel,
11542         .set_msglevel           = tg3_set_msglevel,
11543         .nway_reset             = tg3_nway_reset,
11544         .get_link               = ethtool_op_get_link,
11545         .get_eeprom_len         = tg3_get_eeprom_len,
11546         .get_eeprom             = tg3_get_eeprom,
11547         .set_eeprom             = tg3_set_eeprom,
11548         .get_ringparam          = tg3_get_ringparam,
11549         .set_ringparam          = tg3_set_ringparam,
11550         .get_pauseparam         = tg3_get_pauseparam,
11551         .set_pauseparam         = tg3_set_pauseparam,
11552         .self_test              = tg3_self_test,
11553         .get_strings            = tg3_get_strings,
11554         .set_phys_id            = tg3_set_phys_id,
11555         .get_ethtool_stats      = tg3_get_ethtool_stats,
11556         .get_coalesce           = tg3_get_coalesce,
11557         .set_coalesce           = tg3_set_coalesce,
11558         .get_sset_count         = tg3_get_sset_count,
11559 };
11560
11561 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11562 {
11563         u32 cursize, val, magic;
11564
11565         tp->nvram_size = EEPROM_CHIP_SIZE;
11566
11567         if (tg3_nvram_read(tp, 0, &magic) != 0)
11568                 return;
11569
11570         if ((magic != TG3_EEPROM_MAGIC) &&
11571             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11572             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11573                 return;
11574
11575         /*
11576          * Size the chip by reading offsets at increasing powers of two.
11577          * When we encounter our validation signature, we know the addressing
11578          * has wrapped around, and thus have our chip size.
11579          */
11580         cursize = 0x10;
11581
11582         while (cursize < tp->nvram_size) {
11583                 if (tg3_nvram_read(tp, cursize, &val) != 0)
11584                         return;
11585
11586                 if (val == magic)
11587                         break;
11588
11589                 cursize <<= 1;
11590         }
11591
11592         tp->nvram_size = cursize;
11593 }
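
/*
 * Example of the wrap probe above, assuming a 4 KB EEPROM: the reads at
 * 0x10, 0x20, 0x40, ... return ordinary data until cursize reaches
 * 0x1000, where the address wraps back to offset 0, the magic signature
 * reappears, and cursize is taken as the device size.
 */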
11594
11595 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11596 {
11597         u32 val;
11598
11599         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11600                 return;
11601
11602         /* Selfboot format */
11603         if (val != TG3_EEPROM_MAGIC) {
11604                 tg3_get_eeprom_size(tp);
11605                 return;
11606         }
11607
11608         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11609                 if (val != 0) {
11610                         /* This is confusing.  We want to operate on the
11611                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
11612                          * call will read from NVRAM and byteswap the data
11613                          * according to the byteswapping settings for all
11614                          * other register accesses.  This ensures the data we
11615                          * want will always reside in the lower 16-bits.
11616                          * However, the data in NVRAM is in LE format, which
11617                          * means the data from the NVRAM read will always be
11618                          * opposite the endianness of the CPU.  The 16-bit
11619                          * byteswap then brings the data to CPU endianness.
11620                          */
11621                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11622                         return;
11623                 }
11624         }
11625         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11626 }
11627
11628 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11629 {
11630         u32 nvcfg1;
11631
11632         nvcfg1 = tr32(NVRAM_CFG1);
11633         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11634                 tg3_flag_set(tp, FLASH);
11635         } else {
11636                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11637                 tw32(NVRAM_CFG1, nvcfg1);
11638         }
11639
11640         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11641             tg3_flag(tp, 5780_CLASS)) {
11642                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11643                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11644                         tp->nvram_jedecnum = JEDEC_ATMEL;
11645                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11646                         tg3_flag_set(tp, NVRAM_BUFFERED);
11647                         break;
11648                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11649                         tp->nvram_jedecnum = JEDEC_ATMEL;
11650                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11651                         break;
11652                 case FLASH_VENDOR_ATMEL_EEPROM:
11653                         tp->nvram_jedecnum = JEDEC_ATMEL;
11654                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11655                         tg3_flag_set(tp, NVRAM_BUFFERED);
11656                         break;
11657                 case FLASH_VENDOR_ST:
11658                         tp->nvram_jedecnum = JEDEC_ST;
11659                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11660                         tg3_flag_set(tp, NVRAM_BUFFERED);
11661                         break;
11662                 case FLASH_VENDOR_SAIFUN:
11663                         tp->nvram_jedecnum = JEDEC_SAIFUN;
11664                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11665                         break;
11666                 case FLASH_VENDOR_SST_SMALL:
11667                 case FLASH_VENDOR_SST_LARGE:
11668                         tp->nvram_jedecnum = JEDEC_SST;
11669                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11670                         break;
11671                 }
11672         } else {
11673                 tp->nvram_jedecnum = JEDEC_ATMEL;
11674                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11675                 tg3_flag_set(tp, NVRAM_BUFFERED);
11676         }
11677 }
11678
11679 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11680 {
11681         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11682         case FLASH_5752PAGE_SIZE_256:
11683                 tp->nvram_pagesize = 256;
11684                 break;
11685         case FLASH_5752PAGE_SIZE_512:
11686                 tp->nvram_pagesize = 512;
11687                 break;
11688         case FLASH_5752PAGE_SIZE_1K:
11689                 tp->nvram_pagesize = 1024;
11690                 break;
11691         case FLASH_5752PAGE_SIZE_2K:
11692                 tp->nvram_pagesize = 2048;
11693                 break;
11694         case FLASH_5752PAGE_SIZE_4K:
11695                 tp->nvram_pagesize = 4096;
11696                 break;
11697         case FLASH_5752PAGE_SIZE_264:
11698                 tp->nvram_pagesize = 264;
11699                 break;
11700         case FLASH_5752PAGE_SIZE_528:
11701                 tp->nvram_pagesize = 528;
11702                 break;
11703         }
11704 }
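
/*
 * 264- and 528-byte pages are the native "power of two plus eight" page
 * sizes of Atmel AT45DB-style DataFlash, which the NVRAM controller can
 * address-translate; the 57780/5717/5720 callers below set
 * NO_NVRAM_ADDR_TRANS for any other page size.
 */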
11705
11706 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11707 {
11708         u32 nvcfg1;
11709
11710         nvcfg1 = tr32(NVRAM_CFG1);
11711
11712         /* NVRAM protection for TPM */
11713         if (nvcfg1 & (1 << 27))
11714                 tg3_flag_set(tp, PROTECTED_NVRAM);
11715
11716         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11717         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11718         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11719                 tp->nvram_jedecnum = JEDEC_ATMEL;
11720                 tg3_flag_set(tp, NVRAM_BUFFERED);
11721                 break;
11722         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11723                 tp->nvram_jedecnum = JEDEC_ATMEL;
11724                 tg3_flag_set(tp, NVRAM_BUFFERED);
11725                 tg3_flag_set(tp, FLASH);
11726                 break;
11727         case FLASH_5752VENDOR_ST_M45PE10:
11728         case FLASH_5752VENDOR_ST_M45PE20:
11729         case FLASH_5752VENDOR_ST_M45PE40:
11730                 tp->nvram_jedecnum = JEDEC_ST;
11731                 tg3_flag_set(tp, NVRAM_BUFFERED);
11732                 tg3_flag_set(tp, FLASH);
11733                 break;
11734         }
11735
11736         if (tg3_flag(tp, FLASH)) {
11737                 tg3_nvram_get_pagesize(tp, nvcfg1);
11738         } else {
11739                 /* For eeprom, set pagesize to maximum eeprom size */
11740                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11741
11742                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11743                 tw32(NVRAM_CFG1, nvcfg1);
11744         }
11745 }
11746
11747 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11748 {
11749         u32 nvcfg1, protect = 0;
11750
11751         nvcfg1 = tr32(NVRAM_CFG1);
11752
11753         /* NVRAM protection for TPM */
11754         if (nvcfg1 & (1 << 27)) {
11755                 tg3_flag_set(tp, PROTECTED_NVRAM);
11756                 protect = 1;
11757         }
11758
11759         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11760         switch (nvcfg1) {
11761         case FLASH_5755VENDOR_ATMEL_FLASH_1:
11762         case FLASH_5755VENDOR_ATMEL_FLASH_2:
11763         case FLASH_5755VENDOR_ATMEL_FLASH_3:
11764         case FLASH_5755VENDOR_ATMEL_FLASH_5:
11765                 tp->nvram_jedecnum = JEDEC_ATMEL;
11766                 tg3_flag_set(tp, NVRAM_BUFFERED);
11767                 tg3_flag_set(tp, FLASH);
11768                 tp->nvram_pagesize = 264;
11769                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11770                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11771                         tp->nvram_size = (protect ? 0x3e200 :
11772                                           TG3_NVRAM_SIZE_512KB);
11773                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11774                         tp->nvram_size = (protect ? 0x1f200 :
11775                                           TG3_NVRAM_SIZE_256KB);
11776                 else
11777                         tp->nvram_size = (protect ? 0x1f200 :
11778                                           TG3_NVRAM_SIZE_128KB);
11779                 break;
11780         case FLASH_5752VENDOR_ST_M45PE10:
11781         case FLASH_5752VENDOR_ST_M45PE20:
11782         case FLASH_5752VENDOR_ST_M45PE40:
11783                 tp->nvram_jedecnum = JEDEC_ST;
11784                 tg3_flag_set(tp, NVRAM_BUFFERED);
11785                 tg3_flag_set(tp, FLASH);
11786                 tp->nvram_pagesize = 256;
11787                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
11788                         tp->nvram_size = (protect ?
11789                                           TG3_NVRAM_SIZE_64KB :
11790                                           TG3_NVRAM_SIZE_128KB);
11791                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
11792                         tp->nvram_size = (protect ?
11793                                           TG3_NVRAM_SIZE_64KB :
11794                                           TG3_NVRAM_SIZE_256KB);
11795                 else
11796                         tp->nvram_size = (protect ?
11797                                           TG3_NVRAM_SIZE_128KB :
11798                                           TG3_NVRAM_SIZE_512KB);
11799                 break;
11800         }
11801 }
11802
11803 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
11804 {
11805         u32 nvcfg1;
11806
11807         nvcfg1 = tr32(NVRAM_CFG1);
11808
11809         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11810         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
11811         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11812         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
11813         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11814                 tp->nvram_jedecnum = JEDEC_ATMEL;
11815                 tg3_flag_set(tp, NVRAM_BUFFERED);
11816                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11817
11818                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11819                 tw32(NVRAM_CFG1, nvcfg1);
11820                 break;
11821         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11822         case FLASH_5755VENDOR_ATMEL_FLASH_1:
11823         case FLASH_5755VENDOR_ATMEL_FLASH_2:
11824         case FLASH_5755VENDOR_ATMEL_FLASH_3:
11825                 tp->nvram_jedecnum = JEDEC_ATMEL;
11826                 tg3_flag_set(tp, NVRAM_BUFFERED);
11827                 tg3_flag_set(tp, FLASH);
11828                 tp->nvram_pagesize = 264;
11829                 break;
11830         case FLASH_5752VENDOR_ST_M45PE10:
11831         case FLASH_5752VENDOR_ST_M45PE20:
11832         case FLASH_5752VENDOR_ST_M45PE40:
11833                 tp->nvram_jedecnum = JEDEC_ST;
11834                 tg3_flag_set(tp, NVRAM_BUFFERED);
11835                 tg3_flag_set(tp, FLASH);
11836                 tp->nvram_pagesize = 256;
11837                 break;
11838         }
11839 }
11840
11841 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11842 {
11843         u32 nvcfg1, protect = 0;
11844
11845         nvcfg1 = tr32(NVRAM_CFG1);
11846
11847         /* NVRAM protection for TPM */
11848         if (nvcfg1 & (1 << 27)) {
11849                 tg3_flag_set(tp, PROTECTED_NVRAM);
11850                 protect = 1;
11851         }
11852
11853         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11854         switch (nvcfg1) {
11855         case FLASH_5761VENDOR_ATMEL_ADB021D:
11856         case FLASH_5761VENDOR_ATMEL_ADB041D:
11857         case FLASH_5761VENDOR_ATMEL_ADB081D:
11858         case FLASH_5761VENDOR_ATMEL_ADB161D:
11859         case FLASH_5761VENDOR_ATMEL_MDB021D:
11860         case FLASH_5761VENDOR_ATMEL_MDB041D:
11861         case FLASH_5761VENDOR_ATMEL_MDB081D:
11862         case FLASH_5761VENDOR_ATMEL_MDB161D:
11863                 tp->nvram_jedecnum = JEDEC_ATMEL;
11864                 tg3_flag_set(tp, NVRAM_BUFFERED);
11865                 tg3_flag_set(tp, FLASH);
11866                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
11867                 tp->nvram_pagesize = 256;
11868                 break;
11869         case FLASH_5761VENDOR_ST_A_M45PE20:
11870         case FLASH_5761VENDOR_ST_A_M45PE40:
11871         case FLASH_5761VENDOR_ST_A_M45PE80:
11872         case FLASH_5761VENDOR_ST_A_M45PE16:
11873         case FLASH_5761VENDOR_ST_M_M45PE20:
11874         case FLASH_5761VENDOR_ST_M_M45PE40:
11875         case FLASH_5761VENDOR_ST_M_M45PE80:
11876         case FLASH_5761VENDOR_ST_M_M45PE16:
11877                 tp->nvram_jedecnum = JEDEC_ST;
11878                 tg3_flag_set(tp, NVRAM_BUFFERED);
11879                 tg3_flag_set(tp, FLASH);
11880                 tp->nvram_pagesize = 256;
11881                 break;
11882         }
11883
11884         if (protect) {
11885                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
11886         } else {
11887                 switch (nvcfg1) {
11888                 case FLASH_5761VENDOR_ATMEL_ADB161D:
11889                 case FLASH_5761VENDOR_ATMEL_MDB161D:
11890                 case FLASH_5761VENDOR_ST_A_M45PE16:
11891                 case FLASH_5761VENDOR_ST_M_M45PE16:
11892                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
11893                         break;
11894                 case FLASH_5761VENDOR_ATMEL_ADB081D:
11895                 case FLASH_5761VENDOR_ATMEL_MDB081D:
11896                 case FLASH_5761VENDOR_ST_A_M45PE80:
11897                 case FLASH_5761VENDOR_ST_M_M45PE80:
11898                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
11899                         break;
11900                 case FLASH_5761VENDOR_ATMEL_ADB041D:
11901                 case FLASH_5761VENDOR_ATMEL_MDB041D:
11902                 case FLASH_5761VENDOR_ST_A_M45PE40:
11903                 case FLASH_5761VENDOR_ST_M_M45PE40:
11904                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11905                         break;
11906                 case FLASH_5761VENDOR_ATMEL_ADB021D:
11907                 case FLASH_5761VENDOR_ATMEL_MDB021D:
11908                 case FLASH_5761VENDOR_ST_A_M45PE20:
11909                 case FLASH_5761VENDOR_ST_M_M45PE20:
11910                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11911                         break;
11912                 }
11913         }
11914 }
11915
11916 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
11917 {
11918         tp->nvram_jedecnum = JEDEC_ATMEL;
11919         tg3_flag_set(tp, NVRAM_BUFFERED);
11920         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11921 }
11922
11923 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
11924 {
11925         u32 nvcfg1;
11926
11927         nvcfg1 = tr32(NVRAM_CFG1);
11928
11929         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11930         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11931         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11932                 tp->nvram_jedecnum = JEDEC_ATMEL;
11933                 tg3_flag_set(tp, NVRAM_BUFFERED);
11934                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11935
11936                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11937                 tw32(NVRAM_CFG1, nvcfg1);
11938                 return;
11939         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11940         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11941         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11942         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11943         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11944         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11945         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11946                 tp->nvram_jedecnum = JEDEC_ATMEL;
11947                 tg3_flag_set(tp, NVRAM_BUFFERED);
11948                 tg3_flag_set(tp, FLASH);
11949
11950                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11951                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11952                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11953                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11954                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11955                         break;
11956                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11957                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11958                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11959                         break;
11960                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11961                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11962                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11963                         break;
11964                 }
11965                 break;
11966         case FLASH_5752VENDOR_ST_M45PE10:
11967         case FLASH_5752VENDOR_ST_M45PE20:
11968         case FLASH_5752VENDOR_ST_M45PE40:
11969                 tp->nvram_jedecnum = JEDEC_ST;
11970                 tg3_flag_set(tp, NVRAM_BUFFERED);
11971                 tg3_flag_set(tp, FLASH);
11972
11973                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11974                 case FLASH_5752VENDOR_ST_M45PE10:
11975                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11976                         break;
11977                 case FLASH_5752VENDOR_ST_M45PE20:
11978                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11979                         break;
11980                 case FLASH_5752VENDOR_ST_M45PE40:
11981                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11982                         break;
11983                 }
11984                 break;
11985         default:
11986                 tg3_flag_set(tp, NO_NVRAM);
11987                 return;
11988         }
11989
11990         tg3_nvram_get_pagesize(tp, nvcfg1);
11991         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11992                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
11993 }
11994
11996 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
11997 {
11998         u32 nvcfg1;
11999
12000         nvcfg1 = tr32(NVRAM_CFG1);
12001
12002         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12003         case FLASH_5717VENDOR_ATMEL_EEPROM:
12004         case FLASH_5717VENDOR_MICRO_EEPROM:
12005                 tp->nvram_jedecnum = JEDEC_ATMEL;
12006                 tg3_flag_set(tp, NVRAM_BUFFERED);
12007                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12008
12009                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12010                 tw32(NVRAM_CFG1, nvcfg1);
12011                 return;
12012         case FLASH_5717VENDOR_ATMEL_MDB011D:
12013         case FLASH_5717VENDOR_ATMEL_ADB011B:
12014         case FLASH_5717VENDOR_ATMEL_ADB011D:
12015         case FLASH_5717VENDOR_ATMEL_MDB021D:
12016         case FLASH_5717VENDOR_ATMEL_ADB021B:
12017         case FLASH_5717VENDOR_ATMEL_ADB021D:
12018         case FLASH_5717VENDOR_ATMEL_45USPT:
12019                 tp->nvram_jedecnum = JEDEC_ATMEL;
12020                 tg3_flag_set(tp, NVRAM_BUFFERED);
12021                 tg3_flag_set(tp, FLASH);
12022
12023                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12024                 case FLASH_5717VENDOR_ATMEL_MDB021D:
12025                         /* Detect size with tg3_get_nvram_size() */
12026                         break;
12027                 case FLASH_5717VENDOR_ATMEL_ADB021B:
12028                 case FLASH_5717VENDOR_ATMEL_ADB021D:
12029                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12030                         break;
12031                 default:
12032                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12033                         break;
12034                 }
12035                 break;
12036         case FLASH_5717VENDOR_ST_M_M25PE10:
12037         case FLASH_5717VENDOR_ST_A_M25PE10:
12038         case FLASH_5717VENDOR_ST_M_M45PE10:
12039         case FLASH_5717VENDOR_ST_A_M45PE10:
12040         case FLASH_5717VENDOR_ST_M_M25PE20:
12041         case FLASH_5717VENDOR_ST_A_M25PE20:
12042         case FLASH_5717VENDOR_ST_M_M45PE20:
12043         case FLASH_5717VENDOR_ST_A_M45PE20:
12044         case FLASH_5717VENDOR_ST_25USPT:
12045         case FLASH_5717VENDOR_ST_45USPT:
12046                 tp->nvram_jedecnum = JEDEC_ST;
12047                 tg3_flag_set(tp, NVRAM_BUFFERED);
12048                 tg3_flag_set(tp, FLASH);
12049
12050                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12051                 case FLASH_5717VENDOR_ST_M_M25PE20:
12052                 case FLASH_5717VENDOR_ST_M_M45PE20:
12053                         /* Detect size with tg3_get_nvram_size() */
12054                         break;
12055                 case FLASH_5717VENDOR_ST_A_M25PE20:
12056                 case FLASH_5717VENDOR_ST_A_M45PE20:
12057                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12058                         break;
12059                 default:
12060                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12061                         break;
12062                 }
12063                 break;
12064         default:
12065                 tg3_flag_set(tp, NO_NVRAM);
12066                 return;
12067         }
12068
12069         tg3_nvram_get_pagesize(tp, nvcfg1);
12070         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12071                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12072 }
12073
12074 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12075 {
12076         u32 nvcfg1, nvmpinstrp;
12077
12078         nvcfg1 = tr32(NVRAM_CFG1);
12079         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12080
12081         switch (nvmpinstrp) {
12082         case FLASH_5720_EEPROM_HD:
12083         case FLASH_5720_EEPROM_LD:
12084                 tp->nvram_jedecnum = JEDEC_ATMEL;
12085                 tg3_flag_set(tp, NVRAM_BUFFERED);
12086
12087                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12088                 tw32(NVRAM_CFG1, nvcfg1);
12089                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12090                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12091                 else
12092                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12093                 return;
12094         case FLASH_5720VENDOR_M_ATMEL_DB011D:
12095         case FLASH_5720VENDOR_A_ATMEL_DB011B:
12096         case FLASH_5720VENDOR_A_ATMEL_DB011D:
12097         case FLASH_5720VENDOR_M_ATMEL_DB021D:
12098         case FLASH_5720VENDOR_A_ATMEL_DB021B:
12099         case FLASH_5720VENDOR_A_ATMEL_DB021D:
12100         case FLASH_5720VENDOR_M_ATMEL_DB041D:
12101         case FLASH_5720VENDOR_A_ATMEL_DB041B:
12102         case FLASH_5720VENDOR_A_ATMEL_DB041D:
12103         case FLASH_5720VENDOR_M_ATMEL_DB081D:
12104         case FLASH_5720VENDOR_A_ATMEL_DB081D:
12105         case FLASH_5720VENDOR_ATMEL_45USPT:
12106                 tp->nvram_jedecnum = JEDEC_ATMEL;
12107                 tg3_flag_set(tp, NVRAM_BUFFERED);
12108                 tg3_flag_set(tp, FLASH);
12109
12110                 switch (nvmpinstrp) {
12111                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12112                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12113                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12114                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12115                         break;
12116                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12117                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12118                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12119                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12120                         break;
12121                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12122                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12123                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12124                         break;
12125                 default:
12126                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12127                         break;
12128                 }
12129                 break;
12130         case FLASH_5720VENDOR_M_ST_M25PE10:
12131         case FLASH_5720VENDOR_M_ST_M45PE10:
12132         case FLASH_5720VENDOR_A_ST_M25PE10:
12133         case FLASH_5720VENDOR_A_ST_M45PE10:
12134         case FLASH_5720VENDOR_M_ST_M25PE20:
12135         case FLASH_5720VENDOR_M_ST_M45PE20:
12136         case FLASH_5720VENDOR_A_ST_M25PE20:
12137         case FLASH_5720VENDOR_A_ST_M45PE20:
12138         case FLASH_5720VENDOR_M_ST_M25PE40:
12139         case FLASH_5720VENDOR_M_ST_M45PE40:
12140         case FLASH_5720VENDOR_A_ST_M25PE40:
12141         case FLASH_5720VENDOR_A_ST_M45PE40:
12142         case FLASH_5720VENDOR_M_ST_M25PE80:
12143         case FLASH_5720VENDOR_M_ST_M45PE80:
12144         case FLASH_5720VENDOR_A_ST_M25PE80:
12145         case FLASH_5720VENDOR_A_ST_M45PE80:
12146         case FLASH_5720VENDOR_ST_25USPT:
12147         case FLASH_5720VENDOR_ST_45USPT:
12148                 tp->nvram_jedecnum = JEDEC_ST;
12149                 tg3_flag_set(tp, NVRAM_BUFFERED);
12150                 tg3_flag_set(tp, FLASH);
12151
12152                 switch (nvmpinstrp) {
12153                 case FLASH_5720VENDOR_M_ST_M25PE20:
12154                 case FLASH_5720VENDOR_M_ST_M45PE20:
12155                 case FLASH_5720VENDOR_A_ST_M25PE20:
12156                 case FLASH_5720VENDOR_A_ST_M45PE20:
12157                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12158                         break;
12159                 case FLASH_5720VENDOR_M_ST_M25PE40:
12160                 case FLASH_5720VENDOR_M_ST_M45PE40:
12161                 case FLASH_5720VENDOR_A_ST_M25PE40:
12162                 case FLASH_5720VENDOR_A_ST_M45PE40:
12163                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12164                         break;
12165                 case FLASH_5720VENDOR_M_ST_M25PE80:
12166                 case FLASH_5720VENDOR_M_ST_M45PE80:
12167                 case FLASH_5720VENDOR_A_ST_M25PE80:
12168                 case FLASH_5720VENDOR_A_ST_M45PE80:
12169                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12170                         break;
12171                 default:
12172                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12173                         break;
12174                 }
12175                 break;
12176         default:
12177                 tg3_flag_set(tp, NO_NVRAM);
12178                 return;
12179         }
12180
12181         tg3_nvram_get_pagesize(tp, nvcfg1);
12182         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12183                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12184 }
12185
12186 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12187 static void __devinit tg3_nvram_init(struct tg3 *tp)
12188 {
12189         tw32_f(GRC_EEPROM_ADDR,
12190              (EEPROM_ADDR_FSM_RESET |
12191               (EEPROM_DEFAULT_CLOCK_PERIOD <<
12192                EEPROM_ADDR_CLKPERD_SHIFT)));
12193
12194         msleep(1);
12195
12196         /* Enable seeprom accesses. */
12197         tw32_f(GRC_LOCAL_CTRL,
12198              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12199         udelay(100);
12200
12201         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12202             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12203                 tg3_flag_set(tp, NVRAM);
12204
12205                 if (tg3_nvram_lock(tp)) {
12206                         netdev_warn(tp->dev,
12207                                     "Cannot get nvram lock, %s failed\n",
12208                                     __func__);
12209                         return;
12210                 }
12211                 tg3_enable_nvram_access(tp);
12212
12213                 tp->nvram_size = 0;
12214
12215                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12216                         tg3_get_5752_nvram_info(tp);
12217                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12218                         tg3_get_5755_nvram_info(tp);
12219                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12220                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12221                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12222                         tg3_get_5787_nvram_info(tp);
12223                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12224                         tg3_get_5761_nvram_info(tp);
12225                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12226                         tg3_get_5906_nvram_info(tp);
12227                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12228                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12229                         tg3_get_57780_nvram_info(tp);
12230                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12231                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12232                         tg3_get_5717_nvram_info(tp);
12233                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12234                         tg3_get_5720_nvram_info(tp);
12235                 else
12236                         tg3_get_nvram_info(tp);
12237
12238                 if (tp->nvram_size == 0)
12239                         tg3_get_nvram_size(tp);
12240
12241                 tg3_disable_nvram_access(tp);
12242                 tg3_nvram_unlock(tp);
12243
12244         } else {
12245                 tg3_flag_clear(tp, NVRAM);
12246                 tg3_flag_clear(tp, NVRAM_BUFFERED);
12247
12248                 tg3_get_eeprom_size(tp);
12249         }
12250 }
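
/*
 * Note that the per-ASIC helpers above only set tp->nvram_size when the
 * strapping pins identify the part; when they leave it zero, the
 * tg3_get_nvram_size() fallback reads the 16-bit size word at 0xf2
 * (defaulting to 512 KB) or, for selfboot images, runs the EEPROM wrap
 * probe instead.
 */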
12251
12252 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12253                                     u32 offset, u32 len, u8 *buf)
12254 {
12255         int i, j, rc = 0;
12256         u32 val;
12257
12258         for (i = 0; i < len; i += 4) {
12259                 u32 addr;
12260                 __be32 data;
12261
12262                 addr = offset + i;
12263
12264                 memcpy(&data, buf + i, 4);
12265
12266                 /*
12267                  * The SEEPROM interface expects the data to always be opposite
12268                  * the native endian format.  We accomplish this by reversing
12269                  * all the operations that would have been performed on the
12270                  * data from a call to tg3_nvram_read_be32().
12271                  */
12272                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12273
12274                 val = tr32(GRC_EEPROM_ADDR);
12275                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12276
12277                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12278                         EEPROM_ADDR_READ);
12279                 tw32(GRC_EEPROM_ADDR, val |
12280                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
12281                         (addr & EEPROM_ADDR_ADDR_MASK) |
12282                         EEPROM_ADDR_START |
12283                         EEPROM_ADDR_WRITE);
12284
12285                 for (j = 0; j < 1000; j++) {
12286                         val = tr32(GRC_EEPROM_ADDR);
12287
12288                         if (val & EEPROM_ADDR_COMPLETE)
12289                                 break;
12290                         msleep(1);
12291                 }
12292                 if (!(val & EEPROM_ADDR_COMPLETE)) {
12293                         rc = -EBUSY;
12294                         break;
12295                 }
12296         }
12297
12298         return rc;
12299 }
12300
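/*
 * Unbuffered (raw) flash parts can only be programmed a full page at a
 * time, so the routine below is a read-modify-write: fetch the page
 * overlapping the target range, merge the caller's bytes into it, issue
 * WREN + ERASE, then WREN again and stream the page back one dword at a
 * time with FIRST/LAST framing, finishing with a WRDI to drop write
 * enable.
 */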
12301 /* offset and length are dword aligned */
12302 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12303                 u8 *buf)
12304 {
12305         int ret = 0;
12306         u32 pagesize = tp->nvram_pagesize;
12307         u32 pagemask = pagesize - 1;
12308         u32 nvram_cmd;
12309         u8 *tmp;
12310
12311         tmp = kmalloc(pagesize, GFP_KERNEL);
12312         if (tmp == NULL)
12313                 return -ENOMEM;
12314
12315         while (len) {
12316                 int j;
12317                 u32 phy_addr, page_off, size;
12318
12319                 phy_addr = offset & ~pagemask;
12320
12321                 for (j = 0; j < pagesize; j += 4) {
12322                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
12323                                                   (__be32 *) (tmp + j));
12324                         if (ret)
12325                                 break;
12326                 }
12327                 if (ret)
12328                         break;
12329
12330                 page_off = offset & pagemask;
12331                 size = pagesize - page_off;     /* don't copy past the page end */
12332                 if (len < size)
12333                         size = len;
12334
12335                 len -= size;
12336
12337                 memcpy(tmp + page_off, buf, size);
12338                 buf += size;    /* advance the source past the merged bytes */
12339                 offset = offset + (pagesize - page_off);
12340
12341                 tg3_enable_nvram_access(tp);
12342
12343                 /*
12344                  * Before we can erase the flash page, we need
12345                  * to issue a special "write enable" command.
12346                  */
12347                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12348
12349                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12350                         break;
12351
12352                 /* Erase the target page */
12353                 tw32(NVRAM_ADDR, phy_addr);
12354
12355                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12356                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12357
12358                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12359                         break;
12360
12361                 /* Issue another write enable to start the write. */
12362                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12363
12364                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12365                         break;
12366
12367                 for (j = 0; j < pagesize; j += 4) {
12368                         __be32 data;
12369
12370                         data = *((__be32 *) (tmp + j));
12371
12372                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
12373
12374                         tw32(NVRAM_ADDR, phy_addr + j);
12375
12376                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12377                                 NVRAM_CMD_WR;
12378
12379                         if (j == 0)
12380                                 nvram_cmd |= NVRAM_CMD_FIRST;
12381                         else if (j == (pagesize - 4))
12382                                 nvram_cmd |= NVRAM_CMD_LAST;
12383
12384                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12385                                 break;
12386                 }
12387                 if (ret)
12388                         break;
12389         }
12390
12391         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12392         tg3_nvram_exec_cmd(tp, nvram_cmd);
12393
12394         kfree(tmp);
12395
12396         return ret;
12397 }
12398
12399 /* offset and length are dword aligned */
12400 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12401                 u8 *buf)
12402 {
12403         int i, ret = 0;
12404
12405         for (i = 0; i < len; i += 4, offset += 4) {
12406                 u32 page_off, phy_addr, nvram_cmd;
12407                 __be32 data;
12408
12409                 memcpy(&data, buf + i, 4);
12410                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12411
12412                 page_off = offset % tp->nvram_pagesize;
12413
12414                 phy_addr = tg3_nvram_phys_addr(tp, offset);
12415
12416                 tw32(NVRAM_ADDR, phy_addr);
12417
12418                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12419
12420                 if (page_off == 0 || i == 0)
12421                         nvram_cmd |= NVRAM_CMD_FIRST;
12422                 if (page_off == (tp->nvram_pagesize - 4))
12423                         nvram_cmd |= NVRAM_CMD_LAST;
12424
12425                 if (i == (len - 4))
12426                         nvram_cmd |= NVRAM_CMD_LAST;
12427
12428                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12429                     !tg3_flag(tp, 5755_PLUS) &&
12430                     (tp->nvram_jedecnum == JEDEC_ST) &&
12431                     (nvram_cmd & NVRAM_CMD_FIRST)) {
12432
                        ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_WREN |
                                                 NVRAM_CMD_GO | NVRAM_CMD_DONE);
                        if (ret)
                                break;
12438                 }
12439                 if (!tg3_flag(tp, FLASH)) {
12440                         /* We always do complete word writes to eeprom. */
12441                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12442                 }
12443
                ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
                if (ret)
                        break;
12446         }
12447         return ret;
12448 }
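
/*
 * Worked example of the FIRST/LAST framing above (illustrative numbers):
 * with nvram_pagesize = 264, a two-dword write whose first dword lands
 * at page_off 260 sends that dword with both NVRAM_CMD_FIRST (i == 0)
 * and NVRAM_CMD_LAST (page_off == 264 - 4); the second dword then
 * starts a fresh page (page_off == 0), so it too carries NVRAM_CMD_FIRST
 * and, being the final dword (i == len - 4), NVRAM_CMD_LAST.
 */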
12449
12450 /* offset and length are dword aligned */
12451 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12452 {
12453         int ret;
12454
12455         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12456                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12457                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
12458                 udelay(40);
12459         }
12460
12461         if (!tg3_flag(tp, NVRAM)) {
12462                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12463         } else {
12464                 u32 grc_mode;
12465
12466                 ret = tg3_nvram_lock(tp);
12467                 if (ret)
12468                         return ret;
12469
12470                 tg3_enable_nvram_access(tp);
12471                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12472                         tw32(NVRAM_WRITE1, 0x406);
12473
12474                 grc_mode = tr32(GRC_MODE);
12475                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12476
12477                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12478                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
12479                                 buf);
12480                 } else {
12481                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12482                                 buf);
12483                 }
12484
12485                 grc_mode = tr32(GRC_MODE);
12486                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12487
12488                 tg3_disable_nvram_access(tp);
12489                 tg3_nvram_unlock(tp);
12490         }
12491
12492         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12493                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12494                 udelay(40);
12495         }
12496
12497         return ret;
12498 }
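
/*
 * Usage sketch (an assumption about the caller, not a quote of it): the
 * ethtool set_eeprom path is the typical user.  After padding a request
 * out to dword alignment it hands the whole buffer over in one call:
 *
 *	ret = tg3_nvram_write_block(tp, offset, len, buf);
 *
 * The routine then picks the EEPROM, buffered-flash, or unbuffered-flash
 * path internally based on the NVRAM flags probed earlier.
 */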
12499
12500 struct subsys_tbl_ent {
12501         u16 subsys_vendor, subsys_devid;
12502         u32 phy_id;
12503 };
12504
12505 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12506         /* Broadcom boards. */
12507         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12508           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12509         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12510           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12511         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12512           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12513         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12514           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12515         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12516           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12517         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12518           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12519         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12520           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12521         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12522           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12523         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12524           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12525         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12526           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12527         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12528           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12529
12530         /* 3com boards. */
12531         { TG3PCI_SUBVENDOR_ID_3COM,
12532           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12533         { TG3PCI_SUBVENDOR_ID_3COM,
12534           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12535         { TG3PCI_SUBVENDOR_ID_3COM,
12536           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12537         { TG3PCI_SUBVENDOR_ID_3COM,
12538           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12539         { TG3PCI_SUBVENDOR_ID_3COM,
12540           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12541
12542         /* DELL boards. */
12543         { TG3PCI_SUBVENDOR_ID_DELL,
12544           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12545         { TG3PCI_SUBVENDOR_ID_DELL,
12546           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12547         { TG3PCI_SUBVENDOR_ID_DELL,
12548           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12549         { TG3PCI_SUBVENDOR_ID_DELL,
12550           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12551
12552         /* Compaq boards. */
12553         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12554           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12555         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12556           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12557         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12558           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12559         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12560           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12561         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12562           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12563
12564         /* IBM boards. */
12565         { TG3PCI_SUBVENDOR_ID_IBM,
12566           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12567 };
12568
12569 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12570 {
12571         int i;
12572
12573         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12574                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12575                      tp->pdev->subsystem_vendor) &&
12576                     (subsys_id_to_phy_id[i].subsys_devid ==
12577                      tp->pdev->subsystem_device))
12578                         return &subsys_id_to_phy_id[i];
12579         }
12580         return NULL;
12581 }
12582
12583 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12584 {
12585         u32 val;
12586         u16 pmcsr;
12587
12588         /* On some early chips the SRAM cannot be accessed in D3hot state,
         * so we need to make sure the device is in D0.
12590          */
12591         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
12592         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
12593         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
12594         msleep(1);
12595
12596         /* Make sure register accesses (indirect or otherwise)
12597          * will function correctly.
12598          */
12599         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12600                                tp->misc_host_ctrl);
12601
12602         /* The memory arbiter has to be enabled in order for SRAM accesses
12603          * to succeed.  Normally on powerup the tg3 chip firmware will make
12604          * sure it is enabled, but other entities such as system netboot
12605          * code might disable it.
12606          */
12607         val = tr32(MEMARB_MODE);
12608         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
12609
12610         tp->phy_id = TG3_PHY_ID_INVALID;
12611         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12612
        /* Assume an onboard, WOL-capable device by default. */
12614         tg3_flag_set(tp, EEPROM_WRITE_PROT);
12615         tg3_flag_set(tp, WOL_CAP);
12616
12617         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12618                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12619                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12620                         tg3_flag_set(tp, IS_NIC);
12621                 }
12622                 val = tr32(VCPU_CFGSHDW);
12623                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12624                         tg3_flag_set(tp, ASPM_WORKAROUND);
12625                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12626                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
12627                         tg3_flag_set(tp, WOL_ENABLE);
12628                         device_set_wakeup_enable(&tp->pdev->dev, true);
12629                 }
12630                 goto done;
12631         }
12632
12633         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12634         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12635                 u32 nic_cfg, led_cfg;
12636                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12637                 int eeprom_phy_serdes = 0;
12638
12639                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12640                 tp->nic_sram_data_cfg = nic_cfg;
12641
12642                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12643                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12644                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12645                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12646                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
12647                     (ver > 0) && (ver < 0x100))
12648                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12649
12650                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12651                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12652
12653                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12654                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12655                         eeprom_phy_serdes = 1;
12656
12657                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12658                 if (nic_phy_id != 0) {
12659                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12660                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
12661
12662                         eeprom_phy_id  = (id1 >> 16) << 10;
12663                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
12664                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
12665                 } else
12666                         eeprom_phy_id = 0;
12667
12668                 tp->phy_id = eeprom_phy_id;
12669                 if (eeprom_phy_serdes) {
12670                         if (!tg3_flag(tp, 5705_PLUS))
12671                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12672                         else
12673                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
12674                 }
12675
12676                 if (tg3_flag(tp, 5750_PLUS))
12677                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12678                                     SHASTA_EXT_LED_MODE_MASK);
12679                 else
12680                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12681
12682                 switch (led_cfg) {
12683                 default:
12684                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12685                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12686                         break;
12687
12688                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12689                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12690                         break;
12691
12692                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12693                         tp->led_ctrl = LED_CTRL_MODE_MAC;
12694
                        /* Default to PHY_1_MODE if 0 (MAC_MODE) is read,
                         * as some older 5700/5701 bootcode reports 0 here.
                         */
12698                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12699                             ASIC_REV_5700 ||
12700                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
12701                             ASIC_REV_5701)
12702                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12703
12704                         break;
12705
12706                 case SHASTA_EXT_LED_SHARED:
12707                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
12708                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12709                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12710                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12711                                                  LED_CTRL_MODE_PHY_2);
12712                         break;
12713
12714                 case SHASTA_EXT_LED_MAC:
12715                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12716                         break;
12717
12718                 case SHASTA_EXT_LED_COMBO:
12719                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
12720                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12721                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12722                                                  LED_CTRL_MODE_PHY_2);
12723                         break;
12724
12725                 }
12726
12727                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12728                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12729                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12730                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12731
12732                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12733                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12734
12735                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12736                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
12737                         if ((tp->pdev->subsystem_vendor ==
12738                              PCI_VENDOR_ID_ARIMA) &&
12739                             (tp->pdev->subsystem_device == 0x205a ||
12740                              tp->pdev->subsystem_device == 0x2063))
12741                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12742                 } else {
12743                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12744                         tg3_flag_set(tp, IS_NIC);
12745                 }
12746
12747                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12748                         tg3_flag_set(tp, ENABLE_ASF);
12749                         if (tg3_flag(tp, 5750_PLUS))
12750                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
12751                 }
12752
12753                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12754                     tg3_flag(tp, 5750_PLUS))
12755                         tg3_flag_set(tp, ENABLE_APE);
12756
12757                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
12758                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12759                         tg3_flag_clear(tp, WOL_CAP);
12760
12761                 if (tg3_flag(tp, WOL_CAP) &&
12762                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
12763                         tg3_flag_set(tp, WOL_ENABLE);
12764                         device_set_wakeup_enable(&tp->pdev->dev, true);
12765                 }
12766
12767                 if (cfg2 & (1 << 17))
12768                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
12769
                /* SerDes signal pre-emphasis in register 0x590 is set
                 * by the bootcode if bit 18 is set.
                 */
12772                 if (cfg2 & (1 << 18))
12773                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
12774
12775                 if ((tg3_flag(tp, 57765_PLUS) ||
12776                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12777                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
12778                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12779                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
12780
12781                 if (tg3_flag(tp, PCI_EXPRESS) &&
12782                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12783                     !tg3_flag(tp, 57765_PLUS)) {
12784                         u32 cfg3;
12785
12786                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
12787                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
12788                                 tg3_flag_set(tp, ASPM_WORKAROUND);
12789                 }
12790
12791                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
12792                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
12793                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
12794                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
12795                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
12796                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
12797         }
12798 done:
12799         if (tg3_flag(tp, WOL_CAP))
12800                 device_set_wakeup_enable(&tp->pdev->dev,
12801                                          tg3_flag(tp, WOL_ENABLE));
12802         else
12803                 device_set_wakeup_capable(&tp->pdev->dev, false);
12804 }
12805
12806 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
12807 {
12808         int i;
12809         u32 val;
12810
12811         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
12812         tw32(OTP_CTRL, cmd);
12813
        /* Wait for up to 1 ms for the command to execute. */
12815         for (i = 0; i < 100; i++) {
12816                 val = tr32(OTP_STATUS);
12817                 if (val & OTP_STATUS_CMD_DONE)
12818                         break;
12819                 udelay(10);
12820         }
12821
12822         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
12823 }
12824
12825 /* Read the gphy configuration from the OTP region of the chip.  The gphy
12826  * configuration is a 32-bit value that straddles the alignment boundary.
12827  * We do two 32-bit reads and then shift and merge the results.
12828  */
12829 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
12830 {
12831         u32 bhalf_otp, thalf_otp;
12832
12833         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
12834
12835         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
12836                 return 0;
12837
12838         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
12839
12840         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12841                 return 0;
12842
12843         thalf_otp = tr32(OTP_READ_DATA);
12844
12845         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
12846
12847         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12848                 return 0;
12849
12850         bhalf_otp = tr32(OTP_READ_DATA);
12851
12852         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
12853 }
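
/*
 * Worked example of the merge above: if the first read returns
 * thalf_otp = 0xAAAA1111 and the second bhalf_otp = 0x2222BBBB, the
 * assembled gphy config is 0x11112222: the low half of the first word
 * supplies the upper 16 bits and the high half of the second word
 * supplies the lower 16 bits.
 */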
12854
12855 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
12856 {
12857         u32 adv = ADVERTISED_Autoneg |
12858                   ADVERTISED_Pause;
12859
12860         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12861                 adv |= ADVERTISED_1000baseT_Half |
12862                        ADVERTISED_1000baseT_Full;
12863
12864         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12865                 adv |= ADVERTISED_100baseT_Half |
12866                        ADVERTISED_100baseT_Full |
12867                        ADVERTISED_10baseT_Half |
12868                        ADVERTISED_10baseT_Full |
12869                        ADVERTISED_TP;
12870         else
12871                 adv |= ADVERTISED_FIBRE;
12872
12873         tp->link_config.advertising = adv;
12874         tp->link_config.speed = SPEED_INVALID;
12875         tp->link_config.duplex = DUPLEX_INVALID;
12876         tp->link_config.autoneg = AUTONEG_ENABLE;
12877         tp->link_config.active_speed = SPEED_INVALID;
12878         tp->link_config.active_duplex = DUPLEX_INVALID;
12879         tp->link_config.orig_speed = SPEED_INVALID;
12880         tp->link_config.orig_duplex = DUPLEX_INVALID;
12881         tp->link_config.orig_autoneg = AUTONEG_INVALID;
12882 }
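
/*
 * Worked example of the advertising mask built above: a copper NIC with
 * no speed restrictions advertises Autoneg | Pause | 1000baseT
 * Half/Full | 100baseT Half/Full | 10baseT Half/Full | TP, while a
 * SerDes device drops the 10/100/TP bits and advertises FIBRE instead.
 */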
12883
12884 static int __devinit tg3_phy_probe(struct tg3 *tp)
12885 {
12886         u32 hw_phy_id_1, hw_phy_id_2;
12887         u32 hw_phy_id, hw_phy_id_masked;
12888         int err;
12889
        /* Flow control autonegotiation is the default behavior. */
12891         tg3_flag_set(tp, PAUSE_AUTONEG);
12892         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
12893
12894         if (tg3_flag(tp, USE_PHYLIB))
12895                 return tg3_phy_init(tp);
12896
12897         /* Reading the PHY ID register can conflict with ASF
12898          * firmware access to the PHY hardware.
12899          */
12900         err = 0;
12901         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
12902                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
12903         } else {
                /* Now read the physical PHY_ID from the chip and verify
                 * that it is sane.  If it doesn't look good, we fall back
                 * to the PHY ID found in the eeprom area and, failing
                 * that, the hard-coded subsystem ID table.
                 */
12909                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
12910                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
12911
12912                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
12913                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
12914                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
12915
12916                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
12917         }
12918
12919         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
12920                 tp->phy_id = hw_phy_id;
12921                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
12922                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12923                 else
12924                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
12925         } else {
12926                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
12927                         /* Do nothing, phy ID already set up in
12928                          * tg3_get_eeprom_hw_cfg().
12929                          */
12930                 } else {
12931                         struct subsys_tbl_ent *p;
12932
12933                         /* No eeprom signature?  Try the hardcoded
12934                          * subsys device table.
12935                          */
12936                         p = tg3_lookup_by_subsys(tp);
12937                         if (!p)
12938                                 return -ENODEV;
12939
12940                         tp->phy_id = p->phy_id;
12941                         if (!tp->phy_id ||
12942                             tp->phy_id == TG3_PHY_ID_BCM8002)
12943                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12944                 }
12945         }
12946
12947         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12948             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
12949              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
12950              (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
12951               tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
12952              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12953               tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
12954                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
12955
12956         tg3_phy_init_link_config(tp);
12957
12958         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12959             !tg3_flag(tp, ENABLE_APE) &&
12960             !tg3_flag(tp, ENABLE_ASF)) {
12961                 u32 bmsr, mask;
12962
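                /* BMSR latches link-down events: read the register twice
                 * so the first read clears any stale latched state and
                 * the second reflects the current link status.
                 */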
12963                 tg3_readphy(tp, MII_BMSR, &bmsr);
12964                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
12965                     (bmsr & BMSR_LSTATUS))
12966                         goto skip_phy_reset;
12967
12968                 err = tg3_phy_reset(tp);
12969                 if (err)
12970                         return err;
12971
12972                 tg3_phy_set_wirespeed(tp);
12973
12974                 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12975                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12976                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
12977                 if (!tg3_copper_is_advertising_all(tp, mask)) {
12978                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
12979                                             tp->link_config.flowctrl);
12980
12981                         tg3_writephy(tp, MII_BMCR,
12982                                      BMCR_ANENABLE | BMCR_ANRESTART);
12983                 }
12984         }
12985
12986 skip_phy_reset:
12987         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
12988                 err = tg3_init_5401phy_dsp(tp);
12989                 if (err)
12990                         return err;
12991
12992                 err = tg3_init_5401phy_dsp(tp);
12993         }
12994
12995         return err;
12996 }
12997
12998 static void __devinit tg3_read_vpd(struct tg3 *tp)
12999 {
13000         u8 *vpd_data;
13001         unsigned int block_end, rosize, len;
13002         int j, i = 0;
13003
13004         vpd_data = (u8 *)tg3_vpd_readblock(tp);
13005         if (!vpd_data)
13006                 goto out_no_vpd;
13007
13008         i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
13009                              PCI_VPD_LRDT_RO_DATA);
13010         if (i < 0)
13011                 goto out_not_found;
13012
13013         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13014         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13015         i += PCI_VPD_LRDT_TAG_SIZE;
13016
13017         if (block_end > TG3_NVM_VPD_LEN)
13018                 goto out_not_found;
13019
13020         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13021                                       PCI_VPD_RO_KEYWORD_MFR_ID);
13022         if (j > 0) {
13023                 len = pci_vpd_info_field_size(&vpd_data[j]);
13024
13025                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13026                 if (j + len > block_end || len != 4 ||
13027                     memcmp(&vpd_data[j], "1028", 4))
13028                         goto partno;
13029
13030                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13031                                               PCI_VPD_RO_KEYWORD_VENDOR0);
13032                 if (j < 0)
13033                         goto partno;
13034
13035                 len = pci_vpd_info_field_size(&vpd_data[j]);
13036
13037                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13038                 if (j + len > block_end)
13039                         goto partno;
13040
13041                 memcpy(tp->fw_ver, &vpd_data[j], len);
13042                 strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
13043         }
13044
13045 partno:
13046         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13047                                       PCI_VPD_RO_KEYWORD_PARTNO);
13048         if (i < 0)
13049                 goto out_not_found;
13050
13051         len = pci_vpd_info_field_size(&vpd_data[i]);
13052
13053         i += PCI_VPD_INFO_FLD_HDR_SIZE;
13054         if (len > TG3_BPN_SIZE ||
13055             (len + i) > TG3_NVM_VPD_LEN)
13056                 goto out_not_found;
13057
13058         memcpy(tp->board_part_number, &vpd_data[i], len);
13059
13060 out_not_found:
13061         kfree(vpd_data);
13062         if (tp->board_part_number[0])
13063                 return;
13064
13065 out_no_vpd:
13066         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13067                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13068                         strcpy(tp->board_part_number, "BCM5717");
13069                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13070                         strcpy(tp->board_part_number, "BCM5718");
13071                 else
13072                         goto nomatch;
13073         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13074                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13075                         strcpy(tp->board_part_number, "BCM57780");
13076                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13077                         strcpy(tp->board_part_number, "BCM57760");
13078                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13079                         strcpy(tp->board_part_number, "BCM57790");
13080                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13081                         strcpy(tp->board_part_number, "BCM57788");
13082                 else
13083                         goto nomatch;
13084         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13085                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13086                         strcpy(tp->board_part_number, "BCM57761");
13087                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13088                         strcpy(tp->board_part_number, "BCM57765");
13089                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13090                         strcpy(tp->board_part_number, "BCM57781");
13091                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13092                         strcpy(tp->board_part_number, "BCM57785");
13093                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13094                         strcpy(tp->board_part_number, "BCM57791");
13095                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13096                         strcpy(tp->board_part_number, "BCM57795");
13097                 else
13098                         goto nomatch;
13099         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13100                 strcpy(tp->board_part_number, "BCM95906");
13101         } else {
13102 nomatch:
13103                 strcpy(tp->board_part_number, "none");
13104         }
13105 }
13106
13107 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13108 {
13109         u32 val;
13110
13111         if (tg3_nvram_read(tp, offset, &val) ||
13112             (val & 0xfc000000) != 0x0c000000 ||
13113             tg3_nvram_read(tp, offset + 4, &val) ||
13114             val != 0)
13115                 return 0;
13116
13117         return 1;
13118 }
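
/*
 * Example of the validity test above (illustrative value): an image
 * whose first dword reads 0x0c0012ab satisfies
 * (val & 0xfc000000) == 0x0c000000, and if the following dword is zero
 * the image is accepted; any other pattern is treated as absent or
 * corrupt.
 */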
13119
13120 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13121 {
13122         u32 val, offset, start, ver_offset;
13123         int i, dst_off;
13124         bool newver = false;
13125
13126         if (tg3_nvram_read(tp, 0xc, &offset) ||
13127             tg3_nvram_read(tp, 0x4, &start))
13128                 return;
13129
13130         offset = tg3_nvram_logical_addr(tp, offset);
13131
13132         if (tg3_nvram_read(tp, offset, &val))
13133                 return;
13134
13135         if ((val & 0xfc000000) == 0x0c000000) {
13136                 if (tg3_nvram_read(tp, offset + 4, &val))
13137                         return;
13138
13139                 if (val == 0)
13140                         newver = true;
13141         }
13142
13143         dst_off = strlen(tp->fw_ver);
13144
13145         if (newver) {
13146                 if (TG3_VER_SIZE - dst_off < 16 ||
13147                     tg3_nvram_read(tp, offset + 8, &ver_offset))
13148                         return;
13149
13150                 offset = offset + ver_offset - start;
13151                 for (i = 0; i < 16; i += 4) {
13152                         __be32 v;
13153                         if (tg3_nvram_read_be32(tp, offset + i, &v))
13154                                 return;
13155
13156                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13157                 }
13158         } else {
13159                 u32 major, minor;
13160
13161                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13162                         return;
13163
13164                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13165                         TG3_NVM_BCVER_MAJSFT;
13166                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13167                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13168                          "v%d.%02d", major, minor);
13169         }
13170 }
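
/*
 * Example of the two branches above: in the legacy (non-newver) case a
 * version word decoding to major 3, minor 18 appends "v3.18" to
 * tp->fw_ver; in the newver case a 16-byte ASCII version string is
 * copied straight out of NVRAM instead.
 */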
13171
13172 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13173 {
13174         u32 val, major, minor;
13175
13176         /* Use native endian representation */
13177         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13178                 return;
13179
13180         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13181                 TG3_NVM_HWSB_CFG1_MAJSFT;
13182         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13183                 TG3_NVM_HWSB_CFG1_MINSFT;
13184
13185         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13186 }
13187
13188 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13189 {
13190         u32 offset, major, minor, build;
13191
13192         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13193
13194         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13195                 return;
13196
13197         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13198         case TG3_EEPROM_SB_REVISION_0:
13199                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13200                 break;
13201         case TG3_EEPROM_SB_REVISION_2:
13202                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13203                 break;
13204         case TG3_EEPROM_SB_REVISION_3:
13205                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13206                 break;
13207         case TG3_EEPROM_SB_REVISION_4:
13208                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13209                 break;
13210         case TG3_EEPROM_SB_REVISION_5:
13211                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13212                 break;
13213         case TG3_EEPROM_SB_REVISION_6:
13214                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13215                 break;
13216         default:
13217                 return;
13218         }
13219
13220         if (tg3_nvram_read(tp, offset, &val))
13221                 return;
13222
13223         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13224                 TG3_EEPROM_SB_EDH_BLD_SHFT;
13225         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13226                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13227         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
13228
13229         if (minor > 99 || build > 26)
13230                 return;
13231
13232         offset = strlen(tp->fw_ver);
13233         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13234                  " v%d.%02d", major, minor);
13235
13236         if (build > 0) {
13237                 offset = strlen(tp->fw_ver);
13238                 if (offset < TG3_VER_SIZE - 1)
13239                         tp->fw_ver[offset] = 'a' + build - 1;
13240         }
13241 }
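
/*
 * Example of the selfboot decode above: an EDH word yielding major 1,
 * minor 2, build 3 extends tp->fw_ver with " v1.02" plus a build suffix
 * of 'c' ('a' + build - 1), giving "sb v1.02c".  The suffix overwrites
 * the old NUL terminator; since the netdev private area is zeroed at
 * allocation, the byte after it still terminates the string.
 */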
13242
13243 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13244 {
13245         u32 val, offset, start;
13246         int i, vlen;
13247
13248         for (offset = TG3_NVM_DIR_START;
13249              offset < TG3_NVM_DIR_END;
13250              offset += TG3_NVM_DIRENT_SIZE) {
13251                 if (tg3_nvram_read(tp, offset, &val))
13252                         return;
13253
13254                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13255                         break;
13256         }
13257
13258         if (offset == TG3_NVM_DIR_END)
13259                 return;
13260
13261         if (!tg3_flag(tp, 5705_PLUS))
13262                 start = 0x08000000;
13263         else if (tg3_nvram_read(tp, offset - 4, &start))
13264                 return;
13265
13266         if (tg3_nvram_read(tp, offset + 4, &offset) ||
13267             !tg3_fw_img_is_valid(tp, offset) ||
13268             tg3_nvram_read(tp, offset + 8, &val))
13269                 return;
13270
13271         offset += val - start;
13272
13273         vlen = strlen(tp->fw_ver);
13274
13275         tp->fw_ver[vlen++] = ',';
13276         tp->fw_ver[vlen++] = ' ';
13277
13278         for (i = 0; i < 4; i++) {
13279                 __be32 v;
13280                 if (tg3_nvram_read_be32(tp, offset, &v))
13281                         return;
13282
13283                 offset += sizeof(v);
13284
13285                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13286                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13287                         break;
13288                 }
13289
13290                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13291                 vlen += sizeof(v);
13292         }
13293 }
13294
13295 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13296 {
13297         int vlen;
13298         u32 apedata;
13299         char *fwtype;
13300
13301         if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13302                 return;
13303
13304         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13305         if (apedata != APE_SEG_SIG_MAGIC)
13306                 return;
13307
13308         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13309         if (!(apedata & APE_FW_STATUS_READY))
13310                 return;
13311
13312         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13313
13314         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13315                 tg3_flag_set(tp, APE_HAS_NCSI);
13316                 fwtype = "NCSI";
13317         } else {
13318                 fwtype = "DASH";
13319         }
13320
13321         vlen = strlen(tp->fw_ver);
13322
13323         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13324                  fwtype,
13325                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13326                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13327                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13328                  (apedata & APE_FW_VERSION_BLDMSK));
13329 }
13330
13331 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13332 {
13333         u32 val;
13334         bool vpd_vers = false;
13335
13336         if (tp->fw_ver[0] != 0)
13337                 vpd_vers = true;
13338
13339         if (tg3_flag(tp, NO_NVRAM)) {
13340                 strcat(tp->fw_ver, "sb");
13341                 return;
13342         }
13343
13344         if (tg3_nvram_read(tp, 0, &val))
13345                 return;
13346
13347         if (val == TG3_EEPROM_MAGIC)
13348                 tg3_read_bc_ver(tp);
13349         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13350                 tg3_read_sb_ver(tp, val);
13351         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13352                 tg3_read_hwsb_ver(tp);
13353         else
13354                 return;
13355
13356         if (!tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || vpd_vers)
13357                 goto done;
13358
13359         tg3_read_mgmtfw_ver(tp);
13360
13361 done:
13362         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13363 }
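
/*
 * Dispatch example for the reader above: an NVRAM whose first dword is
 * TG3_EEPROM_MAGIC is parsed as bootcode, a selfboot magic routes to
 * tg3_read_sb_ver(), and a hardware-selfboot magic to
 * tg3_read_hwsb_ver().  The management-firmware version is appended
 * only when ASF is enabled without APE and no VPD version was found.
 */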
13364
13365 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13366
13367 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13368 {
13369         if (tg3_flag(tp, LRG_PROD_RING_CAP))
13370                 return TG3_RX_RET_MAX_SIZE_5717;
13371         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13372                 return TG3_RX_RET_MAX_SIZE_5700;
13373         else
13374                 return TG3_RX_RET_MAX_SIZE_5705;
13375 }
13376
13377 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13378         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13379         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13380         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13381         { },
13382 };
13383
13384 static int __devinit tg3_get_invariants(struct tg3 *tp)
13385 {
13386         u32 misc_ctrl_reg;
13387         u32 pci_state_reg, grc_misc_cfg;
13388         u32 val;
13389         u16 pci_cmd;
13390         int err;
13391
13392         /* Force memory write invalidate off.  If we leave it on,
13393          * then on 5700_BX chips we have to enable a workaround.
13394          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
         * to match the cacheline size.  The Broadcom driver has this
         * workaround but turns MWI off at all times, so the workaround
         * is never used.  This suggests that it is insufficient.
13398          */
13399         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13400         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13401         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13402
13403         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
13404          * has the register indirect write enable bit set before
13405          * we try to access any of the MMIO registers.  It is also
13406          * critical that the PCI-X hw workaround situation is decided
13407          * before that as well.
13408          */
13409         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13410                               &misc_ctrl_reg);
13411
13412         tp->pci_chip_rev_id = (misc_ctrl_reg >>
13413                                MISC_HOST_CTRL_CHIPREV_SHIFT);
13414         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13415                 u32 prod_id_asic_rev;
13416
13417                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13418                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13419                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13420                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13421                         pci_read_config_dword(tp->pdev,
13422                                               TG3PCI_GEN2_PRODID_ASICREV,
13423                                               &prod_id_asic_rev);
13424                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13425                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13426                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13427                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13428                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13429                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13430                         pci_read_config_dword(tp->pdev,
13431                                               TG3PCI_GEN15_PRODID_ASICREV,
13432                                               &prod_id_asic_rev);
13433                 else
13434                         pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13435                                               &prod_id_asic_rev);
13436
13437                 tp->pci_chip_rev_id = prod_id_asic_rev;
13438         }
13439
13440         /* Wrong chip ID in 5752 A0. This code can be removed later
13441          * as A0 is not in production.
13442          */
13443         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13444                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13445
13446         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
         * we need to disable memory-space accesses and use configuration
         * cycles only to access all registers. The 5702/03 chips
13449          * can mistakenly decode the special cycles from the
13450          * ICH chipsets as memory write cycles, causing corruption
13451          * of register and memory space. Only certain ICH bridges
13452          * will drive special cycles with non-zero data during the
13453          * address phase which can fall within the 5703's address
13454          * range. This is not an ICH bug as the PCI spec allows
13455          * non-zero address during special cycles. However, only
13456          * these ICH bridges are known to drive non-zero addresses
13457          * during special cycles.
13458          *
13459          * Since special cycles do not cross PCI bridges, we only
13460          * enable this workaround if the 5703 is on the secondary
13461          * bus of these ICH bridges.
13462          */
13463         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13464             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13465                 static struct tg3_dev_id {
13466                         u32     vendor;
13467                         u32     device;
13468                         u32     rev;
13469                 } ich_chipsets[] = {
13470                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13471                           PCI_ANY_ID },
13472                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13473                           PCI_ANY_ID },
13474                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13475                           0xa },
13476                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13477                           PCI_ANY_ID },
13478                         { },
13479                 };
13480                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13481                 struct pci_dev *bridge = NULL;
13482
13483                 while (pci_id->vendor != 0) {
13484                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
13485                                                 bridge);
13486                         if (!bridge) {
13487                                 pci_id++;
13488                                 continue;
13489                         }
13490                         if (pci_id->rev != PCI_ANY_ID) {
13491                                 if (bridge->revision > pci_id->rev)
13492                                         continue;
13493                         }
13494                         if (bridge->subordinate &&
13495                             (bridge->subordinate->number ==
13496                              tp->pdev->bus->number)) {
13497                                 tg3_flag_set(tp, ICH_WORKAROUND);
13498                                 pci_dev_put(bridge);
13499                                 break;
13500                         }
13501                 }
13502         }
13503
13504         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13505                 static struct tg3_dev_id {
13506                         u32     vendor;
13507                         u32     device;
13508                 } bridge_chipsets[] = {
13509                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13510                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13511                         { },
13512                 };
13513                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13514                 struct pci_dev *bridge = NULL;
13515
13516                 while (pci_id->vendor != 0) {
13517                         bridge = pci_get_device(pci_id->vendor,
13518                                                 pci_id->device,
13519                                                 bridge);
13520                         if (!bridge) {
13521                                 pci_id++;
13522                                 continue;
13523                         }
13524                         if (bridge->subordinate &&
13525                             (bridge->subordinate->number <=
13526                              tp->pdev->bus->number) &&
13527                             (bridge->subordinate->subordinate >=
13528                              tp->pdev->bus->number)) {
13529                                 tg3_flag_set(tp, 5701_DMA_BUG);
13530                                 pci_dev_put(bridge);
13531                                 break;
13532                         }
13533                 }
13534         }
13535
13536         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
         * DMA addresses wider than 40 bits. This bridge may have additional
         * 57xx devices behind it, in some 4-port NIC designs for example.
13539          * Any tg3 device found behind the bridge will also need the 40-bit
13540          * DMA workaround.
13541          */
13542         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13543             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13544                 tg3_flag_set(tp, 5780_CLASS);
13545                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13546                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13547         } else {
13548                 struct pci_dev *bridge = NULL;
13549
13550                 do {
13551                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13552                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
13553                                                 bridge);
13554                         if (bridge && bridge->subordinate &&
13555                             (bridge->subordinate->number <=
13556                              tp->pdev->bus->number) &&
13557                             (bridge->subordinate->subordinate >=
13558                              tp->pdev->bus->number)) {
13559                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13560                                 pci_dev_put(bridge);
13561                                 break;
13562                         }
13563                 } while (bridge);
13564         }
13565
13566         /* Initialize misc host control in PCI block. */
13567         tp->misc_host_ctrl |= (misc_ctrl_reg &
13568                                MISC_HOST_CTRL_CHIPREV);
13569         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13570                                tp->misc_host_ctrl);
13571
13572         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13573             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
13574             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13575             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13576                 tp->pdev_peer = tg3_find_peer(tp);
13577
13578         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13579             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13580             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13581                 tg3_flag_set(tp, 5717_PLUS);
13582
13583         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13584             tg3_flag(tp, 5717_PLUS))
13585                 tg3_flag_set(tp, 57765_PLUS);
13586
13587         /* Intentionally exclude ASIC_REV_5906 */
13588         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13589             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13590             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13591             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13592             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13593             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13594             tg3_flag(tp, 57765_PLUS))
13595                 tg3_flag_set(tp, 5755_PLUS);
13596
13597         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13598             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13599             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13600             tg3_flag(tp, 5755_PLUS) ||
13601             tg3_flag(tp, 5780_CLASS))
13602                 tg3_flag_set(tp, 5750_PLUS);
13603
13604         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13605             tg3_flag(tp, 5750_PLUS))
13606                 tg3_flag_set(tp, 5705_PLUS);
13607
13608         /* Determine TSO capabilities */
13609         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13610                 ; /* Do nothing. HW bug. */
13611         else if (tg3_flag(tp, 57765_PLUS))
13612                 tg3_flag_set(tp, HW_TSO_3);
13613         else if (tg3_flag(tp, 5755_PLUS) ||
13614                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13615                 tg3_flag_set(tp, HW_TSO_2);
13616         else if (tg3_flag(tp, 5750_PLUS)) {
13617                 tg3_flag_set(tp, HW_TSO_1);
13618                 tg3_flag_set(tp, TSO_BUG);
13619                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13620                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13621                         tg3_flag_clear(tp, TSO_BUG);
13622         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13623                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13624                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
                tg3_flag_set(tp, TSO_BUG);
13626                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13627                         tp->fw_needed = FIRMWARE_TG3TSO5;
13628                 else
13629                         tp->fw_needed = FIRMWARE_TG3TSO;
13630         }
13631
13632         /* Selectively allow TSO based on operating conditions */
13633         if (tg3_flag(tp, HW_TSO_1) ||
13634             tg3_flag(tp, HW_TSO_2) ||
13635             tg3_flag(tp, HW_TSO_3) ||
13636             (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
13637                 tg3_flag_set(tp, TSO_CAPABLE);
13638         else {
13639                 tg3_flag_clear(tp, TSO_CAPABLE);
13640                 tg3_flag_clear(tp, TSO_BUG);
13641                 tp->fw_needed = NULL;
13642         }
13643
13644         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
13645                 tp->fw_needed = FIRMWARE_TG3;
13646
13647         tp->irq_max = 1;
13648
13649         if (tg3_flag(tp, 5750_PLUS)) {
13650                 tg3_flag_set(tp, SUPPORT_MSI);
13651                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13652                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13653                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13654                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13655                      tp->pdev_peer == tp->pdev))
13656                         tg3_flag_clear(tp, SUPPORT_MSI);
13657
13658                 if (tg3_flag(tp, 5755_PLUS) ||
13659                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13660                         tg3_flag_set(tp, 1SHOT_MSI);
13661                 }
13662
13663                 if (tg3_flag(tp, 57765_PLUS)) {
13664                         tg3_flag_set(tp, SUPPORT_MSIX);
13665                         tp->irq_max = TG3_IRQ_MAX_VECS;
13666                 }
13667         }
13668
13669         /* All chips can get confused if TX buffers
13670          * straddle the 4GB address boundary.
13671          */
13672         tg3_flag_set(tp, 4G_DMA_BNDRY_BUG);
13673
13674         if (tg3_flag(tp, 5755_PLUS))
13675                 tg3_flag_set(tp, SHORT_DMA_BUG);
13676         else
13677                 tg3_flag_set(tp, 40BIT_DMA_LIMIT_BUG);
13678
13679         if (tg3_flag(tp, 5717_PLUS))
13680                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
13681
13682         if (tg3_flag(tp, 57765_PLUS) &&
13683             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13684                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
13685
13686         if (!tg3_flag(tp, 5705_PLUS) ||
13687             tg3_flag(tp, 5780_CLASS) ||
13688             tg3_flag(tp, USE_JUMBO_BDFLAG))
13689                 tg3_flag_set(tp, JUMBO_CAPABLE);
13690
13691         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13692                               &pci_state_reg);
13693
13694         tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
13695         if (tp->pcie_cap != 0) {
13696                 u16 lnkctl;
13697
13698                 tg3_flag_set(tp, PCI_EXPRESS);
13699
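                      /* Cap the PCIe maximum read request size.  The 5719
                       * and 5720 are limited to 2048 bytes here, presumably
                       * to avoid DMA problems with larger read requests on
                       * those chips.
                       */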
13700                 tp->pcie_readrq = 4096;
13701                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13702                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13703                         tp->pcie_readrq = 2048;
13704
13705                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13706
13707                 pci_read_config_word(tp->pdev,
13708                                      tp->pcie_cap + PCI_EXP_LNKCTL,
13709                                      &lnkctl);
13710                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13711                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13712                             ASIC_REV_5906) {
13713                                 tg3_flag_clear(tp, HW_TSO_2);
13714                                 tg3_flag_clear(tp, TSO_CAPABLE);
13715                         }
13716                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13717                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13718                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13719                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13720                                 tg3_flag_set(tp, CLKREQ_BUG);
13721                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13722                         tg3_flag_set(tp, L1PLLPD_EN);
13723                 }
13724         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13725                 tg3_flag_set(tp, PCI_EXPRESS);
13726         } else if (!tg3_flag(tp, 5705_PLUS) ||
13727                    tg3_flag(tp, 5780_CLASS)) {
13728                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13729                 if (!tp->pcix_cap) {
13730                         dev_err(&tp->pdev->dev,
13731                                 "Cannot find PCI-X capability, aborting\n");
13732                         return -EIO;
13733                 }
13734
13735                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13736                         tg3_flag_set(tp, PCIX_MODE);
13737         }
13738
13739         /* If we have an AMD 762 or VIA K8T800 chipset, write
13740          * reordering of mailbox register accesses by the host
13741          * controller can cause major trouble.  We read back from
13742          * every mailbox register write to force the writes to be
13743          * posted to the chip in order.
13744          */
13745         if (pci_dev_present(tg3_write_reorder_chipsets) &&
13746             !tg3_flag(tp, PCI_EXPRESS))
13747                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
13748
13749         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13750                              &tp->pci_cacheline_sz);
13751         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13752                              &tp->pci_lat_timer);
13753         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13754             tp->pci_lat_timer < 64) {
13755                 tp->pci_lat_timer = 64;
13756                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13757                                       tp->pci_lat_timer);
13758         }
13759
13760         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13761                 /* 5700 BX chips need to have their TX producer index
13762                  * mailboxes written twice to work around a bug.
13763                  */
13764                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
13765
13766                 /* If we are in PCI-X mode, enable register write workaround.
13767                  *
13768                  * The workaround is to use indirect register accesses
13769                  * for all chip writes not to mailbox registers.
13770                  */
13771                 if (tg3_flag(tp, PCIX_MODE)) {
13772                         u32 pm_reg;
13773
13774                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
13775
13776                         /* The chip can have its power management PCI config
13777                          * space registers clobbered due to this bug.
13778                          * So explicitly force the chip into D0 here.
13779                          */
13780                         pci_read_config_dword(tp->pdev,
13781                                               tp->pm_cap + PCI_PM_CTRL,
13782                                               &pm_reg);
13783                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13784                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13785                         pci_write_config_dword(tp->pdev,
13786                                                tp->pm_cap + PCI_PM_CTRL,
13787                                                pm_reg);
13788
13789                         /* Also, force SERR#/PERR# in PCI command. */
13790                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13791                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13792                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13793                 }
13794         }
13795
13796         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13797                 tg3_flag_set(tp, PCI_HIGH_SPEED);
13798         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13799                 tg3_flag_set(tp, PCI_32BIT);
13800
13801         /* Chip-specific fixup from Broadcom driver */
13802         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13803             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
13804                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
13805                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
13806         }
13807
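              /* Every register and mailbox access in the driver goes through
               * the function pointers assigned here (the tr32()/tw32() macro
               * family dispatches through them), so the bus-specific
               * workarounds below only have to swap in indirect or
               * read-back-flushing accessors in one place.
               */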
13808         /* Default fast path register access methods */
13809         tp->read32 = tg3_read32;
13810         tp->write32 = tg3_write32;
13811         tp->read32_mbox = tg3_read32;
13812         tp->write32_mbox = tg3_write32;
13813         tp->write32_tx_mbox = tg3_write32;
13814         tp->write32_rx_mbox = tg3_write32;
13815
13816         /* Various workaround register access methods */
13817         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
13818                 tp->write32 = tg3_write_indirect_reg32;
13819         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13820                  (tg3_flag(tp, PCI_EXPRESS) &&
13821                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
13822                 /*
13823                  * Back-to-back register writes can cause problems on these
13824                  * chips; the workaround is to read back all register writes
13825                  * except those to mailbox registers.
13826                  *
13827                  * See tg3_write_indirect_reg32().
13828                  */
13829                 tp->write32 = tg3_write_flush_reg32;
13830         }
13831
13832         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
13833                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
13834                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
13835                         tp->write32_rx_mbox = tg3_write_flush_reg32;
13836         }
13837
13838         if (tg3_flag(tp, ICH_WORKAROUND)) {
13839                 tp->read32 = tg3_read_indirect_reg32;
13840                 tp->write32 = tg3_write_indirect_reg32;
13841                 tp->read32_mbox = tg3_read_indirect_mbox;
13842                 tp->write32_mbox = tg3_write_indirect_mbox;
13843                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
13844                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
13845
13846                 iounmap(tp->regs);
13847                 tp->regs = NULL;
13848
13849                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13850                 pci_cmd &= ~PCI_COMMAND_MEMORY;
13851                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13852         }
13853         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13854                 tp->read32_mbox = tg3_read32_mbox_5906;
13855                 tp->write32_mbox = tg3_write32_mbox_5906;
13856                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
13857                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
13858         }
13859
13860         if (tp->write32 == tg3_write_indirect_reg32 ||
13861             (tg3_flag(tp, PCIX_MODE) &&
13862              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13863               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
13864                 tg3_flag_set(tp, SRAM_USE_CONFIG);
13865
13866         /* Get eeprom hw config before calling tg3_power_up().
13867          * In particular, the TG3_FLAG_IS_NIC flag must be
13868          * determined before calling tg3_power_up() so that
13869          * we know whether or not to switch out of Vaux power.
13870          * When the flag is set, it means that GPIO1 is used for eeprom
13871          * write protect and also implies that it is a LOM where GPIOs
13872          * are not used to switch power.
13873          */
13874         tg3_get_eeprom_hw_cfg(tp);
13875
13876         if (tg3_flag(tp, ENABLE_APE)) {
13877                 /* Allow reads and writes to the
13878                  * APE register and memory space.
13879                  */
13880                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
13881                                  PCISTATE_ALLOW_APE_SHMEM_WR |
13882                                  PCISTATE_ALLOW_APE_PSPACE_WR;
13883                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
13884                                        pci_state_reg);
13885         }
13886
13887         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13888             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13889             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13890             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13891             tg3_flag(tp, 57765_PLUS))
13892                 tg3_flag_set(tp, CPMU_PRESENT);
13893
13894         /* Set up tp->grc_local_ctrl before calling tg3_power_up().
13895          * GPIO1 driven high will bring 5700's external PHY out of reset.
13896          * It is also used as eeprom write protect on LOMs.
13897          */
13898         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
13899         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13900             tg3_flag(tp, EEPROM_WRITE_PROT))
13901                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
13902                                        GRC_LCLCTRL_GPIO_OUTPUT1);
13903         /* Unused GPIO3 must be driven as output on 5752 because there
13904          * are no pull-up resistors on unused GPIO pins.
13905          */
13906         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13907                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
13908
13909         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13910             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13911             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13912                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13913
13914         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
13915             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
13916                 /* Turn off the debug UART. */
13917                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13918                 if (tg3_flag(tp, IS_NIC))
13919                         /* Keep VMain power. */
13920                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
13921                                               GRC_LCLCTRL_GPIO_OUTPUT0;
13922         }
13923
13924         /* Force the chip into D0. */
13925         err = tg3_power_up(tp);
13926         if (err) {
13927                 dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
13928                 return err;
13929         }
13930
13931         /* Derive initial jumbo mode from MTU assigned in
13932          * ether_setup() via the alloc_etherdev() call
13933          */
13934         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
13935                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13936
13937         /* Determine WakeOnLan speed to use. */
13938         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13939             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
13940             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
13941             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
13942                 tg3_flag_clear(tp, WOL_SPEED_100MB);
13943         } else {
13944                 tg3_flag_set(tp, WOL_SPEED_100MB);
13945         }
13946
13947         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13948                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
13949
13950         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
13951         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13952             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
13953              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
13954              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
13955             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
13956             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13957                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
13958
13959         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
13960             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
13961                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
13962         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
13963                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
13964
13965         if (tg3_flag(tp, 5705_PLUS) &&
13966             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
13967             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13968             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
13969             !tg3_flag(tp, 57765_PLUS)) {
13970                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13971                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13972                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13973                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
13974                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
13975                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
13976                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
13977                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
13978                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
13979                 } else
13980                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
13981         }
13982
13983         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13984             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
13985                 tp->phy_otp = tg3_read_otp_phycfg(tp);
13986                 if (tp->phy_otp == 0)
13987                         tp->phy_otp = TG3_OTP_DEFAULT;
13988         }
13989
13990         if (tg3_flag(tp, CPMU_PRESENT))
13991                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
13992         else
13993                 tp->mi_mode = MAC_MI_MODE_BASE;
13994
13995         tp->coalesce_mode = 0;
13996         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
13997             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
13998                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
13999
14000         /* Set these bits to enable statistics workaround. */
14001         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14002             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14003             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14004                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14005                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14006         }
14007
14008         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14009             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14010                 tg3_flag_set(tp, USE_PHYLIB);
14011
14012         err = tg3_mdio_init(tp);
14013         if (err)
14014                 return err;
14015
14016         /* Initialize data/descriptor byte/word swapping. */
14017         val = tr32(GRC_MODE);
14018         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14019                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14020                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14021                         GRC_MODE_B2HRX_ENABLE |
14022                         GRC_MODE_HTX2B_ENABLE |
14023                         GRC_MODE_HOST_STACKUP);
14024         else
14025                 val &= GRC_MODE_HOST_STACKUP;
14026
14027         tw32(GRC_MODE, val | tp->grc_mode);
14028
14029         tg3_switch_clocks(tp);
14030
14031         /* Clear this out for sanity. */
14032         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14033
14034         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14035                               &pci_state_reg);
14036         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14037             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14038                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14039
14040                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14041                     chiprevid == CHIPREV_ID_5701_B0 ||
14042                     chiprevid == CHIPREV_ID_5701_B2 ||
14043                     chiprevid == CHIPREV_ID_5701_B5) {
14044                         void __iomem *sram_base;
14045
14046                         /* Write some dummy words into the SRAM status block
14047                          * area and see if they read back correctly.  If the
14048                          * readback is bad, force-enable the PCI-X workaround.
14049                          */
14050                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14051
14052                         writel(0x00000000, sram_base);
14053                         writel(0x00000000, sram_base + 4);
14054                         writel(0xffffffff, sram_base + 4);
14055                         if (readl(sram_base) != 0x00000000)
14056                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14057                 }
14058         }
14059
14060         udelay(50);
14061         tg3_nvram_init(tp);
14062
14063         grc_misc_cfg = tr32(GRC_MISC_CFG);
14064         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14065
14066         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14067             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14068              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14069                 tg3_flag_set(tp, IS_5788);
14070
14071         if (!tg3_flag(tp, IS_5788) &&
14072             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14073                 tg3_flag_set(tp, TAGGED_STATUS);
14074         if (tg3_flag(tp, TAGGED_STATUS)) {
14075                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14076                                       HOSTCC_MODE_CLRTICK_TXBD);
14077
14078                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14079                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14080                                        tp->misc_host_ctrl);
14081         }
14082
14083         /* Preserve the APE MAC_MODE bits */
14084         if (tg3_flag(tp, ENABLE_APE))
14085                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14086         else
14087                 tp->mac_mode = TG3_DEF_MAC_MODE;
14088
14089         /* These devices are limited to 10/100 only */
14090         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14091              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14092             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14093              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14094              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14095               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14096               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14097             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14098              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14099               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14100               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14101             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14102             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14103             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14104             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14105                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14106
14107         err = tg3_phy_probe(tp);
14108         if (err) {
14109                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14110                 /* ... but do not return immediately ... */
14111                 tg3_mdio_fini(tp);
14112         }
14113
14114         tg3_read_vpd(tp);
14115         tg3_read_fw_ver(tp);
14116
14117         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14118                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14119         } else {
14120                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14121                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14122                 else
14123                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14124         }
14125
14126         /* 5700 {AX,BX} chips have a broken status block link
14127          * change bit implementation, so we must use the
14128          * status register in those cases.
14129          */
14130         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14131                 tg3_flag_set(tp, USE_LINKCHG_REG);
14132         else
14133                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14134
14135         /* The led_ctrl is set during tg3_phy_probe; here we may
14136          * have to force the link status polling mechanism based
14137          * upon subsystem IDs.
14138          */
14139         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14140             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14141             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14142                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14143                 tg3_flag_set(tp, USE_LINKCHG_REG);
14144         }
14145
14146         /* For all SERDES we poll the MAC status register. */
14147         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14148                 tg3_flag_set(tp, POLL_SERDES);
14149         else
14150                 tg3_flag_clear(tp, POLL_SERDES);
14151
14152         tp->rx_offset = NET_IP_ALIGN;
14153         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14154         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14155             tg3_flag(tp, PCIX_MODE)) {
14156                 tp->rx_offset = 0;
14157 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14158                 tp->rx_copy_thresh = ~(u16)0;
14159 #endif
14160         }
14161
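              /* The ring sizes here are powers of two, so size - 1 doubles
               * as the index wrap mask.
               */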
14162         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14163         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14164         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14165
14166         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14167
14168         /* Increment the rx prod index on the rx std ring by at most
14169          * 8 for these chips to work around hw errata.
14170          */
14171         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14172             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14173             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14174                 tp->rx_std_max_post = 8;
14175
14176         if (tg3_flag(tp, ASPM_WORKAROUND))
14177                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14178                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14179
14180         return err;
14181 }
14182
14183 #ifdef CONFIG_SPARC
14184 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14185 {
14186         struct net_device *dev = tp->dev;
14187         struct pci_dev *pdev = tp->pdev;
14188         struct device_node *dp = pci_device_to_OF_node(pdev);
14189         const unsigned char *addr;
14190         int len;
14191
14192         addr = of_get_property(dp, "local-mac-address", &len);
14193         if (addr && len == 6) {
14194                 memcpy(dev->dev_addr, addr, 6);
14195                 memcpy(dev->perm_addr, dev->dev_addr, 6);
14196                 return 0;
14197         }
14198         return -ENODEV;
14199 }
14200
14201 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14202 {
14203         struct net_device *dev = tp->dev;
14204
14205         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14206         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14207         return 0;
14208 }
14209 #endif
14210
14211 static int __devinit tg3_get_device_address(struct tg3 *tp)
14212 {
14213         struct net_device *dev = tp->dev;
14214         u32 hi, lo, mac_offset;
14215         int addr_ok = 0;
14216
14217 #ifdef CONFIG_SPARC
14218         if (!tg3_get_macaddr_sparc(tp))
14219                 return 0;
14220 #endif
14221
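              /* The address is tried in three stages: the SRAM mailbox left
               * by bootcode, then NVRAM at mac_offset (adjusted below for
               * dual-MAC parts, multi-function 5717+ devices and the 5906),
               * and finally whatever is already latched in the MAC address
               * registers.
               */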
14222         mac_offset = 0x7c;
14223         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14224             tg3_flag(tp, 5780_CLASS)) {
14225                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14226                         mac_offset = 0xcc;
14227                 if (tg3_nvram_lock(tp))
14228                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14229                 else
14230                         tg3_nvram_unlock(tp);
14231         } else if (tg3_flag(tp, 5717_PLUS)) {
14232                 if (PCI_FUNC(tp->pdev->devfn) & 1)
14233                         mac_offset = 0xcc;
14234                 if (PCI_FUNC(tp->pdev->devfn) > 1)
14235                         mac_offset += 0x18c;
14236         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14237                 mac_offset = 0x10;
14238
14239         /* First try to get it from MAC address mailbox. */
14240         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
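              /* Bootcode stores 0x484b ("HK" in ASCII, presumably a
               * signature) in the upper 16 bits when the mailbox holds a
               * valid address.
               */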
14241         if ((hi >> 16) == 0x484b) {
14242                 dev->dev_addr[0] = (hi >>  8) & 0xff;
14243                 dev->dev_addr[1] = (hi >>  0) & 0xff;
14244
14245                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14246                 dev->dev_addr[2] = (lo >> 24) & 0xff;
14247                 dev->dev_addr[3] = (lo >> 16) & 0xff;
14248                 dev->dev_addr[4] = (lo >>  8) & 0xff;
14249                 dev->dev_addr[5] = (lo >>  0) & 0xff;
14250
14251                 /* Some old bootcode may report a 0 MAC address in SRAM */
14252                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14253         }
14254         if (!addr_ok) {
14255                 /* Next, try NVRAM. */
14256                 if (!tg3_flag(tp, NO_NVRAM) &&
14257                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14258                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14259                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14260                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14261                 }
14262                 /* Finally just fetch it out of the MAC control regs. */
14263                 else {
14264                         hi = tr32(MAC_ADDR_0_HIGH);
14265                         lo = tr32(MAC_ADDR_0_LOW);
14266
14267                         dev->dev_addr[5] = lo & 0xff;
14268                         dev->dev_addr[4] = (lo >> 8) & 0xff;
14269                         dev->dev_addr[3] = (lo >> 16) & 0xff;
14270                         dev->dev_addr[2] = (lo >> 24) & 0xff;
14271                         dev->dev_addr[1] = hi & 0xff;
14272                         dev->dev_addr[0] = (hi >> 8) & 0xff;
14273                 }
14274         }
14275
14276         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14277 #ifdef CONFIG_SPARC
14278                 if (!tg3_get_default_macaddr_sparc(tp))
14279                         return 0;
14280 #endif
14281                 return -EINVAL;
14282         }
14283         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14284         return 0;
14285 }
14286
14287 #define BOUNDARY_SINGLE_CACHELINE       1
14288 #define BOUNDARY_MULTI_CACHELINE        2
14289
14290 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14291 {
14292         int cacheline_size;
14293         u8 byte;
14294         int goal;
14295
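              /* PCI_CACHE_LINE_SIZE is in units of 32-bit words, hence the
               * multiply by 4 below (e.g. a raw value of 16 means a 64-byte
               * line).  A value of 0 usually means firmware never programmed
               * it, so assume the worst-case 1024-byte boundary.
               */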
14296         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14297         if (byte == 0)
14298                 cacheline_size = 1024;
14299         else
14300                 cacheline_size = (int) byte * 4;
14301
14302         /* On 5703 and later chips, the boundary bits have no
14303          * effect.
14304          */
14305         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14306             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14307             !tg3_flag(tp, PCI_EXPRESS))
14308                 goto out;
14309
14310 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14311         goal = BOUNDARY_MULTI_CACHELINE;
14312 #else
14313 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14314         goal = BOUNDARY_SINGLE_CACHELINE;
14315 #else
14316         goal = 0;
14317 #endif
14318 #endif
14319
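              /* goal == 0 means this platform has no preference: leave the
               * chip's DMA boundary setting alone.  RISC hosts whose PCI
               * controllers disconnect on bursts across cache lines ask for
               * single- or multi-cacheline limits instead.
               */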
14320         if (tg3_flag(tp, 57765_PLUS)) {
14321                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14322                 goto out;
14323         }
14324
14325         if (!goal)
14326                 goto out;
14327
14328         /* PCI controllers on most RISC systems tend to disconnect
14329          * when a device tries to burst across a cache-line boundary.
14330          * Therefore, letting tg3 do so just wastes PCI bandwidth.
14331          *
14332          * Unfortunately, for PCI-E there are only limited
14333          * write-side controls for this, and thus for reads
14334          * we will still get the disconnects.  We'll also waste
14335          * these PCI cycles for both read and write for chips
14336          * other than 5700 and 5701, which do not implement the
14337          * boundary bits.
14338          */
14339         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14340                 switch (cacheline_size) {
14341                 case 16:
14342                 case 32:
14343                 case 64:
14344                 case 128:
14345                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14346                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14347                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14348                         } else {
14349                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14350                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14351                         }
14352                         break;
14353
14354                 case 256:
14355                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14356                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14357                         break;
14358
14359                 default:
14360                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14361                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14362                         break;
14363                 }
14364         } else if (tg3_flag(tp, PCI_EXPRESS)) {
14365                 switch (cacheline_size) {
14366                 case 16:
14367                 case 32:
14368                 case 64:
14369                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14370                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14371                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14372                                 break;
14373                         }
14374                         /* fallthrough */
14375                 case 128:
14376                 default:
14377                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14378                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14379                         break;
14380                 }
14381         } else {
14382                 switch (cacheline_size) {
14383                 case 16:
14384                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14385                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14386                                         DMA_RWCTRL_WRITE_BNDRY_16);
14387                                 break;
14388                         }
14389                         /* fallthrough */
14390                 case 32:
14391                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14392                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14393                                         DMA_RWCTRL_WRITE_BNDRY_32);
14394                                 break;
14395                         }
14396                         /* fallthrough */
14397                 case 64:
14398                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14399                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14400                                         DMA_RWCTRL_WRITE_BNDRY_64);
14401                                 break;
14402                         }
14403                         /* fallthrough */
14404                 case 128:
14405                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14406                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14407                                         DMA_RWCTRL_WRITE_BNDRY_128);
14408                                 break;
14409                         }
14410                         /* fallthrough */
14411                 case 256:
14412                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
14413                                 DMA_RWCTRL_WRITE_BNDRY_256);
14414                         break;
14415                 case 512:
14416                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
14417                                 DMA_RWCTRL_WRITE_BNDRY_512);
14418                         break;
14419                 case 1024:
14420                 default:
14421                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14422                                 DMA_RWCTRL_WRITE_BNDRY_1024);
14423                         break;
14424                 }
14425         }
14426
14427 out:
14428         return val;
14429 }
14430
14431 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14432 {
14433         struct tg3_internal_buffer_desc test_desc;
14434         u32 sram_dma_descs;
14435         int i, ret;
14436
14437         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14438
14439         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14440         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14441         tw32(RDMAC_STATUS, 0);
14442         tw32(WDMAC_STATUS, 0);
14443
14444         tw32(BUFMGR_MODE, 0);
14445         tw32(FTQ_RESET, 0);
14446
14447         test_desc.addr_hi = ((u64) buf_dma) >> 32;
14448         test_desc.addr_lo = buf_dma & 0xffffffff;
14449         test_desc.nic_mbuf = 0x00002100;
14450         test_desc.len = size;
14451
14452         /*
14453          * The HP ZX1 was seeing test failures for 5701 cards running
14454          * at 33MHz the *second* time the tg3 driver was loaded after
14455          * an initial scan.
14456          *
14457          * Broadcom tells me:
14458          *   ...the DMA engine is connected to the GRC block and a DMA
14459          *   reset may affect the GRC block in some unpredictable way...
14460          *   The behavior of resets to individual blocks has not been tested.
14461          *
14462          * Broadcom noted the GRC reset will also reset all sub-components.
14463          */
14464         if (to_device) {
14465                 test_desc.cqid_sqid = (13 << 8) | 2;
14466
14467                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14468                 udelay(40);
14469         } else {
14470                 test_desc.cqid_sqid = (16 << 8) | 7;
14471
14472                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14473                 udelay(40);
14474         }
14475         test_desc.flags = 0x00000005;
14476
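              /* Copy the descriptor into NIC SRAM one 32-bit word at a time
               * through the PCI memory window config registers rather than
               * the memory-mapped BAR.
               */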
14477         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14478                 u32 val;
14479
14480                 val = *(((u32 *)&test_desc) + i);
14481                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14482                                        sram_dma_descs + (i * sizeof(u32)));
14483                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14484         }
14485         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
14486
14487         if (to_device)
14488                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14489         else
14490                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14491
14492         ret = -ENODEV;
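              /* Poll the completion FIFO for the descriptor to come back;
               * 40 iterations x 100us gives the DMA roughly 4ms to finish.
               */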
14493         for (i = 0; i < 40; i++) {
14494                 u32 val;
14495
14496                 if (to_device)
14497                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14498                 else
14499                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14500                 if ((val & 0xffff) == sram_dma_descs) {
14501                         ret = 0;
14502                         break;
14503                 }
14504
14505                 udelay(100);
14506         }
14507
14508         return ret;
14509 }
14510
14511 #define TEST_BUFFER_SIZE        0x2000
14512
14513 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14514         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14515         { },
14516 };
14517
14518 static int __devinit tg3_test_dma(struct tg3 *tp)
14519 {
14520         dma_addr_t buf_dma;
14521         u32 *buf, saved_dma_rwctrl;
14522         int ret = 0;
14523
14524         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14525                                  &buf_dma, GFP_KERNEL);
14526         if (!buf) {
14527                 ret = -ENOMEM;
14528                 goto out_nofree;
14529         }
14530
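              /* Seed DMA_RW_CTRL with default PCI read/write DMA command
               * codes, then let tg3_calc_dma_bndry() fold in boundary bits
               * suited to the host bridge and cache line size.
               */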
14531         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14532                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14533
14534         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14535
14536         if (tg3_flag(tp, 57765_PLUS))
14537                 goto out;
14538
14539         if (tg3_flag(tp, PCI_EXPRESS)) {
14540                 /* DMA read watermark not used on PCIE */
14541                 tp->dma_rwctrl |= 0x00180000;
14542         } else if (!tg3_flag(tp, PCIX_MODE)) {
14543                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14544                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14545                         tp->dma_rwctrl |= 0x003f0000;
14546                 else
14547                         tp->dma_rwctrl |= 0x003f000f;
14548         } else {
14549                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14550                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14551                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14552                         u32 read_water = 0x7;
14553
14554                         /* If the 5704 is behind the EPB bridge, we can
14555                          * do the less restrictive ONE_DMA workaround for
14556                          * better performance.
14557                          */
14558                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14559                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14560                                 tp->dma_rwctrl |= 0x8000;
14561                         else if (ccval == 0x6 || ccval == 0x7)
14562                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14563
14564                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14565                                 read_water = 4;
14566                         /* Set bit 23 to enable PCIX hw bug fix */
14567                         tp->dma_rwctrl |=
14568                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14569                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14570                                 (1 << 23);
14571                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14572                         /* 5780 always in PCIX mode */
14573                         tp->dma_rwctrl |= 0x00144000;
14574                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14575                         /* 5714 always in PCIX mode */
14576                         tp->dma_rwctrl |= 0x00148000;
14577                 } else {
14578                         tp->dma_rwctrl |= 0x001b000f;
14579                 }
14580         }
14581
14582         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14583             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14584                 tp->dma_rwctrl &= 0xfffffff0;
14585
14586         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14587             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14588                 /* Remove this if it causes problems for some boards. */
14589                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14590
14591                 /* On 5700/5701 chips, we need to set this bit.
14592                  * Otherwise the chip will issue cacheline transactions
14593          * to streamable DMA memory without all of the byte
14594          * enables turned on.  This is an error on several
14595                  * RISC PCI controllers, in particular sparc64.
14596                  *
14597                  * On 5703/5704 chips, this bit has been reassigned
14598                  * a different meaning.  In particular, it is used
14599                  * on those chips to enable a PCI-X workaround.
14600                  */
14601                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14602         }
14603
14604         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14605
14606 #if 0
14607         /* Unneeded, already done by tg3_get_invariants.  */
14608         tg3_switch_clocks(tp);
14609 #endif
14610
14611         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14612             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14613                 goto out;
14614
14615         /* It is best to perform the DMA test with the maximum write
14616          * burst size to expose the 5700/5701 write DMA bug.
14617          */
14618         saved_dma_rwctrl = tp->dma_rwctrl;
14619         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14620         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14621
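              /* Fill the buffer with a known incrementing pattern, DMA it to
               * the chip and back, and verify it.  On a mismatch, tighten
               * the write boundary to 16 bytes and retry once before giving
               * up.
               */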
14622         while (1) {
14623                 u32 *p = buf, i;
14624
14625                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14626                         p[i] = i;
14627
14628                 /* Send the buffer to the chip. */
14629                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14630                 if (ret) {
14631                         dev_err(&tp->pdev->dev,
14632                                 "%s: Buffer write failed. err = %d\n",
14633                                 __func__, ret);
14634                         break;
14635                 }
14636
14637 #if 0
14638                 /* validate data reached card RAM correctly. */
14639                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14640                         u32 val;
14641                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
14642                         if (le32_to_cpu(val) != p[i]) {
14643                                 dev_err(&tp->pdev->dev,
14644                                         "%s: Buffer corrupted on device! "
14645                                         "(%d != %d)\n", __func__, val, i);
14646                                 /* ret = -ENODEV here? */
14647                         }
14648                         p[i] = 0;
14649                 }
14650 #endif
14651                 /* Now read it back. */
14652                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14653                 if (ret) {
14654                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14655                                 "err = %d\n", __func__, ret);
14656                         break;
14657                 }
14658
14659                 /* Verify it. */
14660                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14661                         if (p[i] == i)
14662                                 continue;
14663
14664                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14665                             DMA_RWCTRL_WRITE_BNDRY_16) {
14666                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14667                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14668                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14669                                 break;
14670                         } else {
14671                                 dev_err(&tp->pdev->dev,
14672                                         "%s: Buffer corrupted on read back! "
14673                                         "(%d != %d)\n", __func__, p[i], i);
14674                                 ret = -ENODEV;
14675                                 goto out;
14676                         }
14677                 }
14678
14679                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14680                         /* Success. */
14681                         ret = 0;
14682                         break;
14683                 }
14684         }
14685         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14686             DMA_RWCTRL_WRITE_BNDRY_16) {
14687                 /* The DMA test passed without adjusting the DMA boundary;
14688                  * now look for chipsets that are known to expose the
14689                  * DMA bug without failing the test.
14690                  */
14691                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
14692                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14693                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14694                 } else {
14695                         /* Safe to use the calculated DMA boundary. */
14696                         tp->dma_rwctrl = saved_dma_rwctrl;
14697                 }
14698
14699                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14700         }
14701
14702 out:
14703         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
14704 out_nofree:
14705         return ret;
14706 }
14707
14708 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14709 {
14710         if (tg3_flag(tp, 57765_PLUS)) {
14711                 tp->bufmgr_config.mbuf_read_dma_low_water =
14712                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14713                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14714                         DEFAULT_MB_MACRX_LOW_WATER_57765;
14715                 tp->bufmgr_config.mbuf_high_water =
14716                         DEFAULT_MB_HIGH_WATER_57765;
14717
14718                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14719                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14720                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14721                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14722                 tp->bufmgr_config.mbuf_high_water_jumbo =
14723                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14724         } else if (tg3_flag(tp, 5705_PLUS)) {
14725                 tp->bufmgr_config.mbuf_read_dma_low_water =
14726                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14727                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14728                         DEFAULT_MB_MACRX_LOW_WATER_5705;
14729                 tp->bufmgr_config.mbuf_high_water =
14730                         DEFAULT_MB_HIGH_WATER_5705;
14731                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14732                         tp->bufmgr_config.mbuf_mac_rx_low_water =
14733                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
14734                         tp->bufmgr_config.mbuf_high_water =
14735                                 DEFAULT_MB_HIGH_WATER_5906;
14736                 }
14737
14738                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14739                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14740                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14741                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14742                 tp->bufmgr_config.mbuf_high_water_jumbo =
14743                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
14744         } else {
14745                 tp->bufmgr_config.mbuf_read_dma_low_water =
14746                         DEFAULT_MB_RDMA_LOW_WATER;
14747                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14748                         DEFAULT_MB_MACRX_LOW_WATER;
14749                 tp->bufmgr_config.mbuf_high_water =
14750                         DEFAULT_MB_HIGH_WATER;
14751
14752                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14753                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14754                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14755                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14756                 tp->bufmgr_config.mbuf_high_water_jumbo =
14757                         DEFAULT_MB_HIGH_WATER_JUMBO;
14758         }
14759
14760         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14761         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
14762 }
14763
14764 static char * __devinit tg3_phy_string(struct tg3 *tp)
14765 {
14766         switch (tp->phy_id & TG3_PHY_ID_MASK) {
14767         case TG3_PHY_ID_BCM5400:        return "5400";
14768         case TG3_PHY_ID_BCM5401:        return "5401";
14769         case TG3_PHY_ID_BCM5411:        return "5411";
14770         case TG3_PHY_ID_BCM5701:        return "5701";
14771         case TG3_PHY_ID_BCM5703:        return "5703";
14772         case TG3_PHY_ID_BCM5704:        return "5704";
14773         case TG3_PHY_ID_BCM5705:        return "5705";
14774         case TG3_PHY_ID_BCM5750:        return "5750";
14775         case TG3_PHY_ID_BCM5752:        return "5752";
14776         case TG3_PHY_ID_BCM5714:        return "5714";
14777         case TG3_PHY_ID_BCM5780:        return "5780";
14778         case TG3_PHY_ID_BCM5755:        return "5755";
14779         case TG3_PHY_ID_BCM5787:        return "5787";
14780         case TG3_PHY_ID_BCM5784:        return "5784";
14781         case TG3_PHY_ID_BCM5756:        return "5722/5756";
14782         case TG3_PHY_ID_BCM5906:        return "5906";
14783         case TG3_PHY_ID_BCM5761:        return "5761";
14784         case TG3_PHY_ID_BCM5718C:       return "5718C";
14785         case TG3_PHY_ID_BCM5718S:       return "5718S";
14786         case TG3_PHY_ID_BCM57765:       return "57765";
14787         case TG3_PHY_ID_BCM5719C:       return "5719C";
14788         case TG3_PHY_ID_BCM5720C:       return "5720C";
14789         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
14790         case 0:                 return "serdes";
14791         default:                return "unknown";
14792         }
14793 }
14794
14795 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
14796 {
14797         if (tg3_flag(tp, PCI_EXPRESS)) {
14798                 strcpy(str, "PCI Express");
14799                 return str;
14800         } else if (tg3_flag(tp, PCIX_MODE)) {
14801                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
14802
14803                 strcpy(str, "PCIX:");
14804
14805                 if ((clock_ctrl == 7) ||
14806                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
14807                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
14808                         strcat(str, "133MHz");
14809                 else if (clock_ctrl == 0)
14810                         strcat(str, "33MHz");
14811                 else if (clock_ctrl == 2)
14812                         strcat(str, "50MHz");
14813                 else if (clock_ctrl == 4)
14814                         strcat(str, "66MHz");
14815                 else if (clock_ctrl == 6)
14816                         strcat(str, "100MHz");
14817         } else {
14818                 strcpy(str, "PCI:");
14819                 if (tg3_flag(tp, PCI_HIGH_SPEED))
14820                         strcat(str, "66MHz");
14821                 else
14822                         strcat(str, "33MHz");
14823         }
14824         if (tg3_flag(tp, PCI_32BIT))
14825                 strcat(str, ":32-bit");
14826         else
14827                 strcat(str, ":64-bit");
14828         return str;
14829 }
14830
14831 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14832 {
14833         struct pci_dev *peer;
14834         unsigned int func, devnr = tp->pdev->devfn & ~7;
14835
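              /* Dual-port devices expose the second port as another PCI
               * function of the same device, so scan the sibling functions
               * in this slot for a device that is not us.
               */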
14836         for (func = 0; func < 8; func++) {
14837                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14838                 if (peer && peer != tp->pdev)
14839                         break;
14840                 pci_dev_put(peer);
14841         }
14842         /* The 5704 can be configured in single-port mode; set peer to
14843          * tp->pdev in that case.
14844          */
14845         if (!peer) {
14846                 peer = tp->pdev;
14847                 return peer;
14848         }
14849
14850         /*
14851          * We don't need to keep the refcount elevated; there's no way
14852          * to remove one half of this device without removing the other.
14853          */
14854         pci_dev_put(peer);
14855
14856         return peer;
14857 }
14858
14859 static void __devinit tg3_init_coal(struct tg3 *tp)
14860 {
14861         struct ethtool_coalesce *ec = &tp->coal;
14862
14863         memset(ec, 0, sizeof(*ec));
14864         ec->cmd = ETHTOOL_GCOALESCE;
14865         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
14866         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
14867         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
14868         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
14869         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
14870         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
14871         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
14872         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
14873         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
14874
14875         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
14876                                  HOSTCC_MODE_CLRTICK_TXBD)) {
14877                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
14878                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
14879                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
14880                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
14881         }
14882
14883         if (tg3_flag(tp, 5705_PLUS)) {
14884                 ec->rx_coalesce_usecs_irq = 0;
14885                 ec->tx_coalesce_usecs_irq = 0;
14886                 ec->stats_block_coalesce_usecs = 0;
14887         }
14888 }
14889
14890 static const struct net_device_ops tg3_netdev_ops = {
14891         .ndo_open               = tg3_open,
14892         .ndo_stop               = tg3_close,
14893         .ndo_start_xmit         = tg3_start_xmit,
14894         .ndo_get_stats64        = tg3_get_stats64,
14895         .ndo_validate_addr      = eth_validate_addr,
14896         .ndo_set_multicast_list = tg3_set_rx_mode,
14897         .ndo_set_mac_address    = tg3_set_mac_addr,
14898         .ndo_do_ioctl           = tg3_ioctl,
14899         .ndo_tx_timeout         = tg3_tx_timeout,
14900         .ndo_change_mtu         = tg3_change_mtu,
14901         .ndo_fix_features       = tg3_fix_features,
14902         .ndo_set_features       = tg3_set_features,
14903 #ifdef CONFIG_NET_POLL_CONTROLLER
14904         .ndo_poll_controller    = tg3_poll_controller,
14905 #endif
14906 };
14907
14908 static int __devinit tg3_init_one(struct pci_dev *pdev,
14909                                   const struct pci_device_id *ent)
14910 {
14911         struct net_device *dev;
14912         struct tg3 *tp;
14913         int i, err, pm_cap;
14914         u32 sndmbx, rcvmbx, intmbx;
14915         char str[40];
14916         u64 dma_mask, persist_dma_mask;
14917         u32 features = 0;
14918
14919         printk_once(KERN_INFO "%s\n", version);
14920
14921         err = pci_enable_device(pdev);
14922         if (err) {
14923                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
14924                 return err;
14925         }
14926
14927         err = pci_request_regions(pdev, DRV_MODULE_NAME);
14928         if (err) {
14929                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
14930                 goto err_out_disable_pdev;
14931         }
14932
14933         pci_set_master(pdev);
14934
14935         /* Find power-management capability. */
14936         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
14937         if (pm_cap == 0) {
14938                 dev_err(&pdev->dev,
14939                         "Cannot find Power Management capability, aborting\n");
14940                 err = -EIO;
14941                 goto err_out_free_res;
14942         }
14943
14944         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
14945         if (!dev) {
14946                 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
14947                 err = -ENOMEM;
14948                 goto err_out_free_res;
14949         }
14950
14951         SET_NETDEV_DEV(dev, &pdev->dev);
14952
14953         tp = netdev_priv(dev);
14954         tp->pdev = pdev;
14955         tp->dev = dev;
14956         tp->pm_cap = pm_cap;
14957         tp->rx_mode = TG3_DEF_RX_MODE;
14958         tp->tx_mode = TG3_DEF_TX_MODE;
14959
14960         if (tg3_debug > 0)
14961                 tp->msg_enable = tg3_debug;
14962         else
14963                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
14964
14965         /* The word/byte swap controls here control register access byte
14966          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
14967          * setting below.
14968          */
14969         tp->misc_host_ctrl =
14970                 MISC_HOST_CTRL_MASK_PCI_INT |
14971                 MISC_HOST_CTRL_WORD_SWAP |
14972                 MISC_HOST_CTRL_INDIR_ACCESS |
14973                 MISC_HOST_CTRL_PCISTATE_RW;
14974
14975         /* The NONFRM (non-frame) byte/word swap controls take effect
14976          * on descriptor entries, anything which isn't packet data.
14977          *
14978          * The StrongARM chips on the board (one for tx, one for rx)
14979          * are running in big-endian mode.
14980          */
14981         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
14982                         GRC_MODE_WSWAP_NONFRM_DATA);
14983 #ifdef __BIG_ENDIAN
14984         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
14985 #endif
14986         spin_lock_init(&tp->lock);
14987         spin_lock_init(&tp->indirect_lock);
14988         INIT_WORK(&tp->reset_task, tg3_reset_task);
14989
14990         tp->regs = pci_ioremap_bar(pdev, BAR_0);
14991         if (!tp->regs) {
14992                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
14993                 err = -ENOMEM;
14994                 goto err_out_free_dev;
14995         }
14996
14997         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
14998         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
14999
15000         dev->ethtool_ops = &tg3_ethtool_ops;
15001         dev->watchdog_timeo = TG3_TX_TIMEOUT;
15002         dev->netdev_ops = &tg3_netdev_ops;
15003         dev->irq = pdev->irq;
15004
15005         err = tg3_get_invariants(tp);
15006         if (err) {
15007                 dev_err(&pdev->dev,
15008                         "Problem fetching invariants of chip, aborting\n");
15009                 goto err_out_iounmap;
15010         }
15011
15012         /* The EPB bridge inside 5714, 5715, and 5780 and any
15013          * device behind the EPB cannot support DMA addresses > 40-bit.
15014          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15015          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15016          * do DMA address check in tg3_start_xmit().
15017          */
15018         if (tg3_flag(tp, IS_5788))
15019                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15020         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15021                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15022 #ifdef CONFIG_HIGHMEM
15023                 dma_mask = DMA_BIT_MASK(64);
15024 #endif
15025         } else
15026                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15027
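              /* dma_mask applies to streaming DMA mappings; persist_dma_mask
               * is used below for coherent (consistent) allocations, which
               * must always remain reachable by the device.
               */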
15028         /* Configure DMA attributes. */
15029         if (dma_mask > DMA_BIT_MASK(32)) {
15030                 err = pci_set_dma_mask(pdev, dma_mask);
15031                 if (!err) {
15032                         features |= NETIF_F_HIGHDMA;
15033                         err = pci_set_consistent_dma_mask(pdev,
15034                                                           persist_dma_mask);
15035                         if (err < 0) {
15036                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15037                                         "DMA for consistent allocations\n");
15038                                 goto err_out_iounmap;
15039                         }
15040                 }
15041         }
15042         if (err || dma_mask == DMA_BIT_MASK(32)) {
15043                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15044                 if (err) {
15045                         dev_err(&pdev->dev,
15046                                 "No usable DMA configuration, aborting\n");
15047                         goto err_out_iounmap;
15048                 }
15049         }
15050
15051         tg3_init_bufmgr_config(tp);
15052
15053         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15054
15055         /* 5700 B0 chips do not support checksumming correctly due
15056          * to hardware bugs.
15057          */
15058         if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15059                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15060
15061                 if (tg3_flag(tp, 5755_PLUS))
15062                         features |= NETIF_F_IPV6_CSUM;
15063         }
15064
15065         /* TSO is on by default on chips that support hardware TSO.
15066          * Firmware TSO on older chips gives lower performance, so it
15067          * is off by default, but can be enabled using ethtool.
15068          */
15069         if ((tg3_flag(tp, HW_TSO_1) ||
15070              tg3_flag(tp, HW_TSO_2) ||
15071              tg3_flag(tp, HW_TSO_3)) &&
15072             (features & NETIF_F_IP_CSUM))
15073                 features |= NETIF_F_TSO;
15074         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15075                 if (features & NETIF_F_IPV6_CSUM)
15076                         features |= NETIF_F_TSO6;
15077                 if (tg3_flag(tp, HW_TSO_3) ||
15078                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15079                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15080                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15081                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15082                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15083                         features |= NETIF_F_TSO_ECN;
15084         }
15085
15086         dev->features |= features;
15087         dev->vlan_features |= features;
15088
15089         /*
15090          * Add loopback capability only for a subset of devices that support
15091          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15092          * loopback for the remaining devices.
15093          */
15094         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15095             !tg3_flag(tp, CPMU_PRESENT))
15096                 /* Add the loopback capability */
15097                 features |= NETIF_F_LOOPBACK;
15098
15099         dev->hw_features |= features;
15100
15101         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15102             !tg3_flag(tp, TSO_CAPABLE) &&
15103             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15104                 tg3_flag_set(tp, MAX_RXPEND_64);
15105                 tp->rx_pending = 63;
15106         }
15107
15108         err = tg3_get_device_address(tp);
15109         if (err) {
15110                 dev_err(&pdev->dev,
15111                         "Could not obtain valid ethernet address, aborting\n");
15112                 goto err_out_iounmap;
15113         }
15114
15115         if (tg3_flag(tp, ENABLE_APE)) {
15116                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15117                 if (!tp->aperegs) {
15118                         dev_err(&pdev->dev,
15119                                 "Cannot map APE registers, aborting\n");
15120                         err = -ENOMEM;
15121                         goto err_out_iounmap;
15122                 }
15123
15124                 tg3_ape_lock_init(tp);
15125
15126                 if (tg3_flag(tp, ENABLE_ASF))
15127                         tg3_read_dash_ver(tp);
15128         }
15129
15130         /*
15131          * Reset chip in case the UNDI or EFI driver did not shut down
15132          * cleanly.  The DMA self test will enable WDMAC and we'll see
15133          * (spurious) pending DMA on the PCI bus at that point.
15134          */
15135         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15136             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15137                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15138                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15139         }
15140
15141         err = tg3_test_dma(tp);
15142         if (err) {
15143                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15144                 goto err_out_apeunmap;
15145         }
15146
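              /* Start from the vector-0 mailbox addresses; the loop below
               * derives the per-vector mailbox locations from them.
               */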
15147         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15148         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15149         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15150         for (i = 0; i < tp->irq_max; i++) {
15151                 struct tg3_napi *tnapi = &tp->napi[i];
15152
15153                 tnapi->tp = tp;
15154                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15155
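                      /* The first four interrupt mailboxes are spaced 8 bytes
                       * apart; the remaining ones are packed at 4-byte strides.
                       */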
15156                 tnapi->int_mbox = intmbx;
15157                 if (i < 4)
15158                         intmbx += 0x8;
15159                 else
15160                         intmbx += 0x4;
15161
15162                 tnapi->consmbox = rcvmbx;
15163                 tnapi->prodmbox = sndmbx;
15164
15165                 if (i)
15166                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15167                 else
15168                         tnapi->coal_now = HOSTCC_MODE_NOW;
15169
15170                 if (!tg3_flag(tp, SUPPORT_MSIX))
15171                         break;
15172
15173                 /*
15174                  * If we support MSIX, we'll be using RSS.  If we're using
15175                  * RSS, the first vector only handles link interrupts and the
15176                  * remaining vectors handle rx and tx interrupts.  Reuse the
15177          * mailbox values for the next iteration.  The values we set up
15178                  * above are still useful for the single vectored mode.
15179                  */
15180                 if (!i)
15181                         continue;
15182
15183                 rcvmbx += 0x8;
15184
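                      /* The send producer mailboxes alternate between the two
                       * 32-bit halves of consecutive 64-bit mailbox registers,
                       * hence the -0x4/+0xc walk.
                       */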
15185                 if (sndmbx & 0x4)
15186                         sndmbx -= 0x4;
15187                 else
15188                         sndmbx += 0xc;
15189         }
15190
15191         tg3_init_coal(tp);
15192
15193         pci_set_drvdata(pdev, dev);
15194
15195         err = register_netdev(dev);
15196         if (err) {
15197                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15198                 goto err_out_apeunmap;
15199         }
15200
15201         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15202                     tp->board_part_number,
15203                     tp->pci_chip_rev_id,
15204                     tg3_bus_string(tp, str),
15205                     dev->dev_addr);
15206
15207         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15208                 struct phy_device *phydev;
15209                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15210                 netdev_info(dev,
15211                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15212                             phydev->drv->name, dev_name(&phydev->dev));
15213         } else {
15214                 char *ethtype;
15215
15216                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15217                         ethtype = "10/100Base-TX";
15218                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15219                         ethtype = "1000Base-SX";
15220                 else
15221                         ethtype = "10/100/1000Base-T";
15222
15223                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15224                             "(WireSpeed[%d], EEE[%d])\n",
15225                             tg3_phy_string(tp), ethtype,
15226                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15227                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15228         }
15229
15230         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15231                     (dev->features & NETIF_F_RXCSUM) != 0,
15232                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
15233                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15234                     tg3_flag(tp, ENABLE_ASF) != 0,
15235                     tg3_flag(tp, TSO_CAPABLE) != 0);
15236         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15237                     tp->dma_rwctrl,
15238                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15239                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15240
15241         pci_save_state(pdev);
15242
15243         return 0;
15244
15245 err_out_apeunmap:
15246         if (tp->aperegs) {
15247                 iounmap(tp->aperegs);
15248                 tp->aperegs = NULL;
15249         }
15250
15251 err_out_iounmap:
15252         if (tp->regs) {
15253                 iounmap(tp->regs);
15254                 tp->regs = NULL;
15255         }
15256
15257 err_out_free_dev:
15258         free_netdev(dev);
15259
15260 err_out_free_res:
15261         pci_release_regions(pdev);
15262
15263 err_out_disable_pdev:
15264         pci_disable_device(pdev);
15265         pci_set_drvdata(pdev, NULL);
15266         return err;
15267 }
15268
15269 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15270 {
15271         struct net_device *dev = pci_get_drvdata(pdev);
15272
15273         if (dev) {
15274                 struct tg3 *tp = netdev_priv(dev);
15275
15276                 if (tp->fw)
15277                         release_firmware(tp->fw);
15278
15279                 cancel_work_sync(&tp->reset_task);
15280
15281                 if (!tg3_flag(tp, USE_PHYLIB)) {
15282                         tg3_phy_fini(tp);
15283                         tg3_mdio_fini(tp);
15284                 }
15285
15286                 unregister_netdev(dev);
15287                 if (tp->aperegs) {
15288                         iounmap(tp->aperegs);
15289                         tp->aperegs = NULL;
15290                 }
15291                 if (tp->regs) {
15292                         iounmap(tp->regs);
15293                         tp->regs = NULL;
15294                 }
15295                 free_netdev(dev);
15296                 pci_release_regions(pdev);
15297                 pci_disable_device(pdev);
15298                 pci_set_drvdata(pdev, NULL);
15299         }
15300 }
15301
15302 #ifdef CONFIG_PM_SLEEP
15303 static int tg3_suspend(struct device *device)
15304 {
15305         struct pci_dev *pdev = to_pci_dev(device);
15306         struct net_device *dev = pci_get_drvdata(pdev);
15307         struct tg3 *tp = netdev_priv(dev);
15308         int err;
15309
15310         if (!netif_running(dev))
15311                 return 0;
15312
15313         flush_work_sync(&tp->reset_task);
15314         tg3_phy_stop(tp);
15315         tg3_netif_stop(tp);
15316
15317         del_timer_sync(&tp->timer);
15318
15319         tg3_full_lock(tp, 1);
15320         tg3_disable_ints(tp);
15321         tg3_full_unlock(tp);
15322
15323         netif_device_detach(dev);
15324
15325         tg3_full_lock(tp, 0);
15326         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15327         tg3_flag_clear(tp, INIT_COMPLETE);
15328         tg3_full_unlock(tp);
15329
15330         err = tg3_power_down_prepare(tp);
15331         if (err) {
15332                 int err2;
15333
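                      /* Power-down preparation failed; restart the hardware
                       * so the device is left in a usable state.
                       */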
15334                 tg3_full_lock(tp, 0);
15335
15336                 tg3_flag_set(tp, INIT_COMPLETE);
15337                 err2 = tg3_restart_hw(tp, 1);
15338                 if (err2)
15339                         goto out;
15340
15341                 tp->timer.expires = jiffies + tp->timer_offset;
15342                 add_timer(&tp->timer);
15343
15344                 netif_device_attach(dev);
15345                 tg3_netif_start(tp);
15346
15347 out:
15348                 tg3_full_unlock(tp);
15349
15350                 if (!err2)
15351                         tg3_phy_start(tp);
15352         }
15353
15354         return err;
15355 }
15356
15357 static int tg3_resume(struct device *device)
15358 {
15359         struct pci_dev *pdev = to_pci_dev(device);
15360         struct net_device *dev = pci_get_drvdata(pdev);
15361         struct tg3 *tp = netdev_priv(dev);
15362         int err;
15363
15364         if (!netif_running(dev))
15365                 return 0;
15366
15367         netif_device_attach(dev);
15368
15369         tg3_full_lock(tp, 0);
15370
15371         tg3_flag_set(tp, INIT_COMPLETE);
15372         err = tg3_restart_hw(tp, 1);
15373         if (err)
15374                 goto out;
15375
15376         tp->timer.expires = jiffies + tp->timer_offset;
15377         add_timer(&tp->timer);
15378
15379         tg3_netif_start(tp);
15380
15381 out:
15382         tg3_full_unlock(tp);
15383
15384         if (!err)
15385                 tg3_phy_start(tp);
15386
15387         return err;
15388 }
15389
15390 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15391 #define TG3_PM_OPS (&tg3_pm_ops)
15392
15393 #else
15394
15395 #define TG3_PM_OPS NULL
15396
15397 #endif /* CONFIG_PM_SLEEP */
15398
15399 /**
15400  * tg3_io_error_detected - called when PCI error is detected
15401  * @pdev: Pointer to PCI device
15402  * @state: The current pci connection state
15403  *
15404  * This function is called after a PCI bus error affecting
15405  * this device has been detected.
15406  */
15407 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15408                                               pci_channel_state_t state)
15409 {
15410         struct net_device *netdev = pci_get_drvdata(pdev);
15411         struct tg3 *tp = netdev_priv(netdev);
15412         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15413
15414         netdev_info(netdev, "PCI I/O error detected\n");
15415
15416         rtnl_lock();
15417
15418         if (!netif_running(netdev))
15419                 goto done;
15420
15421         tg3_phy_stop(tp);
15422
15423         tg3_netif_stop(tp);
15424
15425         del_timer_sync(&tp->timer);
15426         tg3_flag_clear(tp, RESTART_TIMER);
15427
15428         /* Want to make sure that the reset task doesn't run */
15429         cancel_work_sync(&tp->reset_task);
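              /* Clear these again in case the reset task set them while it
               * was still running.
               */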
15430         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15431         tg3_flag_clear(tp, RESTART_TIMER);
15432
15433         netif_device_detach(netdev);
15434
15435         /* Clean up software state, even if MMIO is blocked */
15436         tg3_full_lock(tp, 0);
15437         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15438         tg3_full_unlock(tp);
15439
15440 done:
15441         if (state == pci_channel_io_perm_failure)
15442                 err = PCI_ERS_RESULT_DISCONNECT;
15443         else
15444                 pci_disable_device(pdev);
15445
15446         rtnl_unlock();
15447
15448         return err;
15449 }
15450
15451 /**
15452  * tg3_io_slot_reset - called after the pci bus has been reset.
15453  * @pdev: Pointer to PCI device
15454  *
15455  * Restart the card from scratch, as if from a cold-boot.
15456  * At this point, the card has experienced a hard reset,
15457  * followed by fixups by BIOS, and has its config space
15458  * set up identically to what it was at cold boot.
15459  */
15460 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15461 {
15462         struct net_device *netdev = pci_get_drvdata(pdev);
15463         struct tg3 *tp = netdev_priv(netdev);
15464         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15465         int err;
15466
15467         rtnl_lock();
15468
15469         if (pci_enable_device(pdev)) {
15470                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15471                 goto done;
15472         }
15473
15474         pci_set_master(pdev);
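              /* Restore the config space the reset clobbered, then re-save it
               * so a later recovery restores this post-fixup state.
               */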
15475         pci_restore_state(pdev);
15476         pci_save_state(pdev);
15477
15478         if (!netif_running(netdev)) {
15479                 rc = PCI_ERS_RESULT_RECOVERED;
15480                 goto done;
15481         }
15482
15483         err = tg3_power_up(tp);
15484         if (err) {
15485                 netdev_err(netdev, "Failed to restore register access.\n");
15486                 goto done;
15487         }
15488
15489         rc = PCI_ERS_RESULT_RECOVERED;
15490
15491 done:
15492         rtnl_unlock();
15493
15494         return rc;
15495 }
15496
15497 /**
15498  * tg3_io_resume - called when traffic can start flowing again.
15499  * @pdev: Pointer to PCI device
15500  *
15501  * This callback is called when the error recovery driver tells
15502  * us that it's OK to resume normal operation.
15503  */
15504 static void tg3_io_resume(struct pci_dev *pdev)
15505 {
15506         struct net_device *netdev = pci_get_drvdata(pdev);
15507         struct tg3 *tp = netdev_priv(netdev);
15508         int err;
15509
15510         rtnl_lock();
15511
15512         if (!netif_running(netdev))
15513                 goto done;
15514
15515         tg3_full_lock(tp, 0);
15516         tg3_flag_set(tp, INIT_COMPLETE);
15517         err = tg3_restart_hw(tp, 1);
15518         tg3_full_unlock(tp);
15519         if (err) {
15520                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
15521                 goto done;
15522         }
15523
15524         netif_device_attach(netdev);
15525
15526         tp->timer.expires = jiffies + tp->timer_offset;
15527         add_timer(&tp->timer);
15528
15529         tg3_netif_start(tp);
15530
15531         tg3_phy_start(tp);
15532
15533 done:
15534         rtnl_unlock();
15535 }
15536
15537 static struct pci_error_handlers tg3_err_handler = {
15538         .error_detected = tg3_io_error_detected,
15539         .slot_reset     = tg3_io_slot_reset,
15540         .resume         = tg3_io_resume
15541 };
15542
15543 static struct pci_driver tg3_driver = {
15544         .name           = DRV_MODULE_NAME,
15545         .id_table       = tg3_pci_tbl,
15546         .probe          = tg3_init_one,
15547         .remove         = __devexit_p(tg3_remove_one),
15548         .err_handler    = &tg3_err_handler,
15549         .driver.pm      = TG3_PM_OPS,
15550 };
15551
15552 static int __init tg3_init(void)
15553 {
15554         return pci_register_driver(&tg3_driver);
15555 }
15556
15557 static void __exit tg3_cleanup(void)
15558 {
15559         pci_unregister_driver(&tg3_driver);
15560 }
15561
15562 module_init(tg3_init);
15563 module_exit(tg3_cleanup);