tg3: Workaround tagged status update bug
[firefly-linux-kernel-4.4.55.git] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2011 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47
48 #include <net/checksum.h>
49 #include <net/ip.h>
50
51 #include <asm/system.h>
52 #include <linux/io.h>
53 #include <asm/byteorder.h>
54 #include <linux/uaccess.h>
55
56 #ifdef CONFIG_SPARC
57 #include <asm/idprom.h>
58 #include <asm/prom.h>
59 #endif
60
61 #define BAR_0   0
62 #define BAR_2   2
63
64 #include "tg3.h"
65
66 /* Functions & macros to verify TG3_FLAGS types */
67
68 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
69 {
70         return test_bit(flag, bits);
71 }
72
73 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
74 {
75         set_bit(flag, bits);
76 }
77
78 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
79 {
80         clear_bit(flag, bits);
81 }
82
83 #define tg3_flag(tp, flag)                              \
84         _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
85 #define tg3_flag_set(tp, flag)                          \
86         _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
87 #define tg3_flag_clear(tp, flag)                        \
88         _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
89
90 #define DRV_MODULE_NAME         "tg3"
91 #define TG3_MAJ_NUM                     3
92 #define TG3_MIN_NUM                     119
93 #define DRV_MODULE_VERSION      \
94         __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
95 #define DRV_MODULE_RELDATE      "May 18, 2011"
96
97 #define TG3_DEF_MAC_MODE        0
98 #define TG3_DEF_RX_MODE         0
99 #define TG3_DEF_TX_MODE         0
100 #define TG3_DEF_MSG_ENABLE        \
101         (NETIF_MSG_DRV          | \
102          NETIF_MSG_PROBE        | \
103          NETIF_MSG_LINK         | \
104          NETIF_MSG_TIMER        | \
105          NETIF_MSG_IFDOWN       | \
106          NETIF_MSG_IFUP         | \
107          NETIF_MSG_RX_ERR       | \
108          NETIF_MSG_TX_ERR)
109
110 /* length of time before we decide the hardware is borked,
111  * and dev->tx_timeout() should be called to fix the problem
112  */
113
114 #define TG3_TX_TIMEOUT                  (5 * HZ)
115
116 /* hardware minimum and maximum for a single frame's data payload */
117 #define TG3_MIN_MTU                     60
118 #define TG3_MAX_MTU(tp) \
119         (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
120
121 /* These numbers seem to be hard coded in the NIC firmware somehow.
122  * You can't change the ring sizes, but you can change where you place
123  * them in the NIC onboard memory.
124  */
125 #define TG3_RX_STD_RING_SIZE(tp) \
126         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
127          TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
128 #define TG3_DEF_RX_RING_PENDING         200
129 #define TG3_RX_JMB_RING_SIZE(tp) \
130         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
131          TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
132 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
133 #define TG3_RSS_INDIR_TBL_SIZE          128
134
135 /* Do not place this n-ring entries value into the tp struct itself,
136  * we really want to expose these constants to GCC so that modulo et
137  * al.  operations are done with shifts and masks instead of with
138  * hw multiply/modulo instructions.  Another solution would be to
139  * replace things like '% foo' with '& (foo - 1)'.
140  */
141
142 #define TG3_TX_RING_SIZE                512
143 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
144
145 #define TG3_RX_STD_RING_BYTES(tp) \
146         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
147 #define TG3_RX_JMB_RING_BYTES(tp) \
148         (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
149 #define TG3_RX_RCB_RING_BYTES(tp) \
150         (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
151 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
152                                  TG3_TX_RING_SIZE)
153 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
154
155 #define TG3_DMA_BYTE_ENAB               64
156
157 #define TG3_RX_STD_DMA_SZ               1536
158 #define TG3_RX_JMB_DMA_SZ               9046
159
160 #define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
161
162 #define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
163 #define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
164
165 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
166         (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
167
168 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
169         (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
170
171 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
172  * that are at least dword aligned when used in PCIX mode.  The driver
173  * works around this bug by double copying the packet.  This workaround
174  * is built into the normal double copy length check for efficiency.
175  *
176  * However, the double copy is only necessary on those architectures
177  * where unaligned memory accesses are inefficient.  For those architectures
178  * where unaligned memory accesses incur little penalty, we can reintegrate
179  * the 5701 in the normal rx path.  Doing so saves a device structure
180  * dereference by hardcoding the double copy threshold in place.
181  */
182 #define TG3_RX_COPY_THRESHOLD           256
183 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
184         #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
185 #else
186         #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
187 #endif
188
189 /* minimum number of free TX descriptors required to wake up TX process */
190 #define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
191
192 #define TG3_RAW_IP_ALIGN 2
193
194 #define TG3_FW_UPDATE_TIMEOUT_SEC       5
195
196 #define FIRMWARE_TG3            "tigon/tg3.bin"
197 #define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
198 #define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
199
200 static char version[] __devinitdata =
201         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
202
203 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
204 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
205 MODULE_LICENSE("GPL");
206 MODULE_VERSION(DRV_MODULE_VERSION);
207 MODULE_FIRMWARE(FIRMWARE_TG3);
208 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
209 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
210
211 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
212 module_param(tg3_debug, int, 0);
213 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
214
215 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
216         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
217         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
218         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
219         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
220         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
221         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
222         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
223         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
224         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
225         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
226         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
227         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
228         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
229         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
230         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
231         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
232         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
233         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
234         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
235         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
236         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
237         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
238         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
239         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
240         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
241         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
242         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
243         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
244         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
245         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
246         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
247         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
248         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
249         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
250         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
251         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
252         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
253         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
254         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
255         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
256         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
257         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
258         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
259         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
260         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
261         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
262         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
263         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
264         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
265         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
266         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
267         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
268         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
269         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
270         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
271         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
272         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
273         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
274         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
275         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
276         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
277         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
278         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
279         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
280         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
281         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
282         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
283         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
284         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
285         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
286         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
287         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
288         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
289         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
290         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
291         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
292         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
293         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
294         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
295         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
296         {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
297         {}
298 };
299
300 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
301
302 static const struct {
303         const char string[ETH_GSTRING_LEN];
304 } ethtool_stats_keys[] = {
305         { "rx_octets" },
306         { "rx_fragments" },
307         { "rx_ucast_packets" },
308         { "rx_mcast_packets" },
309         { "rx_bcast_packets" },
310         { "rx_fcs_errors" },
311         { "rx_align_errors" },
312         { "rx_xon_pause_rcvd" },
313         { "rx_xoff_pause_rcvd" },
314         { "rx_mac_ctrl_rcvd" },
315         { "rx_xoff_entered" },
316         { "rx_frame_too_long_errors" },
317         { "rx_jabbers" },
318         { "rx_undersize_packets" },
319         { "rx_in_length_errors" },
320         { "rx_out_length_errors" },
321         { "rx_64_or_less_octet_packets" },
322         { "rx_65_to_127_octet_packets" },
323         { "rx_128_to_255_octet_packets" },
324         { "rx_256_to_511_octet_packets" },
325         { "rx_512_to_1023_octet_packets" },
326         { "rx_1024_to_1522_octet_packets" },
327         { "rx_1523_to_2047_octet_packets" },
328         { "rx_2048_to_4095_octet_packets" },
329         { "rx_4096_to_8191_octet_packets" },
330         { "rx_8192_to_9022_octet_packets" },
331
332         { "tx_octets" },
333         { "tx_collisions" },
334
335         { "tx_xon_sent" },
336         { "tx_xoff_sent" },
337         { "tx_flow_control" },
338         { "tx_mac_errors" },
339         { "tx_single_collisions" },
340         { "tx_mult_collisions" },
341         { "tx_deferred" },
342         { "tx_excessive_collisions" },
343         { "tx_late_collisions" },
344         { "tx_collide_2times" },
345         { "tx_collide_3times" },
346         { "tx_collide_4times" },
347         { "tx_collide_5times" },
348         { "tx_collide_6times" },
349         { "tx_collide_7times" },
350         { "tx_collide_8times" },
351         { "tx_collide_9times" },
352         { "tx_collide_10times" },
353         { "tx_collide_11times" },
354         { "tx_collide_12times" },
355         { "tx_collide_13times" },
356         { "tx_collide_14times" },
357         { "tx_collide_15times" },
358         { "tx_ucast_packets" },
359         { "tx_mcast_packets" },
360         { "tx_bcast_packets" },
361         { "tx_carrier_sense_errors" },
362         { "tx_discards" },
363         { "tx_errors" },
364
365         { "dma_writeq_full" },
366         { "dma_write_prioq_full" },
367         { "rxbds_empty" },
368         { "rx_discards" },
369         { "rx_errors" },
370         { "rx_threshold_hit" },
371
372         { "dma_readq_full" },
373         { "dma_read_prioq_full" },
374         { "tx_comp_queue_full" },
375
376         { "ring_set_send_prod_index" },
377         { "ring_status_update" },
378         { "nic_irqs" },
379         { "nic_avoided_irqs" },
380         { "nic_tx_threshold_hit" },
381
382         { "mbuf_lwm_thresh_hit" },
383 };
384
385 #define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
386
387
388 static const struct {
389         const char string[ETH_GSTRING_LEN];
390 } ethtool_test_keys[] = {
391         { "nvram test     (online) " },
392         { "link test      (online) " },
393         { "register test  (offline)" },
394         { "memory test    (offline)" },
395         { "loopback test  (offline)" },
396         { "interrupt test (offline)" },
397 };
398
399 #define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
400
401
402 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
403 {
404         writel(val, tp->regs + off);
405 }
406
407 static u32 tg3_read32(struct tg3 *tp, u32 off)
408 {
409         return readl(tp->regs + off);
410 }
411
412 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
413 {
414         writel(val, tp->aperegs + off);
415 }
416
417 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
418 {
419         return readl(tp->aperegs + off);
420 }
421
422 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
423 {
424         unsigned long flags;
425
426         spin_lock_irqsave(&tp->indirect_lock, flags);
427         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
428         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
429         spin_unlock_irqrestore(&tp->indirect_lock, flags);
430 }
431
432 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
433 {
434         writel(val, tp->regs + off);
435         readl(tp->regs + off);
436 }
437
438 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
439 {
440         unsigned long flags;
441         u32 val;
442
443         spin_lock_irqsave(&tp->indirect_lock, flags);
444         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
445         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
446         spin_unlock_irqrestore(&tp->indirect_lock, flags);
447         return val;
448 }
449
450 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
451 {
452         unsigned long flags;
453
454         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
455                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
456                                        TG3_64BIT_REG_LOW, val);
457                 return;
458         }
459         if (off == TG3_RX_STD_PROD_IDX_REG) {
460                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
461                                        TG3_64BIT_REG_LOW, val);
462                 return;
463         }
464
465         spin_lock_irqsave(&tp->indirect_lock, flags);
466         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
467         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
468         spin_unlock_irqrestore(&tp->indirect_lock, flags);
469
470         /* In indirect mode when disabling interrupts, we also need
471          * to clear the interrupt bit in the GRC local ctrl register.
472          */
473         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
474             (val == 0x1)) {
475                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
476                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
477         }
478 }
479
480 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
481 {
482         unsigned long flags;
483         u32 val;
484
485         spin_lock_irqsave(&tp->indirect_lock, flags);
486         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
487         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
488         spin_unlock_irqrestore(&tp->indirect_lock, flags);
489         return val;
490 }
491
492 /* usec_wait specifies the wait time in usec when writing to certain registers
493  * where it is unsafe to read back the register without some delay.
494  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
495  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
496  */
497 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
498 {
499         if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
500                 /* Non-posted methods */
501                 tp->write32(tp, off, val);
502         else {
503                 /* Posted method */
504                 tg3_write32(tp, off, val);
505                 if (usec_wait)
506                         udelay(usec_wait);
507                 tp->read32(tp, off);
508         }
509         /* Wait again after the read for the posted method to guarantee that
510          * the wait time is met.
511          */
512         if (usec_wait)
513                 udelay(usec_wait);
514 }
515
516 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
517 {
518         tp->write32_mbox(tp, off, val);
519         if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
520                 tp->read32_mbox(tp, off);
521 }
522
523 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
524 {
525         void __iomem *mbox = tp->regs + off;
526         writel(val, mbox);
527         if (tg3_flag(tp, TXD_MBOX_HWBUG))
528                 writel(val, mbox);
529         if (tg3_flag(tp, MBOX_WRITE_REORDER))
530                 readl(mbox);
531 }
532
533 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
534 {
535         return readl(tp->regs + off + GRCMBOX_BASE);
536 }
537
538 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
539 {
540         writel(val, tp->regs + off + GRCMBOX_BASE);
541 }
542
543 #define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
544 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
545 #define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
546 #define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
547 #define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)
548
549 #define tw32(reg, val)                  tp->write32(tp, reg, val)
550 #define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
551 #define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
552 #define tr32(reg)                       tp->read32(tp, reg)
553
554 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
555 {
556         unsigned long flags;
557
558         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
559             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
560                 return;
561
562         spin_lock_irqsave(&tp->indirect_lock, flags);
563         if (tg3_flag(tp, SRAM_USE_CONFIG)) {
564                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
565                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
566
567                 /* Always leave this as zero. */
568                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
569         } else {
570                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
571                 tw32_f(TG3PCI_MEM_WIN_DATA, val);
572
573                 /* Always leave this as zero. */
574                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
575         }
576         spin_unlock_irqrestore(&tp->indirect_lock, flags);
577 }
578
579 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
580 {
581         unsigned long flags;
582
583         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
584             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
585                 *val = 0;
586                 return;
587         }
588
589         spin_lock_irqsave(&tp->indirect_lock, flags);
590         if (tg3_flag(tp, SRAM_USE_CONFIG)) {
591                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
592                 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
593
594                 /* Always leave this as zero. */
595                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
596         } else {
597                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
598                 *val = tr32(TG3PCI_MEM_WIN_DATA);
599
600                 /* Always leave this as zero. */
601                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
602         }
603         spin_unlock_irqrestore(&tp->indirect_lock, flags);
604 }
605
606 static void tg3_ape_lock_init(struct tg3 *tp)
607 {
608         int i;
609         u32 regbase;
610
611         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
612                 regbase = TG3_APE_LOCK_GRANT;
613         else
614                 regbase = TG3_APE_PER_LOCK_GRANT;
615
616         /* Make sure the driver hasn't any stale locks. */
617         for (i = 0; i < 8; i++)
618                 tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
619 }
620
621 static int tg3_ape_lock(struct tg3 *tp, int locknum)
622 {
623         int i, off;
624         int ret = 0;
625         u32 status, req, gnt;
626
627         if (!tg3_flag(tp, ENABLE_APE))
628                 return 0;
629
630         switch (locknum) {
631         case TG3_APE_LOCK_GRC:
632         case TG3_APE_LOCK_MEM:
633                 break;
634         default:
635                 return -EINVAL;
636         }
637
638         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
639                 req = TG3_APE_LOCK_REQ;
640                 gnt = TG3_APE_LOCK_GRANT;
641         } else {
642                 req = TG3_APE_PER_LOCK_REQ;
643                 gnt = TG3_APE_PER_LOCK_GRANT;
644         }
645
646         off = 4 * locknum;
647
648         tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);
649
650         /* Wait for up to 1 millisecond to acquire lock. */
651         for (i = 0; i < 100; i++) {
652                 status = tg3_ape_read32(tp, gnt + off);
653                 if (status == APE_LOCK_GRANT_DRIVER)
654                         break;
655                 udelay(10);
656         }
657
658         if (status != APE_LOCK_GRANT_DRIVER) {
659                 /* Revoke the lock request. */
660                 tg3_ape_write32(tp, gnt + off,
661                                 APE_LOCK_GRANT_DRIVER);
662
663                 ret = -EBUSY;
664         }
665
666         return ret;
667 }
668
669 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
670 {
671         u32 gnt;
672
673         if (!tg3_flag(tp, ENABLE_APE))
674                 return;
675
676         switch (locknum) {
677         case TG3_APE_LOCK_GRC:
678         case TG3_APE_LOCK_MEM:
679                 break;
680         default:
681                 return;
682         }
683
684         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
685                 gnt = TG3_APE_LOCK_GRANT;
686         else
687                 gnt = TG3_APE_PER_LOCK_GRANT;
688
689         tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
690 }
691
692 static void tg3_disable_ints(struct tg3 *tp)
693 {
694         int i;
695
696         tw32(TG3PCI_MISC_HOST_CTRL,
697              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
698         for (i = 0; i < tp->irq_max; i++)
699                 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
700 }
701
702 static void tg3_enable_ints(struct tg3 *tp)
703 {
704         int i;
705
706         tp->irq_sync = 0;
707         wmb();
708
709         tw32(TG3PCI_MISC_HOST_CTRL,
710              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
711
712         tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
713         for (i = 0; i < tp->irq_cnt; i++) {
714                 struct tg3_napi *tnapi = &tp->napi[i];
715
716                 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
717                 if (tg3_flag(tp, 1SHOT_MSI))
718                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
719
720                 tp->coal_now |= tnapi->coal_now;
721         }
722
723         /* Force an initial interrupt */
724         if (!tg3_flag(tp, TAGGED_STATUS) &&
725             (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
726                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
727         else
728                 tw32(HOSTCC_MODE, tp->coal_now);
729
730         tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
731 }
732
733 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
734 {
735         struct tg3 *tp = tnapi->tp;
736         struct tg3_hw_status *sblk = tnapi->hw_status;
737         unsigned int work_exists = 0;
738
739         /* check for phy events */
740         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
741                 if (sblk->status & SD_STATUS_LINK_CHG)
742                         work_exists = 1;
743         }
744         /* check for RX/TX work to do */
745         if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
746             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
747                 work_exists = 1;
748
749         return work_exists;
750 }
751
752 /* tg3_int_reenable
753  *  similar to tg3_enable_ints, but it accurately determines whether there
754  *  is new work pending and can return without flushing the PIO write
755  *  which reenables interrupts
756  */
757 static void tg3_int_reenable(struct tg3_napi *tnapi)
758 {
759         struct tg3 *tp = tnapi->tp;
760
761         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
762         mmiowb();
763
764         /* When doing tagged status, this work check is unnecessary.
765          * The last_tag we write above tells the chip which piece of
766          * work we've completed.
767          */
768         if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
769                 tw32(HOSTCC_MODE, tp->coalesce_mode |
770                      HOSTCC_MODE_ENABLE | tnapi->coal_now);
771 }
772
773 static void tg3_switch_clocks(struct tg3 *tp)
774 {
775         u32 clock_ctrl;
776         u32 orig_clock_ctrl;
777
778         if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
779                 return;
780
781         clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
782
783         orig_clock_ctrl = clock_ctrl;
784         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
785                        CLOCK_CTRL_CLKRUN_OENABLE |
786                        0x1f);
787         tp->pci_clock_ctrl = clock_ctrl;
788
789         if (tg3_flag(tp, 5705_PLUS)) {
790                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
791                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
792                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
793                 }
794         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
795                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
796                             clock_ctrl |
797                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
798                             40);
799                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
800                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
801                             40);
802         }
803         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
804 }
805
806 #define PHY_BUSY_LOOPS  5000
807
808 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
809 {
810         u32 frame_val;
811         unsigned int loops;
812         int ret;
813
814         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
815                 tw32_f(MAC_MI_MODE,
816                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
817                 udelay(80);
818         }
819
820         *val = 0x0;
821
822         frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
823                       MI_COM_PHY_ADDR_MASK);
824         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
825                       MI_COM_REG_ADDR_MASK);
826         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
827
828         tw32_f(MAC_MI_COM, frame_val);
829
830         loops = PHY_BUSY_LOOPS;
831         while (loops != 0) {
832                 udelay(10);
833                 frame_val = tr32(MAC_MI_COM);
834
835                 if ((frame_val & MI_COM_BUSY) == 0) {
836                         udelay(5);
837                         frame_val = tr32(MAC_MI_COM);
838                         break;
839                 }
840                 loops -= 1;
841         }
842
843         ret = -EBUSY;
844         if (loops != 0) {
845                 *val = frame_val & MI_COM_DATA_MASK;
846                 ret = 0;
847         }
848
849         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
850                 tw32_f(MAC_MI_MODE, tp->mi_mode);
851                 udelay(80);
852         }
853
854         return ret;
855 }
856
857 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
858 {
859         u32 frame_val;
860         unsigned int loops;
861         int ret;
862
863         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
864             (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
865                 return 0;
866
867         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
868                 tw32_f(MAC_MI_MODE,
869                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
870                 udelay(80);
871         }
872
873         frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
874                       MI_COM_PHY_ADDR_MASK);
875         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
876                       MI_COM_REG_ADDR_MASK);
877         frame_val |= (val & MI_COM_DATA_MASK);
878         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
879
880         tw32_f(MAC_MI_COM, frame_val);
881
882         loops = PHY_BUSY_LOOPS;
883         while (loops != 0) {
884                 udelay(10);
885                 frame_val = tr32(MAC_MI_COM);
886                 if ((frame_val & MI_COM_BUSY) == 0) {
887                         udelay(5);
888                         frame_val = tr32(MAC_MI_COM);
889                         break;
890                 }
891                 loops -= 1;
892         }
893
894         ret = -EBUSY;
895         if (loops != 0)
896                 ret = 0;
897
898         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
899                 tw32_f(MAC_MI_MODE, tp->mi_mode);
900                 udelay(80);
901         }
902
903         return ret;
904 }
905
906 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
907 {
908         int err;
909
910         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
911         if (err)
912                 goto done;
913
914         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
915         if (err)
916                 goto done;
917
918         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
919                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
920         if (err)
921                 goto done;
922
923         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
924
925 done:
926         return err;
927 }
928
929 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
930 {
931         int err;
932
933         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
934         if (err)
935                 goto done;
936
937         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
938         if (err)
939                 goto done;
940
941         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
942                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
943         if (err)
944                 goto done;
945
946         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
947
948 done:
949         return err;
950 }
951
952 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
953 {
954         int err;
955
956         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
957         if (!err)
958                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
959
960         return err;
961 }
962
963 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
964 {
965         int err;
966
967         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
968         if (!err)
969                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
970
971         return err;
972 }
973
974 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
975 {
976         int err;
977
978         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
979                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
980                            MII_TG3_AUXCTL_SHDWSEL_MISC);
981         if (!err)
982                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
983
984         return err;
985 }
986
987 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
988 {
989         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
990                 set |= MII_TG3_AUXCTL_MISC_WREN;
991
992         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
993 }
994
995 #define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
996         tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
997                              MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
998                              MII_TG3_AUXCTL_ACTL_TX_6DB)
999
1000 #define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
1001         tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1002                              MII_TG3_AUXCTL_ACTL_TX_6DB);
1003
1004 static int tg3_bmcr_reset(struct tg3 *tp)
1005 {
1006         u32 phy_control;
1007         int limit, err;
1008
1009         /* OK, reset it, and poll the BMCR_RESET bit until it
1010          * clears or we time out.
1011          */
1012         phy_control = BMCR_RESET;
1013         err = tg3_writephy(tp, MII_BMCR, phy_control);
1014         if (err != 0)
1015                 return -EBUSY;
1016
1017         limit = 5000;
1018         while (limit--) {
1019                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1020                 if (err != 0)
1021                         return -EBUSY;
1022
1023                 if ((phy_control & BMCR_RESET) == 0) {
1024                         udelay(40);
1025                         break;
1026                 }
1027                 udelay(10);
1028         }
1029         if (limit < 0)
1030                 return -EBUSY;
1031
1032         return 0;
1033 }
1034
1035 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1036 {
1037         struct tg3 *tp = bp->priv;
1038         u32 val;
1039
1040         spin_lock_bh(&tp->lock);
1041
1042         if (tg3_readphy(tp, reg, &val))
1043                 val = -EIO;
1044
1045         spin_unlock_bh(&tp->lock);
1046
1047         return val;
1048 }
1049
1050 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1051 {
1052         struct tg3 *tp = bp->priv;
1053         u32 ret = 0;
1054
1055         spin_lock_bh(&tp->lock);
1056
1057         if (tg3_writephy(tp, reg, val))
1058                 ret = -EIO;
1059
1060         spin_unlock_bh(&tp->lock);
1061
1062         return ret;
1063 }
1064
1065 static int tg3_mdio_reset(struct mii_bus *bp)
1066 {
1067         return 0;
1068 }
1069
1070 static void tg3_mdio_config_5785(struct tg3 *tp)
1071 {
1072         u32 val;
1073         struct phy_device *phydev;
1074
1075         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1076         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1077         case PHY_ID_BCM50610:
1078         case PHY_ID_BCM50610M:
1079                 val = MAC_PHYCFG2_50610_LED_MODES;
1080                 break;
1081         case PHY_ID_BCMAC131:
1082                 val = MAC_PHYCFG2_AC131_LED_MODES;
1083                 break;
1084         case PHY_ID_RTL8211C:
1085                 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1086                 break;
1087         case PHY_ID_RTL8201E:
1088                 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1089                 break;
1090         default:
1091                 return;
1092         }
1093
1094         if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1095                 tw32(MAC_PHYCFG2, val);
1096
1097                 val = tr32(MAC_PHYCFG1);
1098                 val &= ~(MAC_PHYCFG1_RGMII_INT |
1099                          MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1100                 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1101                 tw32(MAC_PHYCFG1, val);
1102
1103                 return;
1104         }
1105
1106         if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1107                 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1108                        MAC_PHYCFG2_FMODE_MASK_MASK |
1109                        MAC_PHYCFG2_GMODE_MASK_MASK |
1110                        MAC_PHYCFG2_ACT_MASK_MASK   |
1111                        MAC_PHYCFG2_QUAL_MASK_MASK |
1112                        MAC_PHYCFG2_INBAND_ENABLE;
1113
1114         tw32(MAC_PHYCFG2, val);
1115
1116         val = tr32(MAC_PHYCFG1);
1117         val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1118                  MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1119         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1120                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1121                         val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1122                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1123                         val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1124         }
1125         val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1126                MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1127         tw32(MAC_PHYCFG1, val);
1128
1129         val = tr32(MAC_EXT_RGMII_MODE);
1130         val &= ~(MAC_RGMII_MODE_RX_INT_B |
1131                  MAC_RGMII_MODE_RX_QUALITY |
1132                  MAC_RGMII_MODE_RX_ACTIVITY |
1133                  MAC_RGMII_MODE_RX_ENG_DET |
1134                  MAC_RGMII_MODE_TX_ENABLE |
1135                  MAC_RGMII_MODE_TX_LOWPWR |
1136                  MAC_RGMII_MODE_TX_RESET);
1137         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1138                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1139                         val |= MAC_RGMII_MODE_RX_INT_B |
1140                                MAC_RGMII_MODE_RX_QUALITY |
1141                                MAC_RGMII_MODE_RX_ACTIVITY |
1142                                MAC_RGMII_MODE_RX_ENG_DET;
1143                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1144                         val |= MAC_RGMII_MODE_TX_ENABLE |
1145                                MAC_RGMII_MODE_TX_LOWPWR |
1146                                MAC_RGMII_MODE_TX_RESET;
1147         }
1148         tw32(MAC_EXT_RGMII_MODE, val);
1149 }
1150
1151 static void tg3_mdio_start(struct tg3 *tp)
1152 {
1153         tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1154         tw32_f(MAC_MI_MODE, tp->mi_mode);
1155         udelay(80);
1156
1157         if (tg3_flag(tp, MDIOBUS_INITED) &&
1158             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1159                 tg3_mdio_config_5785(tp);
1160 }
1161
1162 static int tg3_mdio_init(struct tg3 *tp)
1163 {
1164         int i;
1165         u32 reg;
1166         struct phy_device *phydev;
1167
1168         if (tg3_flag(tp, 5717_PLUS)) {
1169                 u32 is_serdes;
1170
1171                 tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;
1172
1173                 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1174                         is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1175                 else
1176                         is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1177                                     TG3_CPMU_PHY_STRAP_IS_SERDES;
1178                 if (is_serdes)
1179                         tp->phy_addr += 7;
1180         } else
1181                 tp->phy_addr = TG3_PHY_MII_ADDR;
1182
1183         tg3_mdio_start(tp);
1184
1185         if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1186                 return 0;
1187
1188         tp->mdio_bus = mdiobus_alloc();
1189         if (tp->mdio_bus == NULL)
1190                 return -ENOMEM;
1191
1192         tp->mdio_bus->name     = "tg3 mdio bus";
1193         snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1194                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1195         tp->mdio_bus->priv     = tp;
1196         tp->mdio_bus->parent   = &tp->pdev->dev;
1197         tp->mdio_bus->read     = &tg3_mdio_read;
1198         tp->mdio_bus->write    = &tg3_mdio_write;
1199         tp->mdio_bus->reset    = &tg3_mdio_reset;
1200         tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1201         tp->mdio_bus->irq      = &tp->mdio_irq[0];
1202
1203         for (i = 0; i < PHY_MAX_ADDR; i++)
1204                 tp->mdio_bus->irq[i] = PHY_POLL;
1205
1206         /* The bus registration will look for all the PHYs on the mdio bus.
1207          * Unfortunately, it does not ensure the PHY is powered up before
1208          * accessing the PHY ID registers.  A chip reset is the
1209          * quickest way to bring the device back to an operational state..
1210          */
1211         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1212                 tg3_bmcr_reset(tp);
1213
1214         i = mdiobus_register(tp->mdio_bus);
1215         if (i) {
1216                 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1217                 mdiobus_free(tp->mdio_bus);
1218                 return i;
1219         }
1220
1221         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1222
1223         if (!phydev || !phydev->drv) {
1224                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1225                 mdiobus_unregister(tp->mdio_bus);
1226                 mdiobus_free(tp->mdio_bus);
1227                 return -ENODEV;
1228         }
1229
1230         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1231         case PHY_ID_BCM57780:
1232                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1233                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1234                 break;
1235         case PHY_ID_BCM50610:
1236         case PHY_ID_BCM50610M:
1237                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1238                                      PHY_BRCM_RX_REFCLK_UNUSED |
1239                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1240                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1241                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1242                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1243                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1244                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1245                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1246                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1247                 /* fallthru */
1248         case PHY_ID_RTL8211C:
1249                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1250                 break;
1251         case PHY_ID_RTL8201E:
1252         case PHY_ID_BCMAC131:
1253                 phydev->interface = PHY_INTERFACE_MODE_MII;
1254                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1255                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1256                 break;
1257         }
1258
1259         tg3_flag_set(tp, MDIOBUS_INITED);
1260
1261         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1262                 tg3_mdio_config_5785(tp);
1263
1264         return 0;
1265 }
1266
1267 static void tg3_mdio_fini(struct tg3 *tp)
1268 {
1269         if (tg3_flag(tp, MDIOBUS_INITED)) {
1270                 tg3_flag_clear(tp, MDIOBUS_INITED);
1271                 mdiobus_unregister(tp->mdio_bus);
1272                 mdiobus_free(tp->mdio_bus);
1273         }
1274 }
1275
1276 /* tp->lock is held. */
1277 static inline void tg3_generate_fw_event(struct tg3 *tp)
1278 {
1279         u32 val;
1280
1281         val = tr32(GRC_RX_CPU_EVENT);
1282         val |= GRC_RX_CPU_DRIVER_EVENT;
1283         tw32_f(GRC_RX_CPU_EVENT, val);
1284
1285         tp->last_event_jiffies = jiffies;
1286 }
1287
1288 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1289
1290 /* tp->lock is held. */
1291 static void tg3_wait_for_event_ack(struct tg3 *tp)
1292 {
1293         int i;
1294         unsigned int delay_cnt;
1295         long time_remain;
1296
1297         /* If enough time has passed, no wait is necessary. */
1298         time_remain = (long)(tp->last_event_jiffies + 1 +
1299                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1300                       (long)jiffies;
1301         if (time_remain < 0)
1302                 return;
1303
1304         /* Check if we can shorten the wait time. */
1305         delay_cnt = jiffies_to_usecs(time_remain);
1306         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1307                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1308         delay_cnt = (delay_cnt >> 3) + 1;
1309
1310         for (i = 0; i < delay_cnt; i++) {
1311                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1312                         break;
1313                 udelay(8);
1314         }
1315 }
1316
1317 /* tp->lock is held. */
1318 static void tg3_ump_link_report(struct tg3 *tp)
1319 {
1320         u32 reg;
1321         u32 val;
1322
1323         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1324                 return;
1325
1326         tg3_wait_for_event_ack(tp);
1327
1328         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1329
1330         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1331
1332         val = 0;
1333         if (!tg3_readphy(tp, MII_BMCR, &reg))
1334                 val = reg << 16;
1335         if (!tg3_readphy(tp, MII_BMSR, &reg))
1336                 val |= (reg & 0xffff);
1337         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1338
1339         val = 0;
1340         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1341                 val = reg << 16;
1342         if (!tg3_readphy(tp, MII_LPA, &reg))
1343                 val |= (reg & 0xffff);
1344         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1345
1346         val = 0;
1347         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1348                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1349                         val = reg << 16;
1350                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1351                         val |= (reg & 0xffff);
1352         }
1353         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1354
1355         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1356                 val = reg << 16;
1357         else
1358                 val = 0;
1359         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1360
1361         tg3_generate_fw_event(tp);
1362 }
1363
1364 static void tg3_link_report(struct tg3 *tp)
1365 {
1366         if (!netif_carrier_ok(tp->dev)) {
1367                 netif_info(tp, link, tp->dev, "Link is down\n");
1368                 tg3_ump_link_report(tp);
1369         } else if (netif_msg_link(tp)) {
1370                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1371                             (tp->link_config.active_speed == SPEED_1000 ?
1372                              1000 :
1373                              (tp->link_config.active_speed == SPEED_100 ?
1374                               100 : 10)),
1375                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1376                              "full" : "half"));
1377
1378                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1379                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1380                             "on" : "off",
1381                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1382                             "on" : "off");
1383
1384                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1385                         netdev_info(tp->dev, "EEE is %s\n",
1386                                     tp->setlpicnt ? "enabled" : "disabled");
1387
1388                 tg3_ump_link_report(tp);
1389         }
1390 }
1391
1392 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1393 {
1394         u16 miireg;
1395
1396         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1397                 miireg = ADVERTISE_PAUSE_CAP;
1398         else if (flow_ctrl & FLOW_CTRL_TX)
1399                 miireg = ADVERTISE_PAUSE_ASYM;
1400         else if (flow_ctrl & FLOW_CTRL_RX)
1401                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1402         else
1403                 miireg = 0;
1404
1405         return miireg;
1406 }
1407
1408 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1409 {
1410         u16 miireg;
1411
1412         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1413                 miireg = ADVERTISE_1000XPAUSE;
1414         else if (flow_ctrl & FLOW_CTRL_TX)
1415                 miireg = ADVERTISE_1000XPSE_ASYM;
1416         else if (flow_ctrl & FLOW_CTRL_RX)
1417                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1418         else
1419                 miireg = 0;
1420
1421         return miireg;
1422 }
1423
1424 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1425 {
1426         u8 cap = 0;
1427
1428         if (lcladv & ADVERTISE_1000XPAUSE) {
1429                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1430                         if (rmtadv & LPA_1000XPAUSE)
1431                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1432                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1433                                 cap = FLOW_CTRL_RX;
1434                 } else {
1435                         if (rmtadv & LPA_1000XPAUSE)
1436                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1437                 }
1438         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1439                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1440                         cap = FLOW_CTRL_TX;
1441         }
1442
1443         return cap;
1444 }
1445
1446 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1447 {
1448         u8 autoneg;
1449         u8 flowctrl = 0;
1450         u32 old_rx_mode = tp->rx_mode;
1451         u32 old_tx_mode = tp->tx_mode;
1452
1453         if (tg3_flag(tp, USE_PHYLIB))
1454                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1455         else
1456                 autoneg = tp->link_config.autoneg;
1457
1458         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1459                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1460                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1461                 else
1462                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1463         } else
1464                 flowctrl = tp->link_config.flowctrl;
1465
1466         tp->link_config.active_flowctrl = flowctrl;
1467
1468         if (flowctrl & FLOW_CTRL_RX)
1469                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1470         else
1471                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1472
1473         if (old_rx_mode != tp->rx_mode)
1474                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1475
1476         if (flowctrl & FLOW_CTRL_TX)
1477                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1478         else
1479                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1480
1481         if (old_tx_mode != tp->tx_mode)
1482                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1483 }
1484
1485 static void tg3_adjust_link(struct net_device *dev)
1486 {
1487         u8 oldflowctrl, linkmesg = 0;
1488         u32 mac_mode, lcl_adv, rmt_adv;
1489         struct tg3 *tp = netdev_priv(dev);
1490         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1491
1492         spin_lock_bh(&tp->lock);
1493
1494         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1495                                     MAC_MODE_HALF_DUPLEX);
1496
1497         oldflowctrl = tp->link_config.active_flowctrl;
1498
1499         if (phydev->link) {
1500                 lcl_adv = 0;
1501                 rmt_adv = 0;
1502
1503                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1504                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1505                 else if (phydev->speed == SPEED_1000 ||
1506                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1507                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1508                 else
1509                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1510
1511                 if (phydev->duplex == DUPLEX_HALF)
1512                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1513                 else {
1514                         lcl_adv = tg3_advert_flowctrl_1000T(
1515                                   tp->link_config.flowctrl);
1516
1517                         if (phydev->pause)
1518                                 rmt_adv = LPA_PAUSE_CAP;
1519                         if (phydev->asym_pause)
1520                                 rmt_adv |= LPA_PAUSE_ASYM;
1521                 }
1522
1523                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1524         } else
1525                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1526
1527         if (mac_mode != tp->mac_mode) {
1528                 tp->mac_mode = mac_mode;
1529                 tw32_f(MAC_MODE, tp->mac_mode);
1530                 udelay(40);
1531         }
1532
1533         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1534                 if (phydev->speed == SPEED_10)
1535                         tw32(MAC_MI_STAT,
1536                              MAC_MI_STAT_10MBPS_MODE |
1537                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1538                 else
1539                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1540         }
1541
1542         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1543                 tw32(MAC_TX_LENGTHS,
1544                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1545                       (6 << TX_LENGTHS_IPG_SHIFT) |
1546                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1547         else
1548                 tw32(MAC_TX_LENGTHS,
1549                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1550                       (6 << TX_LENGTHS_IPG_SHIFT) |
1551                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1552
1553         if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1554             (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1555             phydev->speed != tp->link_config.active_speed ||
1556             phydev->duplex != tp->link_config.active_duplex ||
1557             oldflowctrl != tp->link_config.active_flowctrl)
1558                 linkmesg = 1;
1559
1560         tp->link_config.active_speed = phydev->speed;
1561         tp->link_config.active_duplex = phydev->duplex;
1562
1563         spin_unlock_bh(&tp->lock);
1564
1565         if (linkmesg)
1566                 tg3_link_report(tp);
1567 }
1568
1569 static int tg3_phy_init(struct tg3 *tp)
1570 {
1571         struct phy_device *phydev;
1572
1573         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1574                 return 0;
1575
1576         /* Bring the PHY back to a known state. */
1577         tg3_bmcr_reset(tp);
1578
1579         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1580
1581         /* Attach the MAC to the PHY. */
1582         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1583                              phydev->dev_flags, phydev->interface);
1584         if (IS_ERR(phydev)) {
1585                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1586                 return PTR_ERR(phydev);
1587         }
1588
1589         /* Mask with MAC supported features. */
1590         switch (phydev->interface) {
1591         case PHY_INTERFACE_MODE_GMII:
1592         case PHY_INTERFACE_MODE_RGMII:
1593                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1594                         phydev->supported &= (PHY_GBIT_FEATURES |
1595                                               SUPPORTED_Pause |
1596                                               SUPPORTED_Asym_Pause);
1597                         break;
1598                 }
1599                 /* fallthru */
1600         case PHY_INTERFACE_MODE_MII:
1601                 phydev->supported &= (PHY_BASIC_FEATURES |
1602                                       SUPPORTED_Pause |
1603                                       SUPPORTED_Asym_Pause);
1604                 break;
1605         default:
1606                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1607                 return -EINVAL;
1608         }
1609
1610         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1611
1612         phydev->advertising = phydev->supported;
1613
1614         return 0;
1615 }
1616
1617 static void tg3_phy_start(struct tg3 *tp)
1618 {
1619         struct phy_device *phydev;
1620
1621         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1622                 return;
1623
1624         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1625
1626         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1627                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1628                 phydev->speed = tp->link_config.orig_speed;
1629                 phydev->duplex = tp->link_config.orig_duplex;
1630                 phydev->autoneg = tp->link_config.orig_autoneg;
1631                 phydev->advertising = tp->link_config.orig_advertising;
1632         }
1633
1634         phy_start(phydev);
1635
1636         phy_start_aneg(phydev);
1637 }
1638
1639 static void tg3_phy_stop(struct tg3 *tp)
1640 {
1641         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1642                 return;
1643
1644         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1645 }
1646
1647 static void tg3_phy_fini(struct tg3 *tp)
1648 {
1649         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1650                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1651                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1652         }
1653 }
1654
1655 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1656 {
1657         u32 phytest;
1658
1659         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1660                 u32 phy;
1661
1662                 tg3_writephy(tp, MII_TG3_FET_TEST,
1663                              phytest | MII_TG3_FET_SHADOW_EN);
1664                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1665                         if (enable)
1666                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1667                         else
1668                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1669                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1670                 }
1671                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1672         }
1673 }
1674
1675 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1676 {
1677         u32 reg;
1678
1679         if (!tg3_flag(tp, 5705_PLUS) ||
1680             (tg3_flag(tp, 5717_PLUS) &&
1681              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1682                 return;
1683
1684         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1685                 tg3_phy_fet_toggle_apd(tp, enable);
1686                 return;
1687         }
1688
1689         reg = MII_TG3_MISC_SHDW_WREN |
1690               MII_TG3_MISC_SHDW_SCR5_SEL |
1691               MII_TG3_MISC_SHDW_SCR5_LPED |
1692               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1693               MII_TG3_MISC_SHDW_SCR5_SDTL |
1694               MII_TG3_MISC_SHDW_SCR5_C125OE;
1695         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1696                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1697
1698         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1699
1700
1701         reg = MII_TG3_MISC_SHDW_WREN |
1702               MII_TG3_MISC_SHDW_APD_SEL |
1703               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1704         if (enable)
1705                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1706
1707         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1708 }
1709
1710 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1711 {
1712         u32 phy;
1713
1714         if (!tg3_flag(tp, 5705_PLUS) ||
1715             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
1716                 return;
1717
1718         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1719                 u32 ephy;
1720
1721                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
1722                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
1723
1724                         tg3_writephy(tp, MII_TG3_FET_TEST,
1725                                      ephy | MII_TG3_FET_SHADOW_EN);
1726                         if (!tg3_readphy(tp, reg, &phy)) {
1727                                 if (enable)
1728                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1729                                 else
1730                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1731                                 tg3_writephy(tp, reg, phy);
1732                         }
1733                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
1734                 }
1735         } else {
1736                 int ret;
1737
1738                 ret = tg3_phy_auxctl_read(tp,
1739                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
1740                 if (!ret) {
1741                         if (enable)
1742                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1743                         else
1744                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1745                         tg3_phy_auxctl_write(tp,
1746                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
1747                 }
1748         }
1749 }
1750
1751 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1752 {
1753         int ret;
1754         u32 val;
1755
1756         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1757                 return;
1758
1759         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
1760         if (!ret)
1761                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
1762                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
1763 }
1764
1765 static void tg3_phy_apply_otp(struct tg3 *tp)
1766 {
1767         u32 otp, phy;
1768
1769         if (!tp->phy_otp)
1770                 return;
1771
1772         otp = tp->phy_otp;
1773
1774         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
1775                 return;
1776
1777         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1778         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1779         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1780
1781         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1782               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1783         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1784
1785         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1786         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1787         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1788
1789         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1790         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1791
1792         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1793         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1794
1795         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1796               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1797         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1798
1799         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1800 }
1801
1802 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
1803 {
1804         u32 val;
1805
1806         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
1807                 return;
1808
1809         tp->setlpicnt = 0;
1810
1811         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1812             current_link_up == 1 &&
1813             tp->link_config.active_duplex == DUPLEX_FULL &&
1814             (tp->link_config.active_speed == SPEED_100 ||
1815              tp->link_config.active_speed == SPEED_1000)) {
1816                 u32 eeectl;
1817
1818                 if (tp->link_config.active_speed == SPEED_1000)
1819                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
1820                 else
1821                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
1822
1823                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
1824
1825                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
1826                                   TG3_CL45_D7_EEERES_STAT, &val);
1827
1828                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
1829                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
1830                         tp->setlpicnt = 2;
1831         }
1832
1833         if (!tp->setlpicnt) {
1834                 val = tr32(TG3_CPMU_EEE_MODE);
1835                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
1836         }
1837 }
1838
1839 static void tg3_phy_eee_enable(struct tg3 *tp)
1840 {
1841         u32 val;
1842
1843         if (tp->link_config.active_speed == SPEED_1000 &&
1844             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1845              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
1846              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
1847             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1848                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0003);
1849                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1850         }
1851
1852         val = tr32(TG3_CPMU_EEE_MODE);
1853         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
1854 }
1855
1856 static int tg3_wait_macro_done(struct tg3 *tp)
1857 {
1858         int limit = 100;
1859
1860         while (limit--) {
1861                 u32 tmp32;
1862
1863                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1864                         if ((tmp32 & 0x1000) == 0)
1865                                 break;
1866                 }
1867         }
1868         if (limit < 0)
1869                 return -EBUSY;
1870
1871         return 0;
1872 }
1873
1874 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1875 {
1876         static const u32 test_pat[4][6] = {
1877         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1878         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1879         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1880         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1881         };
1882         int chan;
1883
1884         for (chan = 0; chan < 4; chan++) {
1885                 int i;
1886
1887                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1888                              (chan * 0x2000) | 0x0200);
1889                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1890
1891                 for (i = 0; i < 6; i++)
1892                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1893                                      test_pat[chan][i]);
1894
1895                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1896                 if (tg3_wait_macro_done(tp)) {
1897                         *resetp = 1;
1898                         return -EBUSY;
1899                 }
1900
1901                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1902                              (chan * 0x2000) | 0x0200);
1903                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
1904                 if (tg3_wait_macro_done(tp)) {
1905                         *resetp = 1;
1906                         return -EBUSY;
1907                 }
1908
1909                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
1910                 if (tg3_wait_macro_done(tp)) {
1911                         *resetp = 1;
1912                         return -EBUSY;
1913                 }
1914
1915                 for (i = 0; i < 6; i += 2) {
1916                         u32 low, high;
1917
1918                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1919                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1920                             tg3_wait_macro_done(tp)) {
1921                                 *resetp = 1;
1922                                 return -EBUSY;
1923                         }
1924                         low &= 0x7fff;
1925                         high &= 0x000f;
1926                         if (low != test_pat[chan][i] ||
1927                             high != test_pat[chan][i+1]) {
1928                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1929                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1930                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1931
1932                                 return -EBUSY;
1933                         }
1934                 }
1935         }
1936
1937         return 0;
1938 }
1939
1940 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1941 {
1942         int chan;
1943
1944         for (chan = 0; chan < 4; chan++) {
1945                 int i;
1946
1947                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1948                              (chan * 0x2000) | 0x0200);
1949                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1950                 for (i = 0; i < 6; i++)
1951                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1952                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1953                 if (tg3_wait_macro_done(tp))
1954                         return -EBUSY;
1955         }
1956
1957         return 0;
1958 }
1959
1960 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1961 {
1962         u32 reg32, phy9_orig;
1963         int retries, do_phy_reset, err;
1964
1965         retries = 10;
1966         do_phy_reset = 1;
1967         do {
1968                 if (do_phy_reset) {
1969                         err = tg3_bmcr_reset(tp);
1970                         if (err)
1971                                 return err;
1972                         do_phy_reset = 0;
1973                 }
1974
1975                 /* Disable transmitter and interrupt.  */
1976                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1977                         continue;
1978
1979                 reg32 |= 0x3000;
1980                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1981
1982                 /* Set full-duplex, 1000 mbps.  */
1983                 tg3_writephy(tp, MII_BMCR,
1984                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1985
1986                 /* Set to master mode.  */
1987                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1988                         continue;
1989
1990                 tg3_writephy(tp, MII_TG3_CTRL,
1991                              (MII_TG3_CTRL_AS_MASTER |
1992                               MII_TG3_CTRL_ENABLE_AS_MASTER));
1993
1994                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
1995                 if (err)
1996                         return err;
1997
1998                 /* Block the PHY control access.  */
1999                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2000
2001                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2002                 if (!err)
2003                         break;
2004         } while (--retries);
2005
2006         err = tg3_phy_reset_chanpat(tp);
2007         if (err)
2008                 return err;
2009
2010         tg3_phydsp_write(tp, 0x8005, 0x0000);
2011
2012         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2013         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2014
2015         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2016
2017         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
2018
2019         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2020                 reg32 &= ~0x3000;
2021                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2022         } else if (!err)
2023                 err = -EBUSY;
2024
2025         return err;
2026 }
2027
2028 /* This will reset the tigon3 PHY if there is no valid
2029  * link unless the FORCE argument is non-zero.
2030  */
2031 static int tg3_phy_reset(struct tg3 *tp)
2032 {
2033         u32 val, cpmuctrl;
2034         int err;
2035
2036         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2037                 val = tr32(GRC_MISC_CFG);
2038                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2039                 udelay(40);
2040         }
2041         err  = tg3_readphy(tp, MII_BMSR, &val);
2042         err |= tg3_readphy(tp, MII_BMSR, &val);
2043         if (err != 0)
2044                 return -EBUSY;
2045
2046         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2047                 netif_carrier_off(tp->dev);
2048                 tg3_link_report(tp);
2049         }
2050
2051         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2052             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2053             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2054                 err = tg3_phy_reset_5703_4_5(tp);
2055                 if (err)
2056                         return err;
2057                 goto out;
2058         }
2059
2060         cpmuctrl = 0;
2061         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2062             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2063                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2064                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2065                         tw32(TG3_CPMU_CTRL,
2066                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2067         }
2068
2069         err = tg3_bmcr_reset(tp);
2070         if (err)
2071                 return err;
2072
2073         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2074                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2075                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2076
2077                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2078         }
2079
2080         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2081             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2082                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2083                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2084                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2085                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2086                         udelay(40);
2087                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2088                 }
2089         }
2090
2091         if (tg3_flag(tp, 5717_PLUS) &&
2092             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2093                 return 0;
2094
2095         tg3_phy_apply_otp(tp);
2096
2097         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2098                 tg3_phy_toggle_apd(tp, true);
2099         else
2100                 tg3_phy_toggle_apd(tp, false);
2101
2102 out:
2103         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2104             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2105                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2106                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2107                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2108         }
2109
2110         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2111                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2112                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2113         }
2114
2115         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2116                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2117                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2118                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2119                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2120                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2121                 }
2122         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2123                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2124                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2125                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2126                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2127                                 tg3_writephy(tp, MII_TG3_TEST1,
2128                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2129                         } else
2130                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2131
2132                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2133                 }
2134         }
2135
2136         /* Set Extended packet length bit (bit 14) on all chips that */
2137         /* support jumbo frames */
2138         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2139                 /* Cannot do read-modify-write on 5401 */
2140                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2141         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2142                 /* Set bit 14 with read-modify-write to preserve other bits */
2143                 err = tg3_phy_auxctl_read(tp,
2144                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2145                 if (!err)
2146                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2147                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2148         }
2149
2150         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2151          * jumbo frames transmission.
2152          */
2153         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2154                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2155                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2156                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2157         }
2158
2159         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2160                 /* adjust output voltage */
2161                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2162         }
2163
2164         tg3_phy_toggle_automdix(tp, 1);
2165         tg3_phy_set_wirespeed(tp);
2166         return 0;
2167 }
2168
2169 static void tg3_frob_aux_power(struct tg3 *tp)
2170 {
2171         bool need_vaux = false;
2172
2173         /* The GPIOs do something completely different on 57765. */
2174         if (!tg3_flag(tp, IS_NIC) ||
2175             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2176             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2177                 return;
2178
2179         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2180              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2181              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2182              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
2183             tp->pdev_peer != tp->pdev) {
2184                 struct net_device *dev_peer;
2185
2186                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2187
2188                 /* remove_one() may have been run on the peer. */
2189                 if (dev_peer) {
2190                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2191
2192                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2193                                 return;
2194
2195                         if (tg3_flag(tp_peer, WOL_ENABLE) ||
2196                             tg3_flag(tp_peer, ENABLE_ASF))
2197                                 need_vaux = true;
2198                 }
2199         }
2200
2201         if (tg3_flag(tp, WOL_ENABLE) || tg3_flag(tp, ENABLE_ASF))
2202                 need_vaux = true;
2203
2204         if (need_vaux) {
2205                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2206                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2207                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2208                                     (GRC_LCLCTRL_GPIO_OE0 |
2209                                      GRC_LCLCTRL_GPIO_OE1 |
2210                                      GRC_LCLCTRL_GPIO_OE2 |
2211                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2212                                      GRC_LCLCTRL_GPIO_OUTPUT1),
2213                                     100);
2214                 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2215                            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2216                         /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2217                         u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2218                                              GRC_LCLCTRL_GPIO_OE1 |
2219                                              GRC_LCLCTRL_GPIO_OE2 |
2220                                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2221                                              GRC_LCLCTRL_GPIO_OUTPUT1 |
2222                                              tp->grc_local_ctrl;
2223                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2224
2225                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2226                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2227
2228                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2229                         tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2230                 } else {
2231                         u32 no_gpio2;
2232                         u32 grc_local_ctrl = 0;
2233
2234                         /* Workaround to prevent overdrawing Amps. */
2235                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2236                             ASIC_REV_5714) {
2237                                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2238                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2239                                             grc_local_ctrl, 100);
2240                         }
2241
2242                         /* On 5753 and variants, GPIO2 cannot be used. */
2243                         no_gpio2 = tp->nic_sram_data_cfg &
2244                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
2245
2246                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2247                                          GRC_LCLCTRL_GPIO_OE1 |
2248                                          GRC_LCLCTRL_GPIO_OE2 |
2249                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
2250                                          GRC_LCLCTRL_GPIO_OUTPUT2;
2251                         if (no_gpio2) {
2252                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2253                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
2254                         }
2255                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2256                                                     grc_local_ctrl, 100);
2257
2258                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2259
2260                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2261                                                     grc_local_ctrl, 100);
2262
2263                         if (!no_gpio2) {
2264                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2265                                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2266                                             grc_local_ctrl, 100);
2267                         }
2268                 }
2269         } else {
2270                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2271                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2272                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2273                                     (GRC_LCLCTRL_GPIO_OE1 |
2274                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2275
2276                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2277                                     GRC_LCLCTRL_GPIO_OE1, 100);
2278
2279                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2280                                     (GRC_LCLCTRL_GPIO_OE1 |
2281                                      GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2282                 }
2283         }
2284 }
2285
2286 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2287 {
2288         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2289                 return 1;
2290         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2291                 if (speed != SPEED_10)
2292                         return 1;
2293         } else if (speed == SPEED_10)
2294                 return 1;
2295
2296         return 0;
2297 }
2298
2299 static int tg3_setup_phy(struct tg3 *, int);
2300
2301 #define RESET_KIND_SHUTDOWN     0
2302 #define RESET_KIND_INIT         1
2303 #define RESET_KIND_SUSPEND      2
2304
2305 static void tg3_write_sig_post_reset(struct tg3 *, int);
2306 static int tg3_halt_cpu(struct tg3 *, u32);
2307
2308 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2309 {
2310         u32 val;
2311
2312         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2313                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2314                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2315                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2316
2317                         sg_dig_ctrl |=
2318                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2319                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2320                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2321                 }
2322                 return;
2323         }
2324
2325         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2326                 tg3_bmcr_reset(tp);
2327                 val = tr32(GRC_MISC_CFG);
2328                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2329                 udelay(40);
2330                 return;
2331         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2332                 u32 phytest;
2333                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2334                         u32 phy;
2335
2336                         tg3_writephy(tp, MII_ADVERTISE, 0);
2337                         tg3_writephy(tp, MII_BMCR,
2338                                      BMCR_ANENABLE | BMCR_ANRESTART);
2339
2340                         tg3_writephy(tp, MII_TG3_FET_TEST,
2341                                      phytest | MII_TG3_FET_SHADOW_EN);
2342                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2343                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2344                                 tg3_writephy(tp,
2345                                              MII_TG3_FET_SHDW_AUXMODE4,
2346                                              phy);
2347                         }
2348                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2349                 }
2350                 return;
2351         } else if (do_low_power) {
2352                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2353                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2354
2355                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2356                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2357                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2358                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2359         }
2360
2361         /* The PHY should not be powered down on some chips because
2362          * of bugs.
2363          */
2364         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2365             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2366             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2367              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2368                 return;
2369
2370         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2371             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2372                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2373                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2374                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2375                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2376         }
2377
2378         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2379 }
2380
2381 /* tp->lock is held. */
2382 static int tg3_nvram_lock(struct tg3 *tp)
2383 {
2384         if (tg3_flag(tp, NVRAM)) {
2385                 int i;
2386
2387                 if (tp->nvram_lock_cnt == 0) {
2388                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2389                         for (i = 0; i < 8000; i++) {
2390                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2391                                         break;
2392                                 udelay(20);
2393                         }
2394                         if (i == 8000) {
2395                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2396                                 return -ENODEV;
2397                         }
2398                 }
2399                 tp->nvram_lock_cnt++;
2400         }
2401         return 0;
2402 }
2403
2404 /* tp->lock is held. */
2405 static void tg3_nvram_unlock(struct tg3 *tp)
2406 {
2407         if (tg3_flag(tp, NVRAM)) {
2408                 if (tp->nvram_lock_cnt > 0)
2409                         tp->nvram_lock_cnt--;
2410                 if (tp->nvram_lock_cnt == 0)
2411                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2412         }
2413 }
2414
2415 /* tp->lock is held. */
2416 static void tg3_enable_nvram_access(struct tg3 *tp)
2417 {
2418         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2419                 u32 nvaccess = tr32(NVRAM_ACCESS);
2420
2421                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2422         }
2423 }
2424
2425 /* tp->lock is held. */
2426 static void tg3_disable_nvram_access(struct tg3 *tp)
2427 {
2428         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2429                 u32 nvaccess = tr32(NVRAM_ACCESS);
2430
2431                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2432         }
2433 }
2434
2435 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2436                                         u32 offset, u32 *val)
2437 {
2438         u32 tmp;
2439         int i;
2440
2441         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2442                 return -EINVAL;
2443
2444         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2445                                         EEPROM_ADDR_DEVID_MASK |
2446                                         EEPROM_ADDR_READ);
2447         tw32(GRC_EEPROM_ADDR,
2448              tmp |
2449              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2450              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2451               EEPROM_ADDR_ADDR_MASK) |
2452              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2453
2454         for (i = 0; i < 1000; i++) {
2455                 tmp = tr32(GRC_EEPROM_ADDR);
2456
2457                 if (tmp & EEPROM_ADDR_COMPLETE)
2458                         break;
2459                 msleep(1);
2460         }
2461         if (!(tmp & EEPROM_ADDR_COMPLETE))
2462                 return -EBUSY;
2463
2464         tmp = tr32(GRC_EEPROM_DATA);
2465
2466         /*
2467          * The data will always be opposite the native endian
2468          * format.  Perform a blind byteswap to compensate.
2469          */
2470         *val = swab32(tmp);
2471
2472         return 0;
2473 }
2474
2475 #define NVRAM_CMD_TIMEOUT 10000
2476
2477 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2478 {
2479         int i;
2480
2481         tw32(NVRAM_CMD, nvram_cmd);
2482         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2483                 udelay(10);
2484                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2485                         udelay(10);
2486                         break;
2487                 }
2488         }
2489
2490         if (i == NVRAM_CMD_TIMEOUT)
2491                 return -EBUSY;
2492
2493         return 0;
2494 }
2495
2496 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2497 {
2498         if (tg3_flag(tp, NVRAM) &&
2499             tg3_flag(tp, NVRAM_BUFFERED) &&
2500             tg3_flag(tp, FLASH) &&
2501             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2502             (tp->nvram_jedecnum == JEDEC_ATMEL))
2503
2504                 addr = ((addr / tp->nvram_pagesize) <<
2505                         ATMEL_AT45DB0X1B_PAGE_POS) +
2506                        (addr % tp->nvram_pagesize);
2507
2508         return addr;
2509 }
2510
2511 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2512 {
2513         if (tg3_flag(tp, NVRAM) &&
2514             tg3_flag(tp, NVRAM_BUFFERED) &&
2515             tg3_flag(tp, FLASH) &&
2516             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2517             (tp->nvram_jedecnum == JEDEC_ATMEL))
2518
2519                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2520                         tp->nvram_pagesize) +
2521                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2522
2523         return addr;
2524 }
2525
2526 /* NOTE: Data read in from NVRAM is byteswapped according to
2527  * the byteswapping settings for all other register accesses.
2528  * tg3 devices are BE devices, so on a BE machine, the data
2529  * returned will be exactly as it is seen in NVRAM.  On a LE
2530  * machine, the 32-bit value will be byteswapped.
2531  */
2532 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2533 {
2534         int ret;
2535
2536         if (!tg3_flag(tp, NVRAM))
2537                 return tg3_nvram_read_using_eeprom(tp, offset, val);
2538
2539         offset = tg3_nvram_phys_addr(tp, offset);
2540
2541         if (offset > NVRAM_ADDR_MSK)
2542                 return -EINVAL;
2543
2544         ret = tg3_nvram_lock(tp);
2545         if (ret)
2546                 return ret;
2547
2548         tg3_enable_nvram_access(tp);
2549
2550         tw32(NVRAM_ADDR, offset);
2551         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2552                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2553
2554         if (ret == 0)
2555                 *val = tr32(NVRAM_RDDATA);
2556
2557         tg3_disable_nvram_access(tp);
2558
2559         tg3_nvram_unlock(tp);
2560
2561         return ret;
2562 }
2563
2564 /* Ensures NVRAM data is in bytestream format. */
2565 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2566 {
2567         u32 v;
2568         int res = tg3_nvram_read(tp, offset, &v);
2569         if (!res)
2570                 *val = cpu_to_be32(v);
2571         return res;
2572 }
2573
2574 /* tp->lock is held. */
2575 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2576 {
2577         u32 addr_high, addr_low;
2578         int i;
2579
2580         addr_high = ((tp->dev->dev_addr[0] << 8) |
2581                      tp->dev->dev_addr[1]);
2582         addr_low = ((tp->dev->dev_addr[2] << 24) |
2583                     (tp->dev->dev_addr[3] << 16) |
2584                     (tp->dev->dev_addr[4] <<  8) |
2585                     (tp->dev->dev_addr[5] <<  0));
2586         for (i = 0; i < 4; i++) {
2587                 if (i == 1 && skip_mac_1)
2588                         continue;
2589                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2590                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2591         }
2592
2593         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2594             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2595                 for (i = 0; i < 12; i++) {
2596                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2597                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2598                 }
2599         }
2600
2601         addr_high = (tp->dev->dev_addr[0] +
2602                      tp->dev->dev_addr[1] +
2603                      tp->dev->dev_addr[2] +
2604                      tp->dev->dev_addr[3] +
2605                      tp->dev->dev_addr[4] +
2606                      tp->dev->dev_addr[5]) &
2607                 TX_BACKOFF_SEED_MASK;
2608         tw32(MAC_TX_BACKOFF_SEED, addr_high);
2609 }
2610
2611 static void tg3_enable_register_access(struct tg3 *tp)
2612 {
2613         /*
2614          * Make sure register accesses (indirect or otherwise) will function
2615          * correctly.
2616          */
2617         pci_write_config_dword(tp->pdev,
2618                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2619 }
2620
2621 static int tg3_power_up(struct tg3 *tp)
2622 {
2623         tg3_enable_register_access(tp);
2624
2625         pci_set_power_state(tp->pdev, PCI_D0);
2626
2627         /* Switch out of Vaux if it is a NIC */
2628         if (tg3_flag(tp, IS_NIC))
2629                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2630
2631         return 0;
2632 }
2633
2634 static int tg3_power_down_prepare(struct tg3 *tp)
2635 {
2636         u32 misc_host_ctrl;
2637         bool device_should_wake, do_low_power;
2638
2639         tg3_enable_register_access(tp);
2640
2641         /* Restore the CLKREQ setting. */
2642         if (tg3_flag(tp, CLKREQ_BUG)) {
2643                 u16 lnkctl;
2644
2645                 pci_read_config_word(tp->pdev,
2646                                      tp->pcie_cap + PCI_EXP_LNKCTL,
2647                                      &lnkctl);
2648                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2649                 pci_write_config_word(tp->pdev,
2650                                       tp->pcie_cap + PCI_EXP_LNKCTL,
2651                                       lnkctl);
2652         }
2653
2654         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2655         tw32(TG3PCI_MISC_HOST_CTRL,
2656              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2657
2658         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2659                              tg3_flag(tp, WOL_ENABLE);
2660
2661         if (tg3_flag(tp, USE_PHYLIB)) {
2662                 do_low_power = false;
2663                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2664                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2665                         struct phy_device *phydev;
2666                         u32 phyid, advertising;
2667
2668                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2669
2670                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2671
2672                         tp->link_config.orig_speed = phydev->speed;
2673                         tp->link_config.orig_duplex = phydev->duplex;
2674                         tp->link_config.orig_autoneg = phydev->autoneg;
2675                         tp->link_config.orig_advertising = phydev->advertising;
2676
2677                         advertising = ADVERTISED_TP |
2678                                       ADVERTISED_Pause |
2679                                       ADVERTISED_Autoneg |
2680                                       ADVERTISED_10baseT_Half;
2681
2682                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
2683                                 if (tg3_flag(tp, WOL_SPEED_100MB))
2684                                         advertising |=
2685                                                 ADVERTISED_100baseT_Half |
2686                                                 ADVERTISED_100baseT_Full |
2687                                                 ADVERTISED_10baseT_Full;
2688                                 else
2689                                         advertising |= ADVERTISED_10baseT_Full;
2690                         }
2691
2692                         phydev->advertising = advertising;
2693
2694                         phy_start_aneg(phydev);
2695
2696                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2697                         if (phyid != PHY_ID_BCMAC131) {
2698                                 phyid &= PHY_BCM_OUI_MASK;
2699                                 if (phyid == PHY_BCM_OUI_1 ||
2700                                     phyid == PHY_BCM_OUI_2 ||
2701                                     phyid == PHY_BCM_OUI_3)
2702                                         do_low_power = true;
2703                         }
2704                 }
2705         } else {
2706                 do_low_power = true;
2707
2708                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2709                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2710                         tp->link_config.orig_speed = tp->link_config.speed;
2711                         tp->link_config.orig_duplex = tp->link_config.duplex;
2712                         tp->link_config.orig_autoneg = tp->link_config.autoneg;
2713                 }
2714
2715                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2716                         tp->link_config.speed = SPEED_10;
2717                         tp->link_config.duplex = DUPLEX_HALF;
2718                         tp->link_config.autoneg = AUTONEG_ENABLE;
2719                         tg3_setup_phy(tp, 0);
2720                 }
2721         }
2722
2723         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2724                 u32 val;
2725
2726                 val = tr32(GRC_VCPU_EXT_CTRL);
2727                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2728         } else if (!tg3_flag(tp, ENABLE_ASF)) {
2729                 int i;
2730                 u32 val;
2731
2732                 for (i = 0; i < 200; i++) {
2733                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2734                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2735                                 break;
2736                         msleep(1);
2737                 }
2738         }
2739         if (tg3_flag(tp, WOL_CAP))
2740                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2741                                                      WOL_DRV_STATE_SHUTDOWN |
2742                                                      WOL_DRV_WOL |
2743                                                      WOL_SET_MAGIC_PKT);
2744
2745         if (device_should_wake) {
2746                 u32 mac_mode;
2747
2748                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2749                         if (do_low_power &&
2750                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2751                                 tg3_phy_auxctl_write(tp,
2752                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2753                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
2754                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2755                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
2756                                 udelay(40);
2757                         }
2758
2759                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2760                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
2761                         else
2762                                 mac_mode = MAC_MODE_PORT_MODE_MII;
2763
2764                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2765                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2766                             ASIC_REV_5700) {
2767                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
2768                                              SPEED_100 : SPEED_10;
2769                                 if (tg3_5700_link_polarity(tp, speed))
2770                                         mac_mode |= MAC_MODE_LINK_POLARITY;
2771                                 else
2772                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
2773                         }
2774                 } else {
2775                         mac_mode = MAC_MODE_PORT_MODE_TBI;
2776                 }
2777
2778                 if (!tg3_flag(tp, 5750_PLUS))
2779                         tw32(MAC_LED_CTRL, tp->led_ctrl);
2780
2781                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2782                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
2783                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
2784                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2785
2786                 if (tg3_flag(tp, ENABLE_APE))
2787                         mac_mode |= MAC_MODE_APE_TX_EN |
2788                                     MAC_MODE_APE_RX_EN |
2789                                     MAC_MODE_TDE_ENABLE;
2790
2791                 tw32_f(MAC_MODE, mac_mode);
2792                 udelay(100);
2793
2794                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2795                 udelay(10);
2796         }
2797
2798         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
2799             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2800              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2801                 u32 base_val;
2802
2803                 base_val = tp->pci_clock_ctrl;
2804                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2805                              CLOCK_CTRL_TXCLK_DISABLE);
2806
2807                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2808                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
2809         } else if (tg3_flag(tp, 5780_CLASS) ||
2810                    tg3_flag(tp, CPMU_PRESENT) ||
2811                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2812                 /* do nothing */
2813         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
2814                 u32 newbits1, newbits2;
2815
2816                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2817                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2818                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2819                                     CLOCK_CTRL_TXCLK_DISABLE |
2820                                     CLOCK_CTRL_ALTCLK);
2821                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2822                 } else if (tg3_flag(tp, 5705_PLUS)) {
2823                         newbits1 = CLOCK_CTRL_625_CORE;
2824                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2825                 } else {
2826                         newbits1 = CLOCK_CTRL_ALTCLK;
2827                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2828                 }
2829
2830                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2831                             40);
2832
2833                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2834                             40);
2835
2836                 if (!tg3_flag(tp, 5705_PLUS)) {
2837                         u32 newbits3;
2838
2839                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2840                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2841                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2842                                             CLOCK_CTRL_TXCLK_DISABLE |
2843                                             CLOCK_CTRL_44MHZ_CORE);
2844                         } else {
2845                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2846                         }
2847
2848                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
2849                                     tp->pci_clock_ctrl | newbits3, 40);
2850                 }
2851         }
2852
2853         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
2854                 tg3_power_down_phy(tp, do_low_power);
2855
2856         tg3_frob_aux_power(tp);
2857
2858         /* Workaround for unstable PLL clock */
2859         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2860             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2861                 u32 val = tr32(0x7d00);
2862
2863                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2864                 tw32(0x7d00, val);
2865                 if (!tg3_flag(tp, ENABLE_ASF)) {
2866                         int err;
2867
2868                         err = tg3_nvram_lock(tp);
2869                         tg3_halt_cpu(tp, RX_CPU_BASE);
2870                         if (!err)
2871                                 tg3_nvram_unlock(tp);
2872                 }
2873         }
2874
2875         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2876
2877         return 0;
2878 }
2879
2880 static void tg3_power_down(struct tg3 *tp)
2881 {
2882         tg3_power_down_prepare(tp);
2883
2884         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
2885         pci_set_power_state(tp->pdev, PCI_D3hot);
2886 }
2887
2888 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2889 {
2890         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2891         case MII_TG3_AUX_STAT_10HALF:
2892                 *speed = SPEED_10;
2893                 *duplex = DUPLEX_HALF;
2894                 break;
2895
2896         case MII_TG3_AUX_STAT_10FULL:
2897                 *speed = SPEED_10;
2898                 *duplex = DUPLEX_FULL;
2899                 break;
2900
2901         case MII_TG3_AUX_STAT_100HALF:
2902                 *speed = SPEED_100;
2903                 *duplex = DUPLEX_HALF;
2904                 break;
2905
2906         case MII_TG3_AUX_STAT_100FULL:
2907                 *speed = SPEED_100;
2908                 *duplex = DUPLEX_FULL;
2909                 break;
2910
2911         case MII_TG3_AUX_STAT_1000HALF:
2912                 *speed = SPEED_1000;
2913                 *duplex = DUPLEX_HALF;
2914                 break;
2915
2916         case MII_TG3_AUX_STAT_1000FULL:
2917                 *speed = SPEED_1000;
2918                 *duplex = DUPLEX_FULL;
2919                 break;
2920
2921         default:
2922                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2923                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2924                                  SPEED_10;
2925                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2926                                   DUPLEX_HALF;
2927                         break;
2928                 }
2929                 *speed = SPEED_INVALID;
2930                 *duplex = DUPLEX_INVALID;
2931                 break;
2932         }
2933 }
2934
2935 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
2936 {
2937         int err = 0;
2938         u32 val, new_adv;
2939
2940         new_adv = ADVERTISE_CSMA;
2941         if (advertise & ADVERTISED_10baseT_Half)
2942                 new_adv |= ADVERTISE_10HALF;
2943         if (advertise & ADVERTISED_10baseT_Full)
2944                 new_adv |= ADVERTISE_10FULL;
2945         if (advertise & ADVERTISED_100baseT_Half)
2946                 new_adv |= ADVERTISE_100HALF;
2947         if (advertise & ADVERTISED_100baseT_Full)
2948                 new_adv |= ADVERTISE_100FULL;
2949
2950         new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
2951
2952         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
2953         if (err)
2954                 goto done;
2955
2956         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
2957                 goto done;
2958
2959         new_adv = 0;
2960         if (advertise & ADVERTISED_1000baseT_Half)
2961                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2962         if (advertise & ADVERTISED_1000baseT_Full)
2963                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2964
2965         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2966             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2967                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2968                             MII_TG3_CTRL_ENABLE_AS_MASTER);
2969
2970         err = tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2971         if (err)
2972                 goto done;
2973
2974         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2975                 goto done;
2976
2977         tw32(TG3_CPMU_EEE_MODE,
2978              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2979
2980         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2981         if (!err) {
2982                 u32 err2;
2983
2984                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
2985                 case ASIC_REV_5717:
2986                 case ASIC_REV_57765:
2987                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
2988                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
2989                                                  MII_TG3_DSP_CH34TP2_HIBW01);
2990                         /* Fall through */
2991                 case ASIC_REV_5719:
2992                         val = MII_TG3_DSP_TAP26_ALNOKO |
2993                               MII_TG3_DSP_TAP26_RMRXSTO |
2994                               MII_TG3_DSP_TAP26_OPCSINPT;
2995                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2996                 }
2997
2998                 val = 0;
2999                 /* Advertise 100-BaseTX EEE ability */
3000                 if (advertise & ADVERTISED_100baseT_Full)
3001                         val |= MDIO_AN_EEE_ADV_100TX;
3002                 /* Advertise 1000-BaseT EEE ability */
3003                 if (advertise & ADVERTISED_1000baseT_Full)
3004                         val |= MDIO_AN_EEE_ADV_1000T;
3005                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3006
3007                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3008                 if (!err)
3009                         err = err2;
3010         }
3011
3012 done:
3013         return err;
3014 }
3015
3016 static void tg3_phy_copper_begin(struct tg3 *tp)
3017 {
3018         u32 new_adv;
3019         int i;
3020
3021         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3022                 new_adv = ADVERTISED_10baseT_Half |
3023                           ADVERTISED_10baseT_Full;
3024                 if (tg3_flag(tp, WOL_SPEED_100MB))
3025                         new_adv |= ADVERTISED_100baseT_Half |
3026                                    ADVERTISED_100baseT_Full;
3027
3028                 tg3_phy_autoneg_cfg(tp, new_adv,
3029                                     FLOW_CTRL_TX | FLOW_CTRL_RX);
3030         } else if (tp->link_config.speed == SPEED_INVALID) {
3031                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3032                         tp->link_config.advertising &=
3033                                 ~(ADVERTISED_1000baseT_Half |
3034                                   ADVERTISED_1000baseT_Full);
3035
3036                 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3037                                     tp->link_config.flowctrl);
3038         } else {
3039                 /* Asking for a specific link mode. */
3040                 if (tp->link_config.speed == SPEED_1000) {
3041                         if (tp->link_config.duplex == DUPLEX_FULL)
3042                                 new_adv = ADVERTISED_1000baseT_Full;
3043                         else
3044                                 new_adv = ADVERTISED_1000baseT_Half;
3045                 } else if (tp->link_config.speed == SPEED_100) {
3046                         if (tp->link_config.duplex == DUPLEX_FULL)
3047                                 new_adv = ADVERTISED_100baseT_Full;
3048                         else
3049                                 new_adv = ADVERTISED_100baseT_Half;
3050                 } else {
3051                         if (tp->link_config.duplex == DUPLEX_FULL)
3052                                 new_adv = ADVERTISED_10baseT_Full;
3053                         else
3054                                 new_adv = ADVERTISED_10baseT_Half;
3055                 }
3056
3057                 tg3_phy_autoneg_cfg(tp, new_adv,
3058                                     tp->link_config.flowctrl);
3059         }
3060
3061         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3062             tp->link_config.speed != SPEED_INVALID) {
3063                 u32 bmcr, orig_bmcr;
3064
3065                 tp->link_config.active_speed = tp->link_config.speed;
3066                 tp->link_config.active_duplex = tp->link_config.duplex;
3067
3068                 bmcr = 0;
3069                 switch (tp->link_config.speed) {
3070                 default:
3071                 case SPEED_10:
3072                         break;
3073
3074                 case SPEED_100:
3075                         bmcr |= BMCR_SPEED100;
3076                         break;
3077
3078                 case SPEED_1000:
3079                         bmcr |= TG3_BMCR_SPEED1000;
3080                         break;
3081                 }
3082
3083                 if (tp->link_config.duplex == DUPLEX_FULL)
3084                         bmcr |= BMCR_FULLDPLX;
3085
3086                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3087                     (bmcr != orig_bmcr)) {
3088                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3089                         for (i = 0; i < 1500; i++) {
3090                                 u32 tmp;
3091
3092                                 udelay(10);
3093                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3094                                     tg3_readphy(tp, MII_BMSR, &tmp))
3095                                         continue;
3096                                 if (!(tmp & BMSR_LSTATUS)) {
3097                                         udelay(40);
3098                                         break;
3099                                 }
3100                         }
3101                         tg3_writephy(tp, MII_BMCR, bmcr);
3102                         udelay(40);
3103                 }
3104         } else {
3105                 tg3_writephy(tp, MII_BMCR,
3106                              BMCR_ANENABLE | BMCR_ANRESTART);
3107         }
3108 }
3109
3110 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3111 {
3112         int err;
3113
3114         /* Turn off tap power management. */
3115         /* Set Extended packet length bit */
3116         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3117
3118         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3119         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3120         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3121         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3122         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3123
3124         udelay(40);
3125
3126         return err;
3127 }
3128
3129 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3130 {
3131         u32 adv_reg, all_mask = 0;
3132
3133         if (mask & ADVERTISED_10baseT_Half)
3134                 all_mask |= ADVERTISE_10HALF;
3135         if (mask & ADVERTISED_10baseT_Full)
3136                 all_mask |= ADVERTISE_10FULL;
3137         if (mask & ADVERTISED_100baseT_Half)
3138                 all_mask |= ADVERTISE_100HALF;
3139         if (mask & ADVERTISED_100baseT_Full)
3140                 all_mask |= ADVERTISE_100FULL;
3141
3142         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3143                 return 0;
3144
3145         if ((adv_reg & all_mask) != all_mask)
3146                 return 0;
3147         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3148                 u32 tg3_ctrl;
3149
3150                 all_mask = 0;
3151                 if (mask & ADVERTISED_1000baseT_Half)
3152                         all_mask |= ADVERTISE_1000HALF;
3153                 if (mask & ADVERTISED_1000baseT_Full)
3154                         all_mask |= ADVERTISE_1000FULL;
3155
3156                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
3157                         return 0;
3158
3159                 if ((tg3_ctrl & all_mask) != all_mask)
3160                         return 0;
3161         }
3162         return 1;
3163 }
3164
3165 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3166 {
3167         u32 curadv, reqadv;
3168
3169         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3170                 return 1;
3171
3172         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3173         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3174
3175         if (tp->link_config.active_duplex == DUPLEX_FULL) {
3176                 if (curadv != reqadv)
3177                         return 0;
3178
3179                 if (tg3_flag(tp, PAUSE_AUTONEG))
3180                         tg3_readphy(tp, MII_LPA, rmtadv);
3181         } else {
3182                 /* Reprogram the advertisement register, even if it
3183                  * does not affect the current link.  If the link
3184                  * gets renegotiated in the future, we can save an
3185                  * additional renegotiation cycle by advertising
3186                  * it correctly in the first place.
3187                  */
3188                 if (curadv != reqadv) {
3189                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3190                                      ADVERTISE_PAUSE_ASYM);
3191                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3192                 }
3193         }
3194
3195         return 1;
3196 }
3197
3198 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3199 {
3200         int current_link_up;
3201         u32 bmsr, val;
3202         u32 lcl_adv, rmt_adv;
3203         u16 current_speed;
3204         u8 current_duplex;
3205         int i, err;
3206
3207         tw32(MAC_EVENT, 0);
3208
3209         tw32_f(MAC_STATUS,
3210              (MAC_STATUS_SYNC_CHANGED |
3211               MAC_STATUS_CFG_CHANGED |
3212               MAC_STATUS_MI_COMPLETION |
3213               MAC_STATUS_LNKSTATE_CHANGED));
3214         udelay(40);
3215
3216         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3217                 tw32_f(MAC_MI_MODE,
3218                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3219                 udelay(80);
3220         }
3221
3222         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3223
3224         /* Some third-party PHYs need to be reset on link going
3225          * down.
3226          */
3227         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3228              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3229              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3230             netif_carrier_ok(tp->dev)) {
3231                 tg3_readphy(tp, MII_BMSR, &bmsr);
3232                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3233                     !(bmsr & BMSR_LSTATUS))
3234                         force_reset = 1;
3235         }
3236         if (force_reset)
3237                 tg3_phy_reset(tp);
3238
3239         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3240                 tg3_readphy(tp, MII_BMSR, &bmsr);
3241                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3242                     !tg3_flag(tp, INIT_COMPLETE))
3243                         bmsr = 0;
3244
3245                 if (!(bmsr & BMSR_LSTATUS)) {
3246                         err = tg3_init_5401phy_dsp(tp);
3247                         if (err)
3248                                 return err;
3249
3250                         tg3_readphy(tp, MII_BMSR, &bmsr);
3251                         for (i = 0; i < 1000; i++) {
3252                                 udelay(10);
3253                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3254                                     (bmsr & BMSR_LSTATUS)) {
3255                                         udelay(40);
3256                                         break;
3257                                 }
3258                         }
3259
3260                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3261                             TG3_PHY_REV_BCM5401_B0 &&
3262                             !(bmsr & BMSR_LSTATUS) &&
3263                             tp->link_config.active_speed == SPEED_1000) {
3264                                 err = tg3_phy_reset(tp);
3265                                 if (!err)
3266                                         err = tg3_init_5401phy_dsp(tp);
3267                                 if (err)
3268                                         return err;
3269                         }
3270                 }
3271         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3272                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3273                 /* 5701 {A0,B0} CRC bug workaround */
3274                 tg3_writephy(tp, 0x15, 0x0a75);
3275                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3276                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3277                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3278         }
3279
3280         /* Clear pending interrupts... */
3281         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3282         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3283
3284         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3285                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3286         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3287                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3288
3289         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3290             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3291                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3292                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3293                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3294                 else
3295                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3296         }
3297
3298         current_link_up = 0;
3299         current_speed = SPEED_INVALID;
3300         current_duplex = DUPLEX_INVALID;
3301
3302         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3303                 err = tg3_phy_auxctl_read(tp,
3304                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3305                                           &val);
3306                 if (!err && !(val & (1 << 10))) {
3307                         tg3_phy_auxctl_write(tp,
3308                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3309                                              val | (1 << 10));
3310                         goto relink;
3311                 }
3312         }
3313
3314         bmsr = 0;
3315         for (i = 0; i < 100; i++) {
3316                 tg3_readphy(tp, MII_BMSR, &bmsr);
3317                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3318                     (bmsr & BMSR_LSTATUS))
3319                         break;
3320                 udelay(40);
3321         }
3322
3323         if (bmsr & BMSR_LSTATUS) {
3324                 u32 aux_stat, bmcr;
3325
3326                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3327                 for (i = 0; i < 2000; i++) {
3328                         udelay(10);
3329                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3330                             aux_stat)
3331                                 break;
3332                 }
3333
3334                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3335                                              &current_speed,
3336                                              &current_duplex);
3337
3338                 bmcr = 0;
3339                 for (i = 0; i < 200; i++) {
3340                         tg3_readphy(tp, MII_BMCR, &bmcr);
3341                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
3342                                 continue;
3343                         if (bmcr && bmcr != 0x7fff)
3344                                 break;
3345                         udelay(10);
3346                 }
3347
3348                 lcl_adv = 0;
3349                 rmt_adv = 0;
3350
3351                 tp->link_config.active_speed = current_speed;
3352                 tp->link_config.active_duplex = current_duplex;
3353
3354                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3355                         if ((bmcr & BMCR_ANENABLE) &&
3356                             tg3_copper_is_advertising_all(tp,
3357                                                 tp->link_config.advertising)) {
3358                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3359                                                                   &rmt_adv))
3360                                         current_link_up = 1;
3361                         }
3362                 } else {
3363                         if (!(bmcr & BMCR_ANENABLE) &&
3364                             tp->link_config.speed == current_speed &&
3365                             tp->link_config.duplex == current_duplex &&
3366                             tp->link_config.flowctrl ==
3367                             tp->link_config.active_flowctrl) {
3368                                 current_link_up = 1;
3369                         }
3370                 }
3371
3372                 if (current_link_up == 1 &&
3373                     tp->link_config.active_duplex == DUPLEX_FULL)
3374                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3375         }
3376
3377 relink:
3378         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3379                 tg3_phy_copper_begin(tp);
3380
3381                 tg3_readphy(tp, MII_BMSR, &bmsr);
3382                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
3383                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
3384                         current_link_up = 1;
3385         }
3386
3387         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3388         if (current_link_up == 1) {
3389                 if (tp->link_config.active_speed == SPEED_100 ||
3390                     tp->link_config.active_speed == SPEED_10)
3391                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3392                 else
3393                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3394         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3395                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3396         else
3397                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3398
3399         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3400         if (tp->link_config.active_duplex == DUPLEX_HALF)
3401                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3402
3403         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3404                 if (current_link_up == 1 &&
3405                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3406                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3407                 else
3408                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3409         }
3410
3411         /* ??? Without this setting Netgear GA302T PHY does not
3412          * ??? send/receive packets...
3413          */
3414         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3415             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3416                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3417                 tw32_f(MAC_MI_MODE, tp->mi_mode);
3418                 udelay(80);
3419         }
3420
3421         tw32_f(MAC_MODE, tp->mac_mode);
3422         udelay(40);
3423
3424         tg3_phy_eee_adjust(tp, current_link_up);
3425
3426         if (tg3_flag(tp, USE_LINKCHG_REG)) {
3427                 /* Polled via timer. */
3428                 tw32_f(MAC_EVENT, 0);
3429         } else {
3430                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3431         }
3432         udelay(40);
3433
3434         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3435             current_link_up == 1 &&
3436             tp->link_config.active_speed == SPEED_1000 &&
3437             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
3438                 udelay(120);
3439                 tw32_f(MAC_STATUS,
3440                      (MAC_STATUS_SYNC_CHANGED |
3441                       MAC_STATUS_CFG_CHANGED));
3442                 udelay(40);
3443                 tg3_write_mem(tp,
3444                               NIC_SRAM_FIRMWARE_MBOX,
3445                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3446         }
3447
3448         /* Prevent send BD corruption. */
3449         if (tg3_flag(tp, CLKREQ_BUG)) {
3450                 u16 oldlnkctl, newlnkctl;
3451
3452                 pci_read_config_word(tp->pdev,
3453                                      tp->pcie_cap + PCI_EXP_LNKCTL,
3454                                      &oldlnkctl);
3455                 if (tp->link_config.active_speed == SPEED_100 ||
3456                     tp->link_config.active_speed == SPEED_10)
3457                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3458                 else
3459                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3460                 if (newlnkctl != oldlnkctl)
3461                         pci_write_config_word(tp->pdev,
3462                                               tp->pcie_cap + PCI_EXP_LNKCTL,
3463                                               newlnkctl);
3464         }
3465
3466         if (current_link_up != netif_carrier_ok(tp->dev)) {
3467                 if (current_link_up)
3468                         netif_carrier_on(tp->dev);
3469                 else
3470                         netif_carrier_off(tp->dev);
3471                 tg3_link_report(tp);
3472         }
3473
3474         return 0;
3475 }
3476
3477 struct tg3_fiber_aneginfo {
3478         int state;
3479 #define ANEG_STATE_UNKNOWN              0
3480 #define ANEG_STATE_AN_ENABLE            1
3481 #define ANEG_STATE_RESTART_INIT         2
3482 #define ANEG_STATE_RESTART              3
3483 #define ANEG_STATE_DISABLE_LINK_OK      4
3484 #define ANEG_STATE_ABILITY_DETECT_INIT  5
3485 #define ANEG_STATE_ABILITY_DETECT       6
3486 #define ANEG_STATE_ACK_DETECT_INIT      7
3487 #define ANEG_STATE_ACK_DETECT           8
3488 #define ANEG_STATE_COMPLETE_ACK_INIT    9
3489 #define ANEG_STATE_COMPLETE_ACK         10
3490 #define ANEG_STATE_IDLE_DETECT_INIT     11
3491 #define ANEG_STATE_IDLE_DETECT          12
3492 #define ANEG_STATE_LINK_OK              13
3493 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
3494 #define ANEG_STATE_NEXT_PAGE_WAIT       15
3495
3496         u32 flags;
3497 #define MR_AN_ENABLE            0x00000001
3498 #define MR_RESTART_AN           0x00000002
3499 #define MR_AN_COMPLETE          0x00000004
3500 #define MR_PAGE_RX              0x00000008
3501 #define MR_NP_LOADED            0x00000010
3502 #define MR_TOGGLE_TX            0x00000020
3503 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
3504 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
3505 #define MR_LP_ADV_SYM_PAUSE     0x00000100
3506 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
3507 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3508 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3509 #define MR_LP_ADV_NEXT_PAGE     0x00001000
3510 #define MR_TOGGLE_RX            0x00002000
3511 #define MR_NP_RX                0x00004000
3512
3513 #define MR_LINK_OK              0x80000000
3514
3515         unsigned long link_time, cur_time;
3516
3517         u32 ability_match_cfg;
3518         int ability_match_count;
3519
3520         char ability_match, idle_match, ack_match;
3521
3522         u32 txconfig, rxconfig;
3523 #define ANEG_CFG_NP             0x00000080
3524 #define ANEG_CFG_ACK            0x00000040
3525 #define ANEG_CFG_RF2            0x00000020
3526 #define ANEG_CFG_RF1            0x00000010
3527 #define ANEG_CFG_PS2            0x00000001
3528 #define ANEG_CFG_PS1            0x00008000
3529 #define ANEG_CFG_HD             0x00004000
3530 #define ANEG_CFG_FD             0x00002000
3531 #define ANEG_CFG_INVAL          0x00001f06
3532
3533 };
3534 #define ANEG_OK         0
3535 #define ANEG_DONE       1
3536 #define ANEG_TIMER_ENAB 2
3537 #define ANEG_FAILED     -1
3538
3539 #define ANEG_STATE_SETTLE_TIME  10000
3540
3541 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3542                                    struct tg3_fiber_aneginfo *ap)
3543 {
3544         u16 flowctrl;
3545         unsigned long delta;
3546         u32 rx_cfg_reg;
3547         int ret;
3548
3549         if (ap->state == ANEG_STATE_UNKNOWN) {
3550                 ap->rxconfig = 0;
3551                 ap->link_time = 0;
3552                 ap->cur_time = 0;
3553                 ap->ability_match_cfg = 0;
3554                 ap->ability_match_count = 0;
3555                 ap->ability_match = 0;
3556                 ap->idle_match = 0;
3557                 ap->ack_match = 0;
3558         }
3559         ap->cur_time++;
3560
3561         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3562                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3563
3564                 if (rx_cfg_reg != ap->ability_match_cfg) {
3565                         ap->ability_match_cfg = rx_cfg_reg;
3566                         ap->ability_match = 0;
3567                         ap->ability_match_count = 0;
3568                 } else {
3569                         if (++ap->ability_match_count > 1) {
3570                                 ap->ability_match = 1;
3571                                 ap->ability_match_cfg = rx_cfg_reg;
3572                         }
3573                 }
3574                 if (rx_cfg_reg & ANEG_CFG_ACK)
3575                         ap->ack_match = 1;
3576                 else
3577                         ap->ack_match = 0;
3578
3579                 ap->idle_match = 0;
3580         } else {
3581                 ap->idle_match = 1;
3582                 ap->ability_match_cfg = 0;
3583                 ap->ability_match_count = 0;
3584                 ap->ability_match = 0;
3585                 ap->ack_match = 0;
3586
3587                 rx_cfg_reg = 0;
3588         }
3589
3590         ap->rxconfig = rx_cfg_reg;
3591         ret = ANEG_OK;
3592
3593         switch (ap->state) {
3594         case ANEG_STATE_UNKNOWN:
3595                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3596                         ap->state = ANEG_STATE_AN_ENABLE;
3597
3598                 /* fallthru */
3599         case ANEG_STATE_AN_ENABLE:
3600                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3601                 if (ap->flags & MR_AN_ENABLE) {
3602                         ap->link_time = 0;
3603                         ap->cur_time = 0;
3604                         ap->ability_match_cfg = 0;
3605                         ap->ability_match_count = 0;
3606                         ap->ability_match = 0;
3607                         ap->idle_match = 0;
3608                         ap->ack_match = 0;
3609
3610                         ap->state = ANEG_STATE_RESTART_INIT;
3611                 } else {
3612                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
3613                 }
3614                 break;
3615
3616         case ANEG_STATE_RESTART_INIT:
3617                 ap->link_time = ap->cur_time;
3618                 ap->flags &= ~(MR_NP_LOADED);
3619                 ap->txconfig = 0;
3620                 tw32(MAC_TX_AUTO_NEG, 0);
3621                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3622                 tw32_f(MAC_MODE, tp->mac_mode);
3623                 udelay(40);
3624
3625                 ret = ANEG_TIMER_ENAB;
3626                 ap->state = ANEG_STATE_RESTART;
3627
3628                 /* fallthru */
3629         case ANEG_STATE_RESTART:
3630                 delta = ap->cur_time - ap->link_time;
3631                 if (delta > ANEG_STATE_SETTLE_TIME)
3632                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3633                 else
3634                         ret = ANEG_TIMER_ENAB;
3635                 break;
3636
3637         case ANEG_STATE_DISABLE_LINK_OK:
3638                 ret = ANEG_DONE;
3639                 break;
3640
3641         case ANEG_STATE_ABILITY_DETECT_INIT:
3642                 ap->flags &= ~(MR_TOGGLE_TX);
3643                 ap->txconfig = ANEG_CFG_FD;
3644                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3645                 if (flowctrl & ADVERTISE_1000XPAUSE)
3646                         ap->txconfig |= ANEG_CFG_PS1;
3647                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3648                         ap->txconfig |= ANEG_CFG_PS2;
3649                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3650                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3651                 tw32_f(MAC_MODE, tp->mac_mode);
3652                 udelay(40);
3653
3654                 ap->state = ANEG_STATE_ABILITY_DETECT;
3655                 break;
3656
3657         case ANEG_STATE_ABILITY_DETECT:
3658                 if (ap->ability_match != 0 && ap->rxconfig != 0)
3659                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
3660                 break;
3661
3662         case ANEG_STATE_ACK_DETECT_INIT:
3663                 ap->txconfig |= ANEG_CFG_ACK;
3664                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3665                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3666                 tw32_f(MAC_MODE, tp->mac_mode);
3667                 udelay(40);
3668
3669                 ap->state = ANEG_STATE_ACK_DETECT;
3670
3671                 /* fallthru */
3672         case ANEG_STATE_ACK_DETECT:
3673                 if (ap->ack_match != 0) {
3674                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3675                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3676                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3677                         } else {
3678                                 ap->state = ANEG_STATE_AN_ENABLE;
3679                         }
3680                 } else if (ap->ability_match != 0 &&
3681                            ap->rxconfig == 0) {
3682                         ap->state = ANEG_STATE_AN_ENABLE;
3683                 }
3684                 break;
3685
3686         case ANEG_STATE_COMPLETE_ACK_INIT:
3687                 if (ap->rxconfig & ANEG_CFG_INVAL) {
3688                         ret = ANEG_FAILED;
3689                         break;
3690                 }
3691                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3692                                MR_LP_ADV_HALF_DUPLEX |
3693                                MR_LP_ADV_SYM_PAUSE |
3694                                MR_LP_ADV_ASYM_PAUSE |
3695                                MR_LP_ADV_REMOTE_FAULT1 |
3696                                MR_LP_ADV_REMOTE_FAULT2 |
3697                                MR_LP_ADV_NEXT_PAGE |
3698                                MR_TOGGLE_RX |
3699                                MR_NP_RX);
3700                 if (ap->rxconfig & ANEG_CFG_FD)
3701                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3702                 if (ap->rxconfig & ANEG_CFG_HD)
3703                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3704                 if (ap->rxconfig & ANEG_CFG_PS1)
3705                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
3706                 if (ap->rxconfig & ANEG_CFG_PS2)
3707                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3708                 if (ap->rxconfig & ANEG_CFG_RF1)
3709                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3710                 if (ap->rxconfig & ANEG_CFG_RF2)
3711                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3712                 if (ap->rxconfig & ANEG_CFG_NP)
3713                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
3714
3715                 ap->link_time = ap->cur_time;
3716
3717                 ap->flags ^= (MR_TOGGLE_TX);
3718                 if (ap->rxconfig & 0x0008)
3719                         ap->flags |= MR_TOGGLE_RX;
3720                 if (ap->rxconfig & ANEG_CFG_NP)
3721                         ap->flags |= MR_NP_RX;
3722                 ap->flags |= MR_PAGE_RX;
3723
3724                 ap->state = ANEG_STATE_COMPLETE_ACK;
3725                 ret = ANEG_TIMER_ENAB;
3726                 break;
3727
3728         case ANEG_STATE_COMPLETE_ACK:
3729                 if (ap->ability_match != 0 &&
3730                     ap->rxconfig == 0) {
3731                         ap->state = ANEG_STATE_AN_ENABLE;
3732                         break;
3733                 }
3734                 delta = ap->cur_time - ap->link_time;
3735                 if (delta > ANEG_STATE_SETTLE_TIME) {
3736                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3737                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3738                         } else {
3739                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3740                                     !(ap->flags & MR_NP_RX)) {
3741                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3742                                 } else {
3743                                         ret = ANEG_FAILED;
3744                                 }
3745                         }
3746                 }
3747                 break;
3748
3749         case ANEG_STATE_IDLE_DETECT_INIT:
3750                 ap->link_time = ap->cur_time;
3751                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3752                 tw32_f(MAC_MODE, tp->mac_mode);
3753                 udelay(40);
3754
3755                 ap->state = ANEG_STATE_IDLE_DETECT;
3756                 ret = ANEG_TIMER_ENAB;
3757                 break;
3758
3759         case ANEG_STATE_IDLE_DETECT:
3760                 if (ap->ability_match != 0 &&
3761                     ap->rxconfig == 0) {
3762                         ap->state = ANEG_STATE_AN_ENABLE;
3763                         break;
3764                 }
3765                 delta = ap->cur_time - ap->link_time;
3766                 if (delta > ANEG_STATE_SETTLE_TIME) {
3767                         /* XXX another gem from the Broadcom driver :( */
3768                         ap->state = ANEG_STATE_LINK_OK;
3769                 }
3770                 break;
3771
3772         case ANEG_STATE_LINK_OK:
3773                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3774                 ret = ANEG_DONE;
3775                 break;
3776
3777         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3778                 /* ??? unimplemented */
3779                 break;
3780
3781         case ANEG_STATE_NEXT_PAGE_WAIT:
3782                 /* ??? unimplemented */
3783                 break;
3784
3785         default:
3786                 ret = ANEG_FAILED;
3787                 break;
3788         }
3789
3790         return ret;
3791 }
3792
3793 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3794 {
3795         int res = 0;
3796         struct tg3_fiber_aneginfo aninfo;
3797         int status = ANEG_FAILED;
3798         unsigned int tick;
3799         u32 tmp;
3800
3801         tw32_f(MAC_TX_AUTO_NEG, 0);
3802
3803         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3804         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3805         udelay(40);
3806
3807         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3808         udelay(40);
3809
3810         memset(&aninfo, 0, sizeof(aninfo));
3811         aninfo.flags |= MR_AN_ENABLE;
3812         aninfo.state = ANEG_STATE_UNKNOWN;
3813         aninfo.cur_time = 0;
3814         tick = 0;
3815         while (++tick < 195000) {
3816                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3817                 if (status == ANEG_DONE || status == ANEG_FAILED)
3818                         break;
3819
3820                 udelay(1);
3821         }
3822
3823         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3824         tw32_f(MAC_MODE, tp->mac_mode);
3825         udelay(40);
3826
3827         *txflags = aninfo.txconfig;
3828         *rxflags = aninfo.flags;
3829
3830         if (status == ANEG_DONE &&
3831             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3832                              MR_LP_ADV_FULL_DUPLEX)))
3833                 res = 1;
3834
3835         return res;
3836 }
3837
3838 static void tg3_init_bcm8002(struct tg3 *tp)
3839 {
3840         u32 mac_status = tr32(MAC_STATUS);
3841         int i;
3842
3843         /* Reset when initting first time or we have a link. */
3844         if (tg3_flag(tp, INIT_COMPLETE) &&
3845             !(mac_status & MAC_STATUS_PCS_SYNCED))
3846                 return;
3847
3848         /* Set PLL lock range. */
3849         tg3_writephy(tp, 0x16, 0x8007);
3850
3851         /* SW reset */
3852         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3853
3854         /* Wait for reset to complete. */
3855         /* XXX schedule_timeout() ... */
3856         for (i = 0; i < 500; i++)
3857                 udelay(10);
3858
3859         /* Config mode; select PMA/Ch 1 regs. */
3860         tg3_writephy(tp, 0x10, 0x8411);
3861
3862         /* Enable auto-lock and comdet, select txclk for tx. */
3863         tg3_writephy(tp, 0x11, 0x0a10);
3864
3865         tg3_writephy(tp, 0x18, 0x00a0);
3866         tg3_writephy(tp, 0x16, 0x41ff);
3867
3868         /* Assert and deassert POR. */
3869         tg3_writephy(tp, 0x13, 0x0400);
3870         udelay(40);
3871         tg3_writephy(tp, 0x13, 0x0000);
3872
3873         tg3_writephy(tp, 0x11, 0x0a50);
3874         udelay(40);
3875         tg3_writephy(tp, 0x11, 0x0a10);
3876
3877         /* Wait for signal to stabilize */
3878         /* XXX schedule_timeout() ... */
3879         for (i = 0; i < 15000; i++)
3880                 udelay(10);
3881
3882         /* Deselect the channel register so we can read the PHYID
3883          * later.
3884          */
3885         tg3_writephy(tp, 0x10, 0x8011);
3886 }
3887
3888 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3889 {
3890         u16 flowctrl;
3891         u32 sg_dig_ctrl, sg_dig_status;
3892         u32 serdes_cfg, expected_sg_dig_ctrl;
3893         int workaround, port_a;
3894         int current_link_up;
3895
3896         serdes_cfg = 0;
3897         expected_sg_dig_ctrl = 0;
3898         workaround = 0;
3899         port_a = 1;
3900         current_link_up = 0;
3901
3902         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3903             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3904                 workaround = 1;
3905                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3906                         port_a = 0;
3907
3908                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3909                 /* preserve bits 20-23 for voltage regulator */
3910                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3911         }
3912
3913         sg_dig_ctrl = tr32(SG_DIG_CTRL);
3914
3915         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3916                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3917                         if (workaround) {
3918                                 u32 val = serdes_cfg;
3919
3920                                 if (port_a)
3921                                         val |= 0xc010000;
3922                                 else
3923                                         val |= 0x4010000;
3924                                 tw32_f(MAC_SERDES_CFG, val);
3925                         }
3926
3927                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3928                 }
3929                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3930                         tg3_setup_flow_control(tp, 0, 0);
3931                         current_link_up = 1;
3932                 }
3933                 goto out;
3934         }
3935
3936         /* Want auto-negotiation.  */
3937         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3938
3939         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3940         if (flowctrl & ADVERTISE_1000XPAUSE)
3941                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3942         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3943                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3944
3945         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3946                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3947                     tp->serdes_counter &&
3948                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
3949                                     MAC_STATUS_RCVD_CFG)) ==
3950                      MAC_STATUS_PCS_SYNCED)) {
3951                         tp->serdes_counter--;
3952                         current_link_up = 1;
3953                         goto out;
3954                 }
3955 restart_autoneg:
3956                 if (workaround)
3957                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3958                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3959                 udelay(5);
3960                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3961
3962                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3963                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3964         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3965                                  MAC_STATUS_SIGNAL_DET)) {
3966                 sg_dig_status = tr32(SG_DIG_STATUS);
3967                 mac_status = tr32(MAC_STATUS);
3968
3969                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3970                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
3971                         u32 local_adv = 0, remote_adv = 0;
3972
3973                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3974                                 local_adv |= ADVERTISE_1000XPAUSE;
3975                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3976                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
3977
3978                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3979                                 remote_adv |= LPA_1000XPAUSE;
3980                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3981                                 remote_adv |= LPA_1000XPAUSE_ASYM;
3982
3983                         tg3_setup_flow_control(tp, local_adv, remote_adv);
3984                         current_link_up = 1;
3985                         tp->serdes_counter = 0;
3986                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3987                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3988                         if (tp->serdes_counter)
3989                                 tp->serdes_counter--;
3990                         else {
3991                                 if (workaround) {
3992                                         u32 val = serdes_cfg;
3993
3994                                         if (port_a)
3995                                                 val |= 0xc010000;
3996                                         else
3997                                                 val |= 0x4010000;
3998
3999                                         tw32_f(MAC_SERDES_CFG, val);
4000                                 }
4001
4002                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4003                                 udelay(40);
4004
4005                                 /* Link parallel detection - link is up */
4006                                 /* only if we have PCS_SYNC and not */
4007                                 /* receiving config code words */
4008                                 mac_status = tr32(MAC_STATUS);
4009                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4010                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4011                                         tg3_setup_flow_control(tp, 0, 0);
4012                                         current_link_up = 1;
4013                                         tp->phy_flags |=
4014                                                 TG3_PHYFLG_PARALLEL_DETECT;
4015                                         tp->serdes_counter =
4016                                                 SERDES_PARALLEL_DET_TIMEOUT;
4017                                 } else
4018                                         goto restart_autoneg;
4019                         }
4020                 }
4021         } else {
4022                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4023                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4024         }
4025
4026 out:
4027         return current_link_up;
4028 }
4029
4030 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4031 {
4032         int current_link_up = 0;
4033
4034         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4035                 goto out;
4036
4037         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4038                 u32 txflags, rxflags;
4039                 int i;
4040
4041                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4042                         u32 local_adv = 0, remote_adv = 0;
4043
4044                         if (txflags & ANEG_CFG_PS1)
4045                                 local_adv |= ADVERTISE_1000XPAUSE;
4046                         if (txflags & ANEG_CFG_PS2)
4047                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4048
4049                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
4050                                 remote_adv |= LPA_1000XPAUSE;
4051                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4052                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4053
4054                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4055
4056                         current_link_up = 1;
4057                 }
4058                 for (i = 0; i < 30; i++) {
4059                         udelay(20);
4060                         tw32_f(MAC_STATUS,
4061                                (MAC_STATUS_SYNC_CHANGED |
4062                                 MAC_STATUS_CFG_CHANGED));
4063                         udelay(40);
4064                         if ((tr32(MAC_STATUS) &
4065                              (MAC_STATUS_SYNC_CHANGED |
4066                               MAC_STATUS_CFG_CHANGED)) == 0)
4067                                 break;
4068                 }
4069
4070                 mac_status = tr32(MAC_STATUS);
4071                 if (current_link_up == 0 &&
4072                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
4073                     !(mac_status & MAC_STATUS_RCVD_CFG))
4074                         current_link_up = 1;
4075         } else {
4076                 tg3_setup_flow_control(tp, 0, 0);
4077
4078                 /* Forcing 1000FD link up. */
4079                 current_link_up = 1;
4080
4081                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4082                 udelay(40);
4083
4084                 tw32_f(MAC_MODE, tp->mac_mode);
4085                 udelay(40);
4086         }
4087
4088 out:
4089         return current_link_up;
4090 }
4091
4092 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4093 {
4094         u32 orig_pause_cfg;
4095         u16 orig_active_speed;
4096         u8 orig_active_duplex;
4097         u32 mac_status;
4098         int current_link_up;
4099         int i;
4100
4101         orig_pause_cfg = tp->link_config.active_flowctrl;
4102         orig_active_speed = tp->link_config.active_speed;
4103         orig_active_duplex = tp->link_config.active_duplex;
4104
4105         if (!tg3_flag(tp, HW_AUTONEG) &&
4106             netif_carrier_ok(tp->dev) &&
4107             tg3_flag(tp, INIT_COMPLETE)) {
4108                 mac_status = tr32(MAC_STATUS);
4109                 mac_status &= (MAC_STATUS_PCS_SYNCED |
4110                                MAC_STATUS_SIGNAL_DET |
4111                                MAC_STATUS_CFG_CHANGED |
4112                                MAC_STATUS_RCVD_CFG);
4113                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4114                                    MAC_STATUS_SIGNAL_DET)) {
4115                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4116                                             MAC_STATUS_CFG_CHANGED));
4117                         return 0;
4118                 }
4119         }
4120
4121         tw32_f(MAC_TX_AUTO_NEG, 0);
4122
4123         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4124         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4125         tw32_f(MAC_MODE, tp->mac_mode);
4126         udelay(40);
4127
4128         if (tp->phy_id == TG3_PHY_ID_BCM8002)
4129                 tg3_init_bcm8002(tp);
4130
4131         /* Enable link change event even when serdes polling.  */
4132         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4133         udelay(40);
4134
4135         current_link_up = 0;
4136         mac_status = tr32(MAC_STATUS);
4137
4138         if (tg3_flag(tp, HW_AUTONEG))
4139                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4140         else
4141                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4142
4143         tp->napi[0].hw_status->status =
4144                 (SD_STATUS_UPDATED |
4145                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4146
4147         for (i = 0; i < 100; i++) {
4148                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4149                                     MAC_STATUS_CFG_CHANGED));
4150                 udelay(5);
4151                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4152                                          MAC_STATUS_CFG_CHANGED |
4153                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4154                         break;
4155         }
4156
4157         mac_status = tr32(MAC_STATUS);
4158         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4159                 current_link_up = 0;
4160                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4161                     tp->serdes_counter == 0) {
4162                         tw32_f(MAC_MODE, (tp->mac_mode |
4163                                           MAC_MODE_SEND_CONFIGS));
4164                         udelay(1);
4165                         tw32_f(MAC_MODE, tp->mac_mode);
4166                 }
4167         }
4168
4169         if (current_link_up == 1) {
4170                 tp->link_config.active_speed = SPEED_1000;
4171                 tp->link_config.active_duplex = DUPLEX_FULL;
4172                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4173                                     LED_CTRL_LNKLED_OVERRIDE |
4174                                     LED_CTRL_1000MBPS_ON));
4175         } else {
4176                 tp->link_config.active_speed = SPEED_INVALID;
4177                 tp->link_config.active_duplex = DUPLEX_INVALID;
4178                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4179                                     LED_CTRL_LNKLED_OVERRIDE |
4180                                     LED_CTRL_TRAFFIC_OVERRIDE));
4181         }
4182
4183         if (current_link_up != netif_carrier_ok(tp->dev)) {
4184                 if (current_link_up)
4185                         netif_carrier_on(tp->dev);
4186                 else
4187                         netif_carrier_off(tp->dev);
4188                 tg3_link_report(tp);
4189         } else {
4190                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4191                 if (orig_pause_cfg != now_pause_cfg ||
4192                     orig_active_speed != tp->link_config.active_speed ||
4193                     orig_active_duplex != tp->link_config.active_duplex)
4194                         tg3_link_report(tp);
4195         }
4196
4197         return 0;
4198 }
4199
4200 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4201 {
4202         int current_link_up, err = 0;
4203         u32 bmsr, bmcr;
4204         u16 current_speed;
4205         u8 current_duplex;
4206         u32 local_adv, remote_adv;
4207
4208         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4209         tw32_f(MAC_MODE, tp->mac_mode);
4210         udelay(40);
4211
4212         tw32(MAC_EVENT, 0);
4213
4214         tw32_f(MAC_STATUS,
4215              (MAC_STATUS_SYNC_CHANGED |
4216               MAC_STATUS_CFG_CHANGED |
4217               MAC_STATUS_MI_COMPLETION |
4218               MAC_STATUS_LNKSTATE_CHANGED));
4219         udelay(40);
4220
4221         if (force_reset)
4222                 tg3_phy_reset(tp);
4223
4224         current_link_up = 0;
4225         current_speed = SPEED_INVALID;
4226         current_duplex = DUPLEX_INVALID;
4227
4228         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4229         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4230         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4231                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4232                         bmsr |= BMSR_LSTATUS;
4233                 else
4234                         bmsr &= ~BMSR_LSTATUS;
4235         }
4236
4237         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4238
4239         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4240             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4241                 /* do nothing, just check for link up at the end */
4242         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4243                 u32 adv, new_adv;
4244
4245                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4246                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4247                                   ADVERTISE_1000XPAUSE |
4248                                   ADVERTISE_1000XPSE_ASYM |
4249                                   ADVERTISE_SLCT);
4250
4251                 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4252
4253                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4254                         new_adv |= ADVERTISE_1000XHALF;
4255                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4256                         new_adv |= ADVERTISE_1000XFULL;
4257
4258                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4259                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
4260                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4261                         tg3_writephy(tp, MII_BMCR, bmcr);
4262
4263                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4264                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4265                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4266
4267                         return err;
4268                 }
4269         } else {
4270                 u32 new_bmcr;
4271
4272                 bmcr &= ~BMCR_SPEED1000;
4273                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4274
4275                 if (tp->link_config.duplex == DUPLEX_FULL)
4276                         new_bmcr |= BMCR_FULLDPLX;
4277
4278                 if (new_bmcr != bmcr) {
4279                         /* BMCR_SPEED1000 is a reserved bit that needs
4280                          * to be set on write.
4281                          */
4282                         new_bmcr |= BMCR_SPEED1000;
4283
4284                         /* Force a linkdown */
4285                         if (netif_carrier_ok(tp->dev)) {
4286                                 u32 adv;
4287
4288                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4289                                 adv &= ~(ADVERTISE_1000XFULL |
4290                                          ADVERTISE_1000XHALF |
4291                                          ADVERTISE_SLCT);
4292                                 tg3_writephy(tp, MII_ADVERTISE, adv);
4293                                 tg3_writephy(tp, MII_BMCR, bmcr |
4294                                                            BMCR_ANRESTART |
4295                                                            BMCR_ANENABLE);
4296                                 udelay(10);
4297                                 netif_carrier_off(tp->dev);
4298                         }
4299                         tg3_writephy(tp, MII_BMCR, new_bmcr);
4300                         bmcr = new_bmcr;
4301                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4302                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4303                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4304                             ASIC_REV_5714) {
4305                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4306                                         bmsr |= BMSR_LSTATUS;
4307                                 else
4308                                         bmsr &= ~BMSR_LSTATUS;
4309                         }
4310                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4311                 }
4312         }
4313
4314         if (bmsr & BMSR_LSTATUS) {
4315                 current_speed = SPEED_1000;
4316                 current_link_up = 1;
4317                 if (bmcr & BMCR_FULLDPLX)
4318                         current_duplex = DUPLEX_FULL;
4319                 else
4320                         current_duplex = DUPLEX_HALF;
4321
4322                 local_adv = 0;
4323                 remote_adv = 0;
4324
4325                 if (bmcr & BMCR_ANENABLE) {
4326                         u32 common;
4327
4328                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4329                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4330                         common = local_adv & remote_adv;
4331                         if (common & (ADVERTISE_1000XHALF |
4332                                       ADVERTISE_1000XFULL)) {
4333                                 if (common & ADVERTISE_1000XFULL)
4334                                         current_duplex = DUPLEX_FULL;
4335                                 else
4336                                         current_duplex = DUPLEX_HALF;
4337                         } else if (!tg3_flag(tp, 5780_CLASS)) {
4338                                 /* Link is up via parallel detect */
4339                         } else {
4340                                 current_link_up = 0;
4341                         }
4342                 }
4343         }
4344
4345         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4346                 tg3_setup_flow_control(tp, local_adv, remote_adv);
4347
4348         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4349         if (tp->link_config.active_duplex == DUPLEX_HALF)
4350                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4351
4352         tw32_f(MAC_MODE, tp->mac_mode);
4353         udelay(40);
4354
4355         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4356
4357         tp->link_config.active_speed = current_speed;
4358         tp->link_config.active_duplex = current_duplex;
4359
4360         if (current_link_up != netif_carrier_ok(tp->dev)) {
4361                 if (current_link_up)
4362                         netif_carrier_on(tp->dev);
4363                 else {
4364                         netif_carrier_off(tp->dev);
4365                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4366                 }
4367                 tg3_link_report(tp);
4368         }
4369         return err;
4370 }
4371
4372 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4373 {
4374         if (tp->serdes_counter) {
4375                 /* Give autoneg time to complete. */
4376                 tp->serdes_counter--;
4377                 return;
4378         }
4379
4380         if (!netif_carrier_ok(tp->dev) &&
4381             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4382                 u32 bmcr;
4383
4384                 tg3_readphy(tp, MII_BMCR, &bmcr);
4385                 if (bmcr & BMCR_ANENABLE) {
4386                         u32 phy1, phy2;
4387
4388                         /* Select shadow register 0x1f */
4389                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4390                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4391
4392                         /* Select expansion interrupt status register */
4393                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4394                                          MII_TG3_DSP_EXP1_INT_STAT);
4395                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4396                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4397
4398                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4399                                 /* We have signal detect and not receiving
4400                                  * config code words, link is up by parallel
4401                                  * detection.
4402                                  */
4403
4404                                 bmcr &= ~BMCR_ANENABLE;
4405                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4406                                 tg3_writephy(tp, MII_BMCR, bmcr);
4407                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4408                         }
4409                 }
4410         } else if (netif_carrier_ok(tp->dev) &&
4411                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4412                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4413                 u32 phy2;
4414
4415                 /* Select expansion interrupt status register */
4416                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4417                                  MII_TG3_DSP_EXP1_INT_STAT);
4418                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4419                 if (phy2 & 0x20) {
4420                         u32 bmcr;
4421
4422                         /* Config code words received, turn on autoneg. */
4423                         tg3_readphy(tp, MII_BMCR, &bmcr);
4424                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4425
4426                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4427
4428                 }
4429         }
4430 }
4431
4432 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4433 {
4434         u32 val;
4435         int err;
4436
4437         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4438                 err = tg3_setup_fiber_phy(tp, force_reset);
4439         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4440                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4441         else
4442                 err = tg3_setup_copper_phy(tp, force_reset);
4443
4444         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4445                 u32 scale;
4446
4447                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4448                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4449                         scale = 65;
4450                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4451                         scale = 6;
4452                 else
4453                         scale = 12;
4454
4455                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4456                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4457                 tw32(GRC_MISC_CFG, val);
4458         }
4459
4460         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4461               (6 << TX_LENGTHS_IPG_SHIFT);
4462         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4463                 val |= tr32(MAC_TX_LENGTHS) &
4464                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
4465                         TX_LENGTHS_CNT_DWN_VAL_MSK);
4466
4467         if (tp->link_config.active_speed == SPEED_1000 &&
4468             tp->link_config.active_duplex == DUPLEX_HALF)
4469                 tw32(MAC_TX_LENGTHS, val |
4470                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
4471         else
4472                 tw32(MAC_TX_LENGTHS, val |
4473                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4474
4475         if (!tg3_flag(tp, 5705_PLUS)) {
4476                 if (netif_carrier_ok(tp->dev)) {
4477                         tw32(HOSTCC_STAT_COAL_TICKS,
4478                              tp->coal.stats_block_coalesce_usecs);
4479                 } else {
4480                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
4481                 }
4482         }
4483
4484         if (tg3_flag(tp, ASPM_WORKAROUND)) {
4485                 val = tr32(PCIE_PWR_MGMT_THRESH);
4486                 if (!netif_carrier_ok(tp->dev))
4487                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4488                               tp->pwrmgmt_thresh;
4489                 else
4490                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4491                 tw32(PCIE_PWR_MGMT_THRESH, val);
4492         }
4493
4494         return err;
4495 }
4496
4497 static inline int tg3_irq_sync(struct tg3 *tp)
4498 {
4499         return tp->irq_sync;
4500 }
4501
4502 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4503 {
4504         int i;
4505
4506         dst = (u32 *)((u8 *)dst + off);
4507         for (i = 0; i < len; i += sizeof(u32))
4508                 *dst++ = tr32(off + i);
4509 }
4510
4511 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4512 {
4513         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4514         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4515         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4516         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4517         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4518         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4519         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4520         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4521         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4522         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4523         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4524         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4525         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4526         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4527         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4528         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4529         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4530         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4531         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4532
4533         if (tg3_flag(tp, SUPPORT_MSIX))
4534                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4535
4536         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4537         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4538         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4539         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4540         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4541         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4542         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4543         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4544
4545         if (!tg3_flag(tp, 5705_PLUS)) {
4546                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4547                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4548                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4549         }
4550
4551         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4552         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4553         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4554         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4555         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4556
4557         if (tg3_flag(tp, NVRAM))
4558                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4559 }
4560
4561 static void tg3_dump_state(struct tg3 *tp)
4562 {
4563         int i;
4564         u32 *regs;
4565
4566         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4567         if (!regs) {
4568                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4569                 return;
4570         }
4571
4572         if (tg3_flag(tp, PCI_EXPRESS)) {
4573                 /* Read up to but not including private PCI registers */
4574                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4575                         regs[i / sizeof(u32)] = tr32(i);
4576         } else
4577                 tg3_dump_legacy_regs(tp, regs);
4578
4579         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4580                 if (!regs[i + 0] && !regs[i + 1] &&
4581                     !regs[i + 2] && !regs[i + 3])
4582                         continue;
4583
4584                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4585                            i * 4,
4586                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4587         }
4588
4589         kfree(regs);
4590
4591         for (i = 0; i < tp->irq_cnt; i++) {
4592                 struct tg3_napi *tnapi = &tp->napi[i];
4593
4594                 /* SW status block */
4595                 netdev_err(tp->dev,
4596                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4597                            i,
4598                            tnapi->hw_status->status,
4599                            tnapi->hw_status->status_tag,
4600                            tnapi->hw_status->rx_jumbo_consumer,
4601                            tnapi->hw_status->rx_consumer,
4602                            tnapi->hw_status->rx_mini_consumer,
4603                            tnapi->hw_status->idx[0].rx_producer,
4604                            tnapi->hw_status->idx[0].tx_consumer);
4605
4606                 netdev_err(tp->dev,
4607                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4608                            i,
4609                            tnapi->last_tag, tnapi->last_irq_tag,
4610                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4611                            tnapi->rx_rcb_ptr,
4612                            tnapi->prodring.rx_std_prod_idx,
4613                            tnapi->prodring.rx_std_cons_idx,
4614                            tnapi->prodring.rx_jmb_prod_idx,
4615                            tnapi->prodring.rx_jmb_cons_idx);
4616         }
4617 }
4618
4619 /* This is called whenever we suspect that the system chipset is re-
4620  * ordering the sequence of MMIO to the tx send mailbox. The symptom
4621  * is bogus tx completions. We try to recover by setting the
4622  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4623  * in the workqueue.
4624  */
4625 static void tg3_tx_recover(struct tg3 *tp)
4626 {
4627         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
4628                tp->write32_tx_mbox == tg3_write_indirect_mbox);
4629
4630         netdev_warn(tp->dev,
4631                     "The system may be re-ordering memory-mapped I/O "
4632                     "cycles to the network device, attempting to recover. "
4633                     "Please report the problem to the driver maintainer "
4634                     "and include system chipset information.\n");
4635
4636         spin_lock(&tp->lock);
4637         tg3_flag_set(tp, TX_RECOVERY_PENDING);
4638         spin_unlock(&tp->lock);
4639 }
4640
4641 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4642 {
4643         /* Tell compiler to fetch tx indices from memory. */
4644         barrier();
4645         return tnapi->tx_pending -
4646                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4647 }
4648
4649 /* Tigon3 never reports partial packet sends.  So we do not
4650  * need special logic to handle SKBs that have not had all
4651  * of their frags sent yet, like SunGEM does.
4652  */
4653 static void tg3_tx(struct tg3_napi *tnapi)
4654 {
4655         struct tg3 *tp = tnapi->tp;
4656         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4657         u32 sw_idx = tnapi->tx_cons;
4658         struct netdev_queue *txq;
4659         int index = tnapi - tp->napi;
4660
4661         if (tg3_flag(tp, ENABLE_TSS))
4662                 index--;
4663
4664         txq = netdev_get_tx_queue(tp->dev, index);
4665
4666         while (sw_idx != hw_idx) {
4667                 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4668                 struct sk_buff *skb = ri->skb;
4669                 int i, tx_bug = 0;
4670
4671                 if (unlikely(skb == NULL)) {
4672                         tg3_tx_recover(tp);
4673                         return;
4674                 }
4675
4676                 pci_unmap_single(tp->pdev,
4677                                  dma_unmap_addr(ri, mapping),
4678                                  skb_headlen(skb),
4679                                  PCI_DMA_TODEVICE);
4680
4681                 ri->skb = NULL;
4682
4683                 sw_idx = NEXT_TX(sw_idx);
4684
4685                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4686                         ri = &tnapi->tx_buffers[sw_idx];
4687                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4688                                 tx_bug = 1;
4689
4690                         pci_unmap_page(tp->pdev,
4691                                        dma_unmap_addr(ri, mapping),
4692                                        skb_shinfo(skb)->frags[i].size,
4693                                        PCI_DMA_TODEVICE);
4694                         sw_idx = NEXT_TX(sw_idx);
4695                 }
4696
4697                 dev_kfree_skb(skb);
4698
4699                 if (unlikely(tx_bug)) {
4700                         tg3_tx_recover(tp);
4701                         return;
4702                 }
4703         }
4704
4705         tnapi->tx_cons = sw_idx;
4706
4707         /* Need to make the tx_cons update visible to tg3_start_xmit()
4708          * before checking for netif_queue_stopped().  Without the
4709          * memory barrier, there is a small possibility that tg3_start_xmit()
4710          * will miss it and cause the queue to be stopped forever.
4711          */
4712         smp_mb();
4713
4714         if (unlikely(netif_tx_queue_stopped(txq) &&
4715                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4716                 __netif_tx_lock(txq, smp_processor_id());
4717                 if (netif_tx_queue_stopped(txq) &&
4718                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4719                         netif_tx_wake_queue(txq);
4720                 __netif_tx_unlock(txq);
4721         }
4722 }
4723
4724 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4725 {
4726         if (!ri->skb)
4727                 return;
4728
4729         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4730                          map_sz, PCI_DMA_FROMDEVICE);
4731         dev_kfree_skb_any(ri->skb);
4732         ri->skb = NULL;
4733 }
4734
4735 /* Returns size of skb allocated or < 0 on error.
4736  *
4737  * We only need to fill in the address because the other members
4738  * of the RX descriptor are invariant, see tg3_init_rings.
4739  *
4740  * Note the purposeful assymetry of cpu vs. chip accesses.  For
4741  * posting buffers we only dirty the first cache line of the RX
4742  * descriptor (containing the address).  Whereas for the RX status
4743  * buffers the cpu only reads the last cacheline of the RX descriptor
4744  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4745  */
4746 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4747                             u32 opaque_key, u32 dest_idx_unmasked)
4748 {
4749         struct tg3_rx_buffer_desc *desc;
4750         struct ring_info *map;
4751         struct sk_buff *skb;
4752         dma_addr_t mapping;
4753         int skb_size, dest_idx;
4754
4755         switch (opaque_key) {
4756         case RXD_OPAQUE_RING_STD:
4757                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4758                 desc = &tpr->rx_std[dest_idx];
4759                 map = &tpr->rx_std_buffers[dest_idx];
4760                 skb_size = tp->rx_pkt_map_sz;
4761                 break;
4762
4763         case RXD_OPAQUE_RING_JUMBO:
4764                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4765                 desc = &tpr->rx_jmb[dest_idx].std;
4766                 map = &tpr->rx_jmb_buffers[dest_idx];
4767                 skb_size = TG3_RX_JMB_MAP_SZ;
4768                 break;
4769
4770         default:
4771                 return -EINVAL;
4772         }
4773
4774         /* Do not overwrite any of the map or rp information
4775          * until we are sure we can commit to a new buffer.
4776          *
4777          * Callers depend upon this behavior and assume that
4778          * we leave everything unchanged if we fail.
4779          */
4780         skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4781         if (skb == NULL)
4782                 return -ENOMEM;
4783
4784         skb_reserve(skb, tp->rx_offset);
4785
4786         mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4787                                  PCI_DMA_FROMDEVICE);
4788         if (pci_dma_mapping_error(tp->pdev, mapping)) {
4789                 dev_kfree_skb(skb);
4790                 return -EIO;
4791         }
4792
4793         map->skb = skb;
4794         dma_unmap_addr_set(map, mapping, mapping);
4795
4796         desc->addr_hi = ((u64)mapping >> 32);
4797         desc->addr_lo = ((u64)mapping & 0xffffffff);
4798
4799         return skb_size;
4800 }
4801
4802 /* We only need to move over in the address because the other
4803  * members of the RX descriptor are invariant.  See notes above
4804  * tg3_alloc_rx_skb for full details.
4805  */
4806 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4807                            struct tg3_rx_prodring_set *dpr,
4808                            u32 opaque_key, int src_idx,
4809                            u32 dest_idx_unmasked)
4810 {
4811         struct tg3 *tp = tnapi->tp;
4812         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4813         struct ring_info *src_map, *dest_map;
4814         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4815         int dest_idx;
4816
4817         switch (opaque_key) {
4818         case RXD_OPAQUE_RING_STD:
4819                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4820                 dest_desc = &dpr->rx_std[dest_idx];
4821                 dest_map = &dpr->rx_std_buffers[dest_idx];
4822                 src_desc = &spr->rx_std[src_idx];
4823                 src_map = &spr->rx_std_buffers[src_idx];
4824                 break;
4825
4826         case RXD_OPAQUE_RING_JUMBO:
4827                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4828                 dest_desc = &dpr->rx_jmb[dest_idx].std;
4829                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4830                 src_desc = &spr->rx_jmb[src_idx].std;
4831                 src_map = &spr->rx_jmb_buffers[src_idx];
4832                 break;
4833
4834         default:
4835                 return;
4836         }
4837
4838         dest_map->skb = src_map->skb;
4839         dma_unmap_addr_set(dest_map, mapping,
4840                            dma_unmap_addr(src_map, mapping));
4841         dest_desc->addr_hi = src_desc->addr_hi;
4842         dest_desc->addr_lo = src_desc->addr_lo;
4843
4844         /* Ensure that the update to the skb happens after the physical
4845          * addresses have been transferred to the new BD location.
4846          */
4847         smp_wmb();
4848
4849         src_map->skb = NULL;
4850 }
4851
4852 /* The RX ring scheme is composed of multiple rings which post fresh
4853  * buffers to the chip, and one special ring the chip uses to report
4854  * status back to the host.
4855  *
4856  * The special ring reports the status of received packets to the
4857  * host.  The chip does not write into the original descriptor the
4858  * RX buffer was obtained from.  The chip simply takes the original
4859  * descriptor as provided by the host, updates the status and length
4860  * field, then writes this into the next status ring entry.
4861  *
4862  * Each ring the host uses to post buffers to the chip is described
4863  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
4864  * it is first placed into the on-chip ram.  When the packet's length
4865  * is known, it walks down the TG3_BDINFO entries to select the ring.
4866  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4867  * which is within the range of the new packet's length is chosen.
4868  *
4869  * The "separate ring for rx status" scheme may sound queer, but it makes
4870  * sense from a cache coherency perspective.  If only the host writes
4871  * to the buffer post rings, and only the chip writes to the rx status
4872  * rings, then cache lines never move beyond shared-modified state.
4873  * If both the host and chip were to write into the same ring, cache line
4874  * eviction could occur since both entities want it in an exclusive state.
4875  */
4876 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4877 {
4878         struct tg3 *tp = tnapi->tp;
4879         u32 work_mask, rx_std_posted = 0;
4880         u32 std_prod_idx, jmb_prod_idx;
4881         u32 sw_idx = tnapi->rx_rcb_ptr;
4882         u16 hw_idx;
4883         int received;
4884         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
4885
4886         hw_idx = *(tnapi->rx_rcb_prod_idx);
4887         /*
4888          * We need to order the read of hw_idx and the read of
4889          * the opaque cookie.
4890          */
4891         rmb();
4892         work_mask = 0;
4893         received = 0;
4894         std_prod_idx = tpr->rx_std_prod_idx;
4895         jmb_prod_idx = tpr->rx_jmb_prod_idx;
4896         while (sw_idx != hw_idx && budget > 0) {
4897                 struct ring_info *ri;
4898                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4899                 unsigned int len;
4900                 struct sk_buff *skb;
4901                 dma_addr_t dma_addr;
4902                 u32 opaque_key, desc_idx, *post_ptr;
4903
4904                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4905                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4906                 if (opaque_key == RXD_OPAQUE_RING_STD) {
4907                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
4908                         dma_addr = dma_unmap_addr(ri, mapping);
4909                         skb = ri->skb;
4910                         post_ptr = &std_prod_idx;
4911                         rx_std_posted++;
4912                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4913                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
4914                         dma_addr = dma_unmap_addr(ri, mapping);
4915                         skb = ri->skb;
4916                         post_ptr = &jmb_prod_idx;
4917                 } else
4918                         goto next_pkt_nopost;
4919
4920                 work_mask |= opaque_key;
4921
4922                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4923                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4924                 drop_it:
4925                         tg3_recycle_rx(tnapi, tpr, opaque_key,
4926                                        desc_idx, *post_ptr);
4927                 drop_it_no_recycle:
4928                         /* Other statistics kept track of by card. */
4929                         tp->rx_dropped++;
4930                         goto next_pkt;
4931                 }
4932
4933                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4934                       ETH_FCS_LEN;
4935
4936                 if (len > TG3_RX_COPY_THRESH(tp)) {
4937                         int skb_size;
4938
4939                         skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4940                                                     *post_ptr);
4941                         if (skb_size < 0)
4942                                 goto drop_it;
4943
4944                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
4945                                          PCI_DMA_FROMDEVICE);
4946
4947                         /* Ensure that the update to the skb happens
4948                          * after the usage of the old DMA mapping.
4949                          */
4950                         smp_wmb();
4951
4952                         ri->skb = NULL;
4953
4954                         skb_put(skb, len);
4955                 } else {
4956                         struct sk_buff *copy_skb;
4957
4958                         tg3_recycle_rx(tnapi, tpr, opaque_key,
4959                                        desc_idx, *post_ptr);
4960
4961                         copy_skb = netdev_alloc_skb(tp->dev, len +
4962                                                     TG3_RAW_IP_ALIGN);
4963                         if (copy_skb == NULL)
4964                                 goto drop_it_no_recycle;
4965
4966                         skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4967                         skb_put(copy_skb, len);
4968                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4969                         skb_copy_from_linear_data(skb, copy_skb->data, len);
4970                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4971
4972                         /* We'll reuse the original ring buffer. */
4973                         skb = copy_skb;
4974                 }
4975
4976                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
4977                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4978                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4979                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
4980                         skb->ip_summed = CHECKSUM_UNNECESSARY;
4981                 else
4982                         skb_checksum_none_assert(skb);
4983
4984                 skb->protocol = eth_type_trans(skb, tp->dev);
4985
4986                 if (len > (tp->dev->mtu + ETH_HLEN) &&
4987                     skb->protocol != htons(ETH_P_8021Q)) {
4988                         dev_kfree_skb(skb);
4989                         goto drop_it_no_recycle;
4990                 }
4991
4992                 if (desc->type_flags & RXD_FLAG_VLAN &&
4993                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
4994                         __vlan_hwaccel_put_tag(skb,
4995                                                desc->err_vlan & RXD_VLAN_MASK);
4996
4997                 napi_gro_receive(&tnapi->napi, skb);
4998
4999                 received++;
5000                 budget--;
5001
5002 next_pkt:
5003                 (*post_ptr)++;
5004
5005                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5006                         tpr->rx_std_prod_idx = std_prod_idx &
5007                                                tp->rx_std_ring_mask;
5008                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5009                                      tpr->rx_std_prod_idx);
5010                         work_mask &= ~RXD_OPAQUE_RING_STD;
5011                         rx_std_posted = 0;
5012                 }
5013 next_pkt_nopost:
5014                 sw_idx++;
5015                 sw_idx &= tp->rx_ret_ring_mask;
5016
5017                 /* Refresh hw_idx to see if there is new work */
5018                 if (sw_idx == hw_idx) {
5019                         hw_idx = *(tnapi->rx_rcb_prod_idx);
5020                         rmb();
5021                 }
5022         }
5023
5024         /* ACK the status ring. */
5025         tnapi->rx_rcb_ptr = sw_idx;
5026         tw32_rx_mbox(tnapi->consmbox, sw_idx);
5027
5028         /* Refill RX ring(s). */
5029         if (!tg3_flag(tp, ENABLE_RSS)) {
5030                 if (work_mask & RXD_OPAQUE_RING_STD) {
5031                         tpr->rx_std_prod_idx = std_prod_idx &
5032                                                tp->rx_std_ring_mask;
5033                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5034                                      tpr->rx_std_prod_idx);
5035                 }
5036                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5037                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
5038                                                tp->rx_jmb_ring_mask;
5039                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5040                                      tpr->rx_jmb_prod_idx);
5041                 }
5042                 mmiowb();
5043         } else if (work_mask) {
5044                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5045                  * updated before the producer indices can be updated.
5046                  */
5047                 smp_wmb();
5048
5049                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5050                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5051
5052                 if (tnapi != &tp->napi[1])
5053                         napi_schedule(&tp->napi[1].napi);
5054         }
5055
5056         return received;
5057 }
5058
5059 static void tg3_poll_link(struct tg3 *tp)
5060 {
5061         /* handle link change and other phy events */
5062         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5063                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5064
5065                 if (sblk->status & SD_STATUS_LINK_CHG) {
5066                         sblk->status = SD_STATUS_UPDATED |
5067                                        (sblk->status & ~SD_STATUS_LINK_CHG);
5068                         spin_lock(&tp->lock);
5069                         if (tg3_flag(tp, USE_PHYLIB)) {
5070                                 tw32_f(MAC_STATUS,
5071                                      (MAC_STATUS_SYNC_CHANGED |
5072                                       MAC_STATUS_CFG_CHANGED |
5073                                       MAC_STATUS_MI_COMPLETION |
5074                                       MAC_STATUS_LNKSTATE_CHANGED));
5075                                 udelay(40);
5076                         } else
5077                                 tg3_setup_phy(tp, 0);
5078                         spin_unlock(&tp->lock);
5079                 }
5080         }
5081 }
5082
5083 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5084                                 struct tg3_rx_prodring_set *dpr,
5085                                 struct tg3_rx_prodring_set *spr)
5086 {
5087         u32 si, di, cpycnt, src_prod_idx;
5088         int i, err = 0;
5089
5090         while (1) {
5091                 src_prod_idx = spr->rx_std_prod_idx;
5092
5093                 /* Make sure updates to the rx_std_buffers[] entries and the
5094                  * standard producer index are seen in the correct order.
5095                  */
5096                 smp_rmb();
5097
5098                 if (spr->rx_std_cons_idx == src_prod_idx)
5099                         break;
5100
5101                 if (spr->rx_std_cons_idx < src_prod_idx)
5102                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5103                 else
5104                         cpycnt = tp->rx_std_ring_mask + 1 -
5105                                  spr->rx_std_cons_idx;
5106
5107                 cpycnt = min(cpycnt,
5108                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5109
5110                 si = spr->rx_std_cons_idx;
5111                 di = dpr->rx_std_prod_idx;
5112
5113                 for (i = di; i < di + cpycnt; i++) {
5114                         if (dpr->rx_std_buffers[i].skb) {
5115                                 cpycnt = i - di;
5116                                 err = -ENOSPC;
5117                                 break;
5118                         }
5119                 }
5120
5121                 if (!cpycnt)
5122                         break;
5123
5124                 /* Ensure that updates to the rx_std_buffers ring and the
5125                  * shadowed hardware producer ring from tg3_recycle_skb() are
5126                  * ordered correctly WRT the skb check above.
5127                  */
5128                 smp_rmb();
5129
5130                 memcpy(&dpr->rx_std_buffers[di],
5131                        &spr->rx_std_buffers[si],
5132                        cpycnt * sizeof(struct ring_info));
5133
5134                 for (i = 0; i < cpycnt; i++, di++, si++) {
5135                         struct tg3_rx_buffer_desc *sbd, *dbd;
5136                         sbd = &spr->rx_std[si];
5137                         dbd = &dpr->rx_std[di];
5138                         dbd->addr_hi = sbd->addr_hi;
5139                         dbd->addr_lo = sbd->addr_lo;
5140                 }
5141
5142                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5143                                        tp->rx_std_ring_mask;
5144                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5145                                        tp->rx_std_ring_mask;
5146         }
5147
5148         while (1) {
5149                 src_prod_idx = spr->rx_jmb_prod_idx;
5150
5151                 /* Make sure updates to the rx_jmb_buffers[] entries and
5152                  * the jumbo producer index are seen in the correct order.
5153                  */
5154                 smp_rmb();
5155
5156                 if (spr->rx_jmb_cons_idx == src_prod_idx)
5157                         break;
5158
5159                 if (spr->rx_jmb_cons_idx < src_prod_idx)
5160                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5161                 else
5162                         cpycnt = tp->rx_jmb_ring_mask + 1 -
5163                                  spr->rx_jmb_cons_idx;
5164
5165                 cpycnt = min(cpycnt,
5166                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5167
5168                 si = spr->rx_jmb_cons_idx;
5169                 di = dpr->rx_jmb_prod_idx;
5170
5171                 for (i = di; i < di + cpycnt; i++) {
5172                         if (dpr->rx_jmb_buffers[i].skb) {
5173                                 cpycnt = i - di;
5174                                 err = -ENOSPC;
5175                                 break;
5176                         }
5177                 }
5178
5179                 if (!cpycnt)
5180                         break;
5181
5182                 /* Ensure that updates to the rx_jmb_buffers ring and the
5183                  * shadowed hardware producer ring from tg3_recycle_skb() are
5184                  * ordered correctly WRT the skb check above.
5185                  */
5186                 smp_rmb();
5187
5188                 memcpy(&dpr->rx_jmb_buffers[di],
5189                        &spr->rx_jmb_buffers[si],
5190                        cpycnt * sizeof(struct ring_info));
5191
5192                 for (i = 0; i < cpycnt; i++, di++, si++) {
5193                         struct tg3_rx_buffer_desc *sbd, *dbd;
5194                         sbd = &spr->rx_jmb[si].std;
5195                         dbd = &dpr->rx_jmb[di].std;
5196                         dbd->addr_hi = sbd->addr_hi;
5197                         dbd->addr_lo = sbd->addr_lo;
5198                 }
5199
5200                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5201                                        tp->rx_jmb_ring_mask;
5202                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5203                                        tp->rx_jmb_ring_mask;
5204         }
5205
5206         return err;
5207 }
5208
5209 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5210 {
5211         struct tg3 *tp = tnapi->tp;
5212
5213         /* run TX completion thread */
5214         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5215                 tg3_tx(tnapi);
5216                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5217                         return work_done;
5218         }
5219
5220         /* run RX thread, within the bounds set by NAPI.
5221          * All RX "locking" is done by ensuring outside
5222          * code synchronizes with tg3->napi.poll()
5223          */
5224         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5225                 work_done += tg3_rx(tnapi, budget - work_done);
5226
5227         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5228                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5229                 int i, err = 0;
5230                 u32 std_prod_idx = dpr->rx_std_prod_idx;
5231                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5232
5233                 for (i = 1; i < tp->irq_cnt; i++)
5234                         err |= tg3_rx_prodring_xfer(tp, dpr,
5235                                                     &tp->napi[i].prodring);
5236
5237                 wmb();
5238
5239                 if (std_prod_idx != dpr->rx_std_prod_idx)
5240                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5241                                      dpr->rx_std_prod_idx);
5242
5243                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5244                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5245                                      dpr->rx_jmb_prod_idx);
5246
5247                 mmiowb();
5248
5249                 if (err)
5250                         tw32_f(HOSTCC_MODE, tp->coal_now);
5251         }
5252
5253         return work_done;
5254 }
5255
5256 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5257 {
5258         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5259         struct tg3 *tp = tnapi->tp;
5260         int work_done = 0;
5261         struct tg3_hw_status *sblk = tnapi->hw_status;
5262
5263         while (1) {
5264                 work_done = tg3_poll_work(tnapi, work_done, budget);
5265
5266                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5267                         goto tx_recovery;
5268
5269                 if (unlikely(work_done >= budget))
5270                         break;
5271
5272                 /* tp->last_tag is used in tg3_int_reenable() below
5273                  * to tell the hw how much work has been processed,
5274                  * so we must read it before checking for more work.
5275                  */
5276                 tnapi->last_tag = sblk->status_tag;
5277                 tnapi->last_irq_tag = tnapi->last_tag;
5278                 rmb();
5279
5280                 /* check for RX/TX work to do */
5281                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5282                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5283                         napi_complete(napi);
5284                         /* Reenable interrupts. */
5285                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5286                         mmiowb();
5287                         break;
5288                 }
5289         }
5290
5291         return work_done;
5292
5293 tx_recovery:
5294         /* work_done is guaranteed to be less than budget. */
5295         napi_complete(napi);
5296         schedule_work(&tp->reset_task);
5297         return work_done;
5298 }
5299
5300 static void tg3_process_error(struct tg3 *tp)
5301 {
5302         u32 val;
5303         bool real_error = false;
5304
5305         if (tg3_flag(tp, ERROR_PROCESSED))
5306                 return;
5307
5308         /* Check Flow Attention register */
5309         val = tr32(HOSTCC_FLOW_ATTN);
5310         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5311                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
5312                 real_error = true;
5313         }
5314
5315         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5316                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
5317                 real_error = true;
5318         }
5319
5320         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5321                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
5322                 real_error = true;
5323         }
5324
5325         if (!real_error)
5326                 return;
5327
5328         tg3_dump_state(tp);
5329
5330         tg3_flag_set(tp, ERROR_PROCESSED);
5331         schedule_work(&tp->reset_task);
5332 }
5333
5334 static int tg3_poll(struct napi_struct *napi, int budget)
5335 {
5336         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5337         struct tg3 *tp = tnapi->tp;
5338         int work_done = 0;
5339         struct tg3_hw_status *sblk = tnapi->hw_status;
5340
5341         while (1) {
5342                 if (sblk->status & SD_STATUS_ERROR)
5343                         tg3_process_error(tp);
5344
5345                 tg3_poll_link(tp);
5346
5347                 work_done = tg3_poll_work(tnapi, work_done, budget);
5348
5349                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5350                         goto tx_recovery;
5351
5352                 if (unlikely(work_done >= budget))
5353                         break;
5354
5355                 if (tg3_flag(tp, TAGGED_STATUS)) {
5356                         /* tp->last_tag is used in tg3_int_reenable() below
5357                          * to tell the hw how much work has been processed,
5358                          * so we must read it before checking for more work.
5359                          */
5360                         tnapi->last_tag = sblk->status_tag;
5361                         tnapi->last_irq_tag = tnapi->last_tag;
5362                         rmb();
5363                 } else
5364                         sblk->status &= ~SD_STATUS_UPDATED;
5365
5366                 if (likely(!tg3_has_work(tnapi))) {
5367                         napi_complete(napi);
5368                         tg3_int_reenable(tnapi);
5369                         break;
5370                 }
5371         }
5372
5373         return work_done;
5374
5375 tx_recovery:
5376         /* work_done is guaranteed to be less than budget. */
5377         napi_complete(napi);
5378         schedule_work(&tp->reset_task);
5379         return work_done;
5380 }
5381
5382 static void tg3_napi_disable(struct tg3 *tp)
5383 {
5384         int i;
5385
5386         for (i = tp->irq_cnt - 1; i >= 0; i--)
5387                 napi_disable(&tp->napi[i].napi);
5388 }
5389
5390 static void tg3_napi_enable(struct tg3 *tp)
5391 {
5392         int i;
5393
5394         for (i = 0; i < tp->irq_cnt; i++)
5395                 napi_enable(&tp->napi[i].napi);
5396 }
5397
5398 static void tg3_napi_init(struct tg3 *tp)
5399 {
5400         int i;
5401
5402         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5403         for (i = 1; i < tp->irq_cnt; i++)
5404                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5405 }
5406
5407 static void tg3_napi_fini(struct tg3 *tp)
5408 {
5409         int i;
5410
5411         for (i = 0; i < tp->irq_cnt; i++)
5412                 netif_napi_del(&tp->napi[i].napi);
5413 }
5414
5415 static inline void tg3_netif_stop(struct tg3 *tp)
5416 {
5417         tp->dev->trans_start = jiffies; /* prevent tx timeout */
5418         tg3_napi_disable(tp);
5419         netif_tx_disable(tp->dev);
5420 }
5421
5422 static inline void tg3_netif_start(struct tg3 *tp)
5423 {
5424         /* NOTE: unconditional netif_tx_wake_all_queues is only
5425          * appropriate so long as all callers are assured to
5426          * have free tx slots (such as after tg3_init_hw)
5427          */
5428         netif_tx_wake_all_queues(tp->dev);
5429
5430         tg3_napi_enable(tp);
5431         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5432         tg3_enable_ints(tp);
5433 }
5434
5435 static void tg3_irq_quiesce(struct tg3 *tp)
5436 {
5437         int i;
5438
5439         BUG_ON(tp->irq_sync);
5440
5441         tp->irq_sync = 1;
5442         smp_mb();
5443
5444         for (i = 0; i < tp->irq_cnt; i++)
5445                 synchronize_irq(tp->napi[i].irq_vec);
5446 }
5447
5448 /* Fully shutdown all tg3 driver activity elsewhere in the system.
5449  * If irq_sync is non-zero, then the IRQ handler must be synchronized
5450  * with as well.  Most of the time, this is not necessary except when
5451  * shutting down the device.
5452  */
5453 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5454 {
5455         spin_lock_bh(&tp->lock);
5456         if (irq_sync)
5457                 tg3_irq_quiesce(tp);
5458 }
5459
5460 static inline void tg3_full_unlock(struct tg3 *tp)
5461 {
5462         spin_unlock_bh(&tp->lock);
5463 }
5464
5465 /* One-shot MSI handler - Chip automatically disables interrupt
5466  * after sending MSI so driver doesn't have to do it.
5467  */
5468 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5469 {
5470         struct tg3_napi *tnapi = dev_id;
5471         struct tg3 *tp = tnapi->tp;
5472
5473         prefetch(tnapi->hw_status);
5474         if (tnapi->rx_rcb)
5475                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5476
5477         if (likely(!tg3_irq_sync(tp)))
5478                 napi_schedule(&tnapi->napi);
5479
5480         return IRQ_HANDLED;
5481 }
5482
5483 /* MSI ISR - No need to check for interrupt sharing and no need to
5484  * flush status block and interrupt mailbox. PCI ordering rules
5485  * guarantee that MSI will arrive after the status block.
5486  */
5487 static irqreturn_t tg3_msi(int irq, void *dev_id)
5488 {
5489         struct tg3_napi *tnapi = dev_id;
5490         struct tg3 *tp = tnapi->tp;
5491
5492         prefetch(tnapi->hw_status);
5493         if (tnapi->rx_rcb)
5494                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5495         /*
5496          * Writing any value to intr-mbox-0 clears PCI INTA# and
5497          * chip-internal interrupt pending events.
5498          * Writing non-zero to intr-mbox-0 additional tells the
5499          * NIC to stop sending us irqs, engaging "in-intr-handler"
5500          * event coalescing.
5501          */
5502         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5503         if (likely(!tg3_irq_sync(tp)))
5504                 napi_schedule(&tnapi->napi);
5505
5506         return IRQ_RETVAL(1);
5507 }
5508
5509 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5510 {
5511         struct tg3_napi *tnapi = dev_id;
5512         struct tg3 *tp = tnapi->tp;
5513         struct tg3_hw_status *sblk = tnapi->hw_status;
5514         unsigned int handled = 1;
5515
5516         /* In INTx mode, it is possible for the interrupt to arrive at
5517          * the CPU before the status block posted prior to the interrupt.
5518          * Reading the PCI State register will confirm whether the
5519          * interrupt is ours and will flush the status block.
5520          */
5521         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5522                 if (tg3_flag(tp, CHIP_RESETTING) ||
5523                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5524                         handled = 0;
5525                         goto out;
5526                 }
5527         }
5528
5529         /*
5530          * Writing any value to intr-mbox-0 clears PCI INTA# and
5531          * chip-internal interrupt pending events.
5532          * Writing non-zero to intr-mbox-0 additional tells the
5533          * NIC to stop sending us irqs, engaging "in-intr-handler"
5534          * event coalescing.
5535          *
5536          * Flush the mailbox to de-assert the IRQ immediately to prevent
5537          * spurious interrupts.  The flush impacts performance but
5538          * excessive spurious interrupts can be worse in some cases.
5539          */
5540         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5541         if (tg3_irq_sync(tp))
5542                 goto out;
5543         sblk->status &= ~SD_STATUS_UPDATED;
5544         if (likely(tg3_has_work(tnapi))) {
5545                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5546                 napi_schedule(&tnapi->napi);
5547         } else {
5548                 /* No work, shared interrupt perhaps?  re-enable
5549                  * interrupts, and flush that PCI write
5550                  */
5551                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5552                                0x00000000);
5553         }
5554 out:
5555         return IRQ_RETVAL(handled);
5556 }
5557
5558 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5559 {
5560         struct tg3_napi *tnapi = dev_id;
5561         struct tg3 *tp = tnapi->tp;
5562         struct tg3_hw_status *sblk = tnapi->hw_status;
5563         unsigned int handled = 1;
5564
5565         /* In INTx mode, it is possible for the interrupt to arrive at
5566          * the CPU before the status block posted prior to the interrupt.
5567          * Reading the PCI State register will confirm whether the
5568          * interrupt is ours and will flush the status block.
5569          */
5570         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5571                 if (tg3_flag(tp, CHIP_RESETTING) ||
5572                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5573                         handled = 0;
5574                         goto out;
5575                 }
5576         }
5577
5578         /*
5579          * writing any value to intr-mbox-0 clears PCI INTA# and
5580          * chip-internal interrupt pending events.
5581          * writing non-zero to intr-mbox-0 additional tells the
5582          * NIC to stop sending us irqs, engaging "in-intr-handler"
5583          * event coalescing.
5584          *
5585          * Flush the mailbox to de-assert the IRQ immediately to prevent
5586          * spurious interrupts.  The flush impacts performance but
5587          * excessive spurious interrupts can be worse in some cases.
5588          */
5589         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5590
5591         /*
5592          * In a shared interrupt configuration, sometimes other devices'
5593          * interrupts will scream.  We record the current status tag here
5594          * so that the above check can report that the screaming interrupts
5595          * are unhandled.  Eventually they will be silenced.
5596          */
5597         tnapi->last_irq_tag = sblk->status_tag;
5598
5599         if (tg3_irq_sync(tp))
5600                 goto out;
5601
5602         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5603
5604         napi_schedule(&tnapi->napi);
5605
5606 out:
5607         return IRQ_RETVAL(handled);
5608 }
5609
5610 /* ISR for interrupt test */
5611 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5612 {
5613         struct tg3_napi *tnapi = dev_id;
5614         struct tg3 *tp = tnapi->tp;
5615         struct tg3_hw_status *sblk = tnapi->hw_status;
5616
5617         if ((sblk->status & SD_STATUS_UPDATED) ||
5618             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5619                 tg3_disable_ints(tp);
5620                 return IRQ_RETVAL(1);
5621         }
5622         return IRQ_RETVAL(0);
5623 }
5624
5625 static int tg3_init_hw(struct tg3 *, int);
5626 static int tg3_halt(struct tg3 *, int, int);
5627
5628 /* Restart hardware after configuration changes, self-test, etc.
5629  * Invoked with tp->lock held.
5630  */
5631 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5632         __releases(tp->lock)
5633         __acquires(tp->lock)
5634 {
5635         int err;
5636
5637         err = tg3_init_hw(tp, reset_phy);
5638         if (err) {
5639                 netdev_err(tp->dev,
5640                            "Failed to re-initialize device, aborting\n");
5641                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5642                 tg3_full_unlock(tp);
5643                 del_timer_sync(&tp->timer);
5644                 tp->irq_sync = 0;
5645                 tg3_napi_enable(tp);
5646                 dev_close(tp->dev);
5647                 tg3_full_lock(tp, 0);
5648         }
5649         return err;
5650 }
5651
5652 #ifdef CONFIG_NET_POLL_CONTROLLER
5653 static void tg3_poll_controller(struct net_device *dev)
5654 {
5655         int i;
5656         struct tg3 *tp = netdev_priv(dev);
5657
5658         for (i = 0; i < tp->irq_cnt; i++)
5659                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5660 }
5661 #endif
5662
5663 static void tg3_reset_task(struct work_struct *work)
5664 {
5665         struct tg3 *tp = container_of(work, struct tg3, reset_task);
5666         int err;
5667         unsigned int restart_timer;
5668
5669         tg3_full_lock(tp, 0);
5670
5671         if (!netif_running(tp->dev)) {
5672                 tg3_full_unlock(tp);
5673                 return;
5674         }
5675
5676         tg3_full_unlock(tp);
5677
5678         tg3_phy_stop(tp);
5679
5680         tg3_netif_stop(tp);
5681
5682         tg3_full_lock(tp, 1);
5683
5684         restart_timer = tg3_flag(tp, RESTART_TIMER);
5685         tg3_flag_clear(tp, RESTART_TIMER);
5686
5687         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
5688                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5689                 tp->write32_rx_mbox = tg3_write_flush_reg32;
5690                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5691                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
5692         }
5693
5694         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5695         err = tg3_init_hw(tp, 1);
5696         if (err)
5697                 goto out;
5698
5699         tg3_netif_start(tp);
5700
5701         if (restart_timer)
5702                 mod_timer(&tp->timer, jiffies + 1);
5703
5704 out:
5705         tg3_full_unlock(tp);
5706
5707         if (!err)
5708                 tg3_phy_start(tp);
5709 }
5710
5711 static void tg3_tx_timeout(struct net_device *dev)
5712 {
5713         struct tg3 *tp = netdev_priv(dev);
5714
5715         if (netif_msg_tx_err(tp)) {
5716                 netdev_err(dev, "transmit timed out, resetting\n");
5717                 tg3_dump_state(tp);
5718         }
5719
5720         schedule_work(&tp->reset_task);
5721 }
5722
5723 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5724 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5725 {
5726         u32 base = (u32) mapping & 0xffffffff;
5727
5728         return (base > 0xffffdcc0) && (base + len + 8 < base);
5729 }
5730
5731 /* Test for DMA addresses > 40-bit */
5732 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5733                                           int len)
5734 {
5735 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5736         if (tg3_flag(tp, 40BIT_DMA_BUG))
5737                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5738         return 0;
5739 #else
5740         return 0;
5741 #endif
5742 }
5743
5744 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5745                         dma_addr_t mapping, int len, u32 flags,
5746                         u32 mss_and_is_end)
5747 {
5748         struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5749         int is_end = (mss_and_is_end & 0x1);
5750         u32 mss = (mss_and_is_end >> 1);
5751         u32 vlan_tag = 0;
5752
5753         if (is_end)
5754                 flags |= TXD_FLAG_END;
5755         if (flags & TXD_FLAG_VLAN) {
5756                 vlan_tag = flags >> 16;
5757                 flags &= 0xffff;
5758         }
5759         vlan_tag |= (mss << TXD_MSS_SHIFT);
5760
5761         txd->addr_hi = ((u64) mapping >> 32);
5762         txd->addr_lo = ((u64) mapping & 0xffffffff);
5763         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5764         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
5765 }
5766
5767 static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
5768                                 struct sk_buff *skb, int last)
5769 {
5770         int i;
5771         u32 entry = tnapi->tx_prod;
5772         struct ring_info *txb = &tnapi->tx_buffers[entry];
5773
5774         pci_unmap_single(tnapi->tp->pdev,
5775                          dma_unmap_addr(txb, mapping),
5776                          skb_headlen(skb),
5777                          PCI_DMA_TODEVICE);
5778         for (i = 0; i < last; i++) {
5779                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5780
5781                 entry = NEXT_TX(entry);
5782                 txb = &tnapi->tx_buffers[entry];
5783
5784                 pci_unmap_page(tnapi->tp->pdev,
5785                                dma_unmap_addr(txb, mapping),
5786                                frag->size, PCI_DMA_TODEVICE);
5787         }
5788 }
5789
5790 /* Workaround 4GB and 40-bit hardware DMA bugs. */
5791 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5792                                        struct sk_buff *skb,
5793                                        u32 base_flags, u32 mss)
5794 {
5795         struct tg3 *tp = tnapi->tp;
5796         struct sk_buff *new_skb;
5797         dma_addr_t new_addr = 0;
5798         u32 entry = tnapi->tx_prod;
5799         int ret = 0;
5800
5801         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5802                 new_skb = skb_copy(skb, GFP_ATOMIC);
5803         else {
5804                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5805
5806                 new_skb = skb_copy_expand(skb,
5807                                           skb_headroom(skb) + more_headroom,
5808                                           skb_tailroom(skb), GFP_ATOMIC);
5809         }
5810
5811         if (!new_skb) {
5812                 ret = -1;
5813         } else {
5814                 /* New SKB is guaranteed to be linear. */
5815                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5816                                           PCI_DMA_TODEVICE);
5817                 /* Make sure the mapping succeeded */
5818                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5819                         ret = -1;
5820                         dev_kfree_skb(new_skb);
5821
5822                 /* Make sure new skb does not cross any 4G boundaries.
5823                  * Drop the packet if it does.
5824                  */
5825                 } else if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
5826                            tg3_4g_overflow_test(new_addr, new_skb->len)) {
5827                         pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5828                                          PCI_DMA_TODEVICE);
5829                         ret = -1;
5830                         dev_kfree_skb(new_skb);
5831                 } else {
5832                         tnapi->tx_buffers[entry].skb = new_skb;
5833                         dma_unmap_addr_set(&tnapi->tx_buffers[entry],
5834                                            mapping, new_addr);
5835
5836                         tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5837                                     base_flags, 1 | (mss << 1));
5838                 }
5839         }
5840
5841         dev_kfree_skb(skb);
5842
5843         return ret;
5844 }
5845
5846 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
5847
5848 /* Use GSO to workaround a rare TSO bug that may be triggered when the
5849  * TSO header is greater than 80 bytes.
5850  */
5851 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5852 {
5853         struct sk_buff *segs, *nskb;
5854         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5855
5856         /* Estimate the number of fragments in the worst case */
5857         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5858                 netif_stop_queue(tp->dev);
5859
5860                 /* netif_tx_stop_queue() must be done before checking
5861                  * checking tx index in tg3_tx_avail() below, because in
5862                  * tg3_tx(), we update tx index before checking for
5863                  * netif_tx_queue_stopped().
5864                  */
5865                 smp_mb();
5866                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5867                         return NETDEV_TX_BUSY;
5868
5869                 netif_wake_queue(tp->dev);
5870         }
5871
5872         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5873         if (IS_ERR(segs))
5874                 goto tg3_tso_bug_end;
5875
5876         do {
5877                 nskb = segs;
5878                 segs = segs->next;
5879                 nskb->next = NULL;
5880                 tg3_start_xmit(nskb, tp->dev);
5881         } while (segs);
5882
5883 tg3_tso_bug_end:
5884         dev_kfree_skb(skb);
5885
5886         return NETDEV_TX_OK;
5887 }
5888
5889 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5890  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
5891  */
5892 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
5893 {
5894         struct tg3 *tp = netdev_priv(dev);
5895         u32 len, entry, base_flags, mss;
5896         int i = -1, would_hit_hwbug;
5897         dma_addr_t mapping;
5898         struct tg3_napi *tnapi;
5899         struct netdev_queue *txq;
5900         unsigned int last;
5901
5902         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5903         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5904         if (tg3_flag(tp, ENABLE_TSS))
5905                 tnapi++;
5906
5907         /* We are running in BH disabled context with netif_tx_lock
5908          * and TX reclaim runs via tp->napi.poll inside of a software
5909          * interrupt.  Furthermore, IRQ processing runs lockless so we have
5910          * no IRQ context deadlocks to worry about either.  Rejoice!
5911          */
5912         if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5913                 if (!netif_tx_queue_stopped(txq)) {
5914                         netif_tx_stop_queue(txq);
5915
5916                         /* This is a hard error, log it. */
5917                         netdev_err(dev,
5918                                    "BUG! Tx Ring full when queue awake!\n");
5919                 }
5920                 return NETDEV_TX_BUSY;
5921         }
5922
5923         entry = tnapi->tx_prod;
5924         base_flags = 0;
5925         if (skb->ip_summed == CHECKSUM_PARTIAL)
5926                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5927
5928         mss = skb_shinfo(skb)->gso_size;
5929         if (mss) {
5930                 struct iphdr *iph;
5931                 u32 tcp_opt_len, hdr_len;
5932
5933                 if (skb_header_cloned(skb) &&
5934                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5935                         dev_kfree_skb(skb);
5936                         goto out_unlock;
5937                 }
5938
5939                 iph = ip_hdr(skb);
5940                 tcp_opt_len = tcp_optlen(skb);
5941
5942                 if (skb_is_gso_v6(skb)) {
5943                         hdr_len = skb_headlen(skb) - ETH_HLEN;
5944                 } else {
5945                         u32 ip_tcp_len;
5946
5947                         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5948                         hdr_len = ip_tcp_len + tcp_opt_len;
5949
5950                         iph->check = 0;
5951                         iph->tot_len = htons(mss + hdr_len);
5952                 }
5953
5954                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5955                     tg3_flag(tp, TSO_BUG))
5956                         return tg3_tso_bug(tp, skb);
5957
5958                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5959                                TXD_FLAG_CPU_POST_DMA);
5960
5961                 if (tg3_flag(tp, HW_TSO_1) ||
5962                     tg3_flag(tp, HW_TSO_2) ||
5963                     tg3_flag(tp, HW_TSO_3)) {
5964                         tcp_hdr(skb)->check = 0;
5965                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5966                 } else
5967                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5968                                                                  iph->daddr, 0,
5969                                                                  IPPROTO_TCP,
5970                                                                  0);
5971
5972                 if (tg3_flag(tp, HW_TSO_3)) {
5973                         mss |= (hdr_len & 0xc) << 12;
5974                         if (hdr_len & 0x10)
5975                                 base_flags |= 0x00000010;
5976                         base_flags |= (hdr_len & 0x3e0) << 5;
5977                 } else if (tg3_flag(tp, HW_TSO_2))
5978                         mss |= hdr_len << 9;
5979                 else if (tg3_flag(tp, HW_TSO_1) ||
5980                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5981                         if (tcp_opt_len || iph->ihl > 5) {
5982                                 int tsflags;
5983
5984                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5985                                 mss |= (tsflags << 11);
5986                         }
5987                 } else {
5988                         if (tcp_opt_len || iph->ihl > 5) {
5989                                 int tsflags;
5990
5991                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5992                                 base_flags |= tsflags << 12;
5993                         }
5994                 }
5995         }
5996
5997         if (vlan_tx_tag_present(skb))
5998                 base_flags |= (TXD_FLAG_VLAN |
5999                                (vlan_tx_tag_get(skb) << 16));
6000
6001         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6002             !mss && skb->len > VLAN_ETH_FRAME_LEN)
6003                 base_flags |= TXD_FLAG_JMB_PKT;
6004
6005         len = skb_headlen(skb);
6006
6007         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6008         if (pci_dma_mapping_error(tp->pdev, mapping)) {
6009                 dev_kfree_skb(skb);
6010                 goto out_unlock;
6011         }
6012
6013         tnapi->tx_buffers[entry].skb = skb;
6014         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6015
6016         would_hit_hwbug = 0;
6017
6018         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6019                 would_hit_hwbug = 1;
6020
6021         if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
6022             tg3_4g_overflow_test(mapping, len))
6023                 would_hit_hwbug = 1;
6024
6025         if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
6026             tg3_40bit_overflow_test(tp, mapping, len))
6027                 would_hit_hwbug = 1;
6028
6029         if (tg3_flag(tp, 5701_DMA_BUG))
6030                 would_hit_hwbug = 1;
6031
6032         tg3_set_txd(tnapi, entry, mapping, len, base_flags,
6033                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
6034
6035         entry = NEXT_TX(entry);
6036
6037         /* Now loop through additional data fragments, and queue them. */
6038         if (skb_shinfo(skb)->nr_frags > 0) {
6039                 last = skb_shinfo(skb)->nr_frags - 1;
6040                 for (i = 0; i <= last; i++) {
6041                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6042
6043                         len = frag->size;
6044                         mapping = pci_map_page(tp->pdev,
6045                                                frag->page,
6046                                                frag->page_offset,
6047                                                len, PCI_DMA_TODEVICE);
6048
6049                         tnapi->tx_buffers[entry].skb = NULL;
6050                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6051                                            mapping);
6052                         if (pci_dma_mapping_error(tp->pdev, mapping))
6053                                 goto dma_error;
6054
6055                         if (tg3_flag(tp, SHORT_DMA_BUG) &&
6056                             len <= 8)
6057                                 would_hit_hwbug = 1;
6058
6059                         if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
6060                             tg3_4g_overflow_test(mapping, len))
6061                                 would_hit_hwbug = 1;
6062
6063                         if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
6064                             tg3_40bit_overflow_test(tp, mapping, len))
6065                                 would_hit_hwbug = 1;
6066
6067                         if (tg3_flag(tp, HW_TSO_1) ||
6068                             tg3_flag(tp, HW_TSO_2) ||
6069                             tg3_flag(tp, HW_TSO_3))
6070                                 tg3_set_txd(tnapi, entry, mapping, len,
6071                                             base_flags, (i == last)|(mss << 1));
6072                         else
6073                                 tg3_set_txd(tnapi, entry, mapping, len,
6074                                             base_flags, (i == last));
6075
6076                         entry = NEXT_TX(entry);
6077                 }
6078         }
6079
6080         if (would_hit_hwbug) {
6081                 tg3_skb_error_unmap(tnapi, skb, i);
6082
6083                 /* If the workaround fails due to memory/mapping
6084                  * failure, silently drop this packet.
6085                  */
6086                 if (tigon3_dma_hwbug_workaround(tnapi, skb, base_flags, mss))
6087                         goto out_unlock;
6088
6089                 entry = NEXT_TX(tnapi->tx_prod);
6090         }
6091
6092         /* Packets are ready, update Tx producer idx local and on card. */
6093         tw32_tx_mbox(tnapi->prodmbox, entry);
6094
6095         skb_tx_timestamp(skb);
6096
6097         tnapi->tx_prod = entry;
6098         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6099                 netif_tx_stop_queue(txq);
6100
6101                 /* netif_tx_stop_queue() must be done before checking
6102                  * checking tx index in tg3_tx_avail() below, because in
6103                  * tg3_tx(), we update tx index before checking for
6104                  * netif_tx_queue_stopped().
6105                  */
6106                 smp_mb();
6107                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6108                         netif_tx_wake_queue(txq);
6109         }
6110
6111 out_unlock:
6112         mmiowb();
6113
6114         return NETDEV_TX_OK;
6115
6116 dma_error:
6117         tg3_skb_error_unmap(tnapi, skb, i);
6118         dev_kfree_skb(skb);
6119         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6120         return NETDEV_TX_OK;
6121 }
6122
6123 static void tg3_set_loopback(struct net_device *dev, u32 features)
6124 {
6125         struct tg3 *tp = netdev_priv(dev);
6126
6127         if (features & NETIF_F_LOOPBACK) {
6128                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6129                         return;
6130
6131                 /*
6132                  * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
6133                  * loopback mode if Half-Duplex mode was negotiated earlier.
6134                  */
6135                 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
6136
6137                 /* Enable internal MAC loopback mode */
6138                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6139                 spin_lock_bh(&tp->lock);
6140                 tw32(MAC_MODE, tp->mac_mode);
6141                 netif_carrier_on(tp->dev);
6142                 spin_unlock_bh(&tp->lock);
6143                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6144         } else {
6145                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6146                         return;
6147
6148                 /* Disable internal MAC loopback mode */
6149                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6150                 spin_lock_bh(&tp->lock);
6151                 tw32(MAC_MODE, tp->mac_mode);
6152                 /* Force link status check */
6153                 tg3_setup_phy(tp, 1);
6154                 spin_unlock_bh(&tp->lock);
6155                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6156         }
6157 }
6158
6159 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6160 {
6161         struct tg3 *tp = netdev_priv(dev);
6162
6163         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6164                 features &= ~NETIF_F_ALL_TSO;
6165
6166         return features;
6167 }
6168
6169 static int tg3_set_features(struct net_device *dev, u32 features)
6170 {
6171         u32 changed = dev->features ^ features;
6172
6173         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6174                 tg3_set_loopback(dev, features);
6175
6176         return 0;
6177 }
6178
6179 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6180                                int new_mtu)
6181 {
6182         dev->mtu = new_mtu;
6183
6184         if (new_mtu > ETH_DATA_LEN) {
6185                 if (tg3_flag(tp, 5780_CLASS)) {
6186                         netdev_update_features(dev);
6187                         tg3_flag_clear(tp, TSO_CAPABLE);
6188                 } else {
6189                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
6190                 }
6191         } else {
6192                 if (tg3_flag(tp, 5780_CLASS)) {
6193                         tg3_flag_set(tp, TSO_CAPABLE);
6194                         netdev_update_features(dev);
6195                 }
6196                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
6197         }
6198 }
6199
6200 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6201 {
6202         struct tg3 *tp = netdev_priv(dev);
6203         int err;
6204
6205         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6206                 return -EINVAL;
6207
6208         if (!netif_running(dev)) {
6209                 /* We'll just catch it later when the
6210                  * device is up'd.
6211                  */
6212                 tg3_set_mtu(dev, tp, new_mtu);
6213                 return 0;
6214         }
6215
6216         tg3_phy_stop(tp);
6217
6218         tg3_netif_stop(tp);
6219
6220         tg3_full_lock(tp, 1);
6221
6222         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6223
6224         tg3_set_mtu(dev, tp, new_mtu);
6225
6226         err = tg3_restart_hw(tp, 0);
6227
6228         if (!err)
6229                 tg3_netif_start(tp);
6230
6231         tg3_full_unlock(tp);
6232
6233         if (!err)
6234                 tg3_phy_start(tp);
6235
6236         return err;
6237 }
6238
6239 static void tg3_rx_prodring_free(struct tg3 *tp,
6240                                  struct tg3_rx_prodring_set *tpr)
6241 {
6242         int i;
6243
6244         if (tpr != &tp->napi[0].prodring) {
6245                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6246                      i = (i + 1) & tp->rx_std_ring_mask)
6247                         tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6248                                         tp->rx_pkt_map_sz);
6249
6250                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
6251                         for (i = tpr->rx_jmb_cons_idx;
6252                              i != tpr->rx_jmb_prod_idx;
6253                              i = (i + 1) & tp->rx_jmb_ring_mask) {
6254                                 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6255                                                 TG3_RX_JMB_MAP_SZ);
6256                         }
6257                 }
6258
6259                 return;
6260         }
6261
6262         for (i = 0; i <= tp->rx_std_ring_mask; i++)
6263                 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6264                                 tp->rx_pkt_map_sz);
6265
6266         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6267                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6268                         tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6269                                         TG3_RX_JMB_MAP_SZ);
6270         }
6271 }
6272
6273 /* Initialize rx rings for packet processing.
6274  *
6275  * The chip has been shut down and the driver detached from
6276  * the networking, so no interrupts or new tx packets will
6277  * end up in the driver.  tp->{tx,}lock are held and thus
6278  * we may not sleep.
6279  */
6280 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6281                                  struct tg3_rx_prodring_set *tpr)
6282 {
6283         u32 i, rx_pkt_dma_sz;
6284
6285         tpr->rx_std_cons_idx = 0;
6286         tpr->rx_std_prod_idx = 0;
6287         tpr->rx_jmb_cons_idx = 0;
6288         tpr->rx_jmb_prod_idx = 0;
6289
6290         if (tpr != &tp->napi[0].prodring) {
6291                 memset(&tpr->rx_std_buffers[0], 0,
6292                        TG3_RX_STD_BUFF_RING_SIZE(tp));
6293                 if (tpr->rx_jmb_buffers)
6294                         memset(&tpr->rx_jmb_buffers[0], 0,
6295                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
6296                 goto done;
6297         }
6298
6299         /* Zero out all descriptors. */
6300         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6301
6302         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6303         if (tg3_flag(tp, 5780_CLASS) &&
6304             tp->dev->mtu > ETH_DATA_LEN)
6305                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6306         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6307
6308         /* Initialize invariants of the rings, we only set this
6309          * stuff once.  This works because the card does not
6310          * write into the rx buffer posting rings.
6311          */
6312         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6313                 struct tg3_rx_buffer_desc *rxd;
6314
6315                 rxd = &tpr->rx_std[i];
6316                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6317                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6318                 rxd->opaque = (RXD_OPAQUE_RING_STD |
6319                                (i << RXD_OPAQUE_INDEX_SHIFT));
6320         }
6321
6322         /* Now allocate fresh SKBs for each rx ring. */
6323         for (i = 0; i < tp->rx_pending; i++) {
6324                 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6325                         netdev_warn(tp->dev,
6326                                     "Using a smaller RX standard ring. Only "
6327                                     "%d out of %d buffers were allocated "
6328                                     "successfully\n", i, tp->rx_pending);
6329                         if (i == 0)
6330                                 goto initfail;
6331                         tp->rx_pending = i;
6332                         break;
6333                 }
6334         }
6335
6336         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
6337                 goto done;
6338
6339         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6340
6341         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
6342                 goto done;
6343
6344         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6345                 struct tg3_rx_buffer_desc *rxd;
6346
6347                 rxd = &tpr->rx_jmb[i].std;
6348                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6349                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6350                                   RXD_FLAG_JUMBO;
6351                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6352                        (i << RXD_OPAQUE_INDEX_SHIFT));
6353         }
6354
6355         for (i = 0; i < tp->rx_jumbo_pending; i++) {
6356                 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6357                         netdev_warn(tp->dev,
6358                                     "Using a smaller RX jumbo ring. Only %d "
6359                                     "out of %d buffers were allocated "
6360                                     "successfully\n", i, tp->rx_jumbo_pending);
6361                         if (i == 0)
6362                                 goto initfail;
6363                         tp->rx_jumbo_pending = i;
6364                         break;
6365                 }
6366         }
6367
6368 done:
6369         return 0;
6370
6371 initfail:
6372         tg3_rx_prodring_free(tp, tpr);
6373         return -ENOMEM;
6374 }
6375
6376 static void tg3_rx_prodring_fini(struct tg3 *tp,
6377                                  struct tg3_rx_prodring_set *tpr)
6378 {
6379         kfree(tpr->rx_std_buffers);
6380         tpr->rx_std_buffers = NULL;
6381         kfree(tpr->rx_jmb_buffers);
6382         tpr->rx_jmb_buffers = NULL;
6383         if (tpr->rx_std) {
6384                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6385                                   tpr->rx_std, tpr->rx_std_mapping);
6386                 tpr->rx_std = NULL;
6387         }
6388         if (tpr->rx_jmb) {
6389                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6390                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
6391                 tpr->rx_jmb = NULL;
6392         }
6393 }
6394
6395 static int tg3_rx_prodring_init(struct tg3 *tp,
6396                                 struct tg3_rx_prodring_set *tpr)
6397 {
6398         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6399                                       GFP_KERNEL);
6400         if (!tpr->rx_std_buffers)
6401                 return -ENOMEM;
6402
6403         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6404                                          TG3_RX_STD_RING_BYTES(tp),
6405                                          &tpr->rx_std_mapping,
6406                                          GFP_KERNEL);
6407         if (!tpr->rx_std)
6408                 goto err_out;
6409
6410         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6411                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6412                                               GFP_KERNEL);
6413                 if (!tpr->rx_jmb_buffers)
6414                         goto err_out;
6415
6416                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6417                                                  TG3_RX_JMB_RING_BYTES(tp),
6418                                                  &tpr->rx_jmb_mapping,
6419                                                  GFP_KERNEL);
6420                 if (!tpr->rx_jmb)
6421                         goto err_out;
6422         }
6423
6424         return 0;
6425
6426 err_out:
6427         tg3_rx_prodring_fini(tp, tpr);
6428         return -ENOMEM;
6429 }
6430
6431 /* Free up pending packets in all rx/tx rings.
6432  *
6433  * The chip has been shut down and the driver detached from
6434  * the networking, so no interrupts or new tx packets will
6435  * end up in the driver.  tp->{tx,}lock is not held and we are not
6436  * in an interrupt context and thus may sleep.
6437  */
6438 static void tg3_free_rings(struct tg3 *tp)
6439 {
6440         int i, j;
6441
6442         for (j = 0; j < tp->irq_cnt; j++) {
6443                 struct tg3_napi *tnapi = &tp->napi[j];
6444
6445                 tg3_rx_prodring_free(tp, &tnapi->prodring);
6446
6447                 if (!tnapi->tx_buffers)
6448                         continue;
6449
6450                 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6451                         struct ring_info *txp;
6452                         struct sk_buff *skb;
6453                         unsigned int k;
6454
6455                         txp = &tnapi->tx_buffers[i];
6456                         skb = txp->skb;
6457
6458                         if (skb == NULL) {
6459                                 i++;
6460                                 continue;
6461                         }
6462
6463                         pci_unmap_single(tp->pdev,
6464                                          dma_unmap_addr(txp, mapping),
6465                                          skb_headlen(skb),
6466                                          PCI_DMA_TODEVICE);
6467                         txp->skb = NULL;
6468
6469                         i++;
6470
6471                         for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6472                                 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6473                                 pci_unmap_page(tp->pdev,
6474                                                dma_unmap_addr(txp, mapping),
6475                                                skb_shinfo(skb)->frags[k].size,
6476                                                PCI_DMA_TODEVICE);
6477                                 i++;
6478                         }
6479
6480                         dev_kfree_skb_any(skb);
6481                 }
6482         }
6483 }
6484
6485 /* Initialize tx/rx rings for packet processing.
6486  *
6487  * The chip has been shut down and the driver detached from
6488  * the networking, so no interrupts or new tx packets will
6489  * end up in the driver.  tp->{tx,}lock are held and thus
6490  * we may not sleep.
6491  */
6492 static int tg3_init_rings(struct tg3 *tp)
6493 {
6494         int i;
6495
6496         /* Free up all the SKBs. */
6497         tg3_free_rings(tp);
6498
6499         for (i = 0; i < tp->irq_cnt; i++) {
6500                 struct tg3_napi *tnapi = &tp->napi[i];
6501
6502                 tnapi->last_tag = 0;
6503                 tnapi->last_irq_tag = 0;
6504                 tnapi->hw_status->status = 0;
6505                 tnapi->hw_status->status_tag = 0;
6506                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6507
6508                 tnapi->tx_prod = 0;
6509                 tnapi->tx_cons = 0;
6510                 if (tnapi->tx_ring)
6511                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6512
6513                 tnapi->rx_rcb_ptr = 0;
6514                 if (tnapi->rx_rcb)
6515                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6516
6517                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6518                         tg3_free_rings(tp);
6519                         return -ENOMEM;
6520                 }
6521         }
6522
6523         return 0;
6524 }
6525
6526 /*
6527  * Must not be invoked with interrupt sources disabled and
6528  * the hardware shutdown down.
6529  */
6530 static void tg3_free_consistent(struct tg3 *tp)
6531 {
6532         int i;
6533
6534         for (i = 0; i < tp->irq_cnt; i++) {
6535                 struct tg3_napi *tnapi = &tp->napi[i];
6536
6537                 if (tnapi->tx_ring) {
6538                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6539                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
6540                         tnapi->tx_ring = NULL;
6541                 }
6542
6543                 kfree(tnapi->tx_buffers);
6544                 tnapi->tx_buffers = NULL;
6545
6546                 if (tnapi->rx_rcb) {
6547                         dma_free_coherent(&tp->pdev->dev,
6548                                           TG3_RX_RCB_RING_BYTES(tp),
6549                                           tnapi->rx_rcb,
6550                                           tnapi->rx_rcb_mapping);
6551                         tnapi->rx_rcb = NULL;
6552                 }
6553
6554                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6555
6556                 if (tnapi->hw_status) {
6557                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6558                                           tnapi->hw_status,
6559                                           tnapi->status_mapping);
6560                         tnapi->hw_status = NULL;
6561                 }
6562         }
6563
6564         if (tp->hw_stats) {
6565                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6566                                   tp->hw_stats, tp->stats_mapping);
6567                 tp->hw_stats = NULL;
6568         }
6569 }
6570
6571 /*
6572  * Must not be invoked with interrupt sources disabled and
6573  * the hardware shutdown down.  Can sleep.
6574  */
6575 static int tg3_alloc_consistent(struct tg3 *tp)
6576 {
6577         int i;
6578
6579         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6580                                           sizeof(struct tg3_hw_stats),
6581                                           &tp->stats_mapping,
6582                                           GFP_KERNEL);
6583         if (!tp->hw_stats)
6584                 goto err_out;
6585
6586         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6587
6588         for (i = 0; i < tp->irq_cnt; i++) {
6589                 struct tg3_napi *tnapi = &tp->napi[i];
6590                 struct tg3_hw_status *sblk;
6591
6592                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6593                                                       TG3_HW_STATUS_SIZE,
6594                                                       &tnapi->status_mapping,
6595                                                       GFP_KERNEL);
6596                 if (!tnapi->hw_status)
6597                         goto err_out;
6598
6599                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6600                 sblk = tnapi->hw_status;
6601
6602                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6603                         goto err_out;
6604
6605                 /* If multivector TSS is enabled, vector 0 does not handle
6606                  * tx interrupts.  Don't allocate any resources for it.
6607                  */
6608                 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
6609                     (i && tg3_flag(tp, ENABLE_TSS))) {
6610                         tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6611                                                     TG3_TX_RING_SIZE,
6612                                                     GFP_KERNEL);
6613                         if (!tnapi->tx_buffers)
6614                                 goto err_out;
6615
6616                         tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6617                                                             TG3_TX_RING_BYTES,
6618                                                         &tnapi->tx_desc_mapping,
6619                                                             GFP_KERNEL);
6620                         if (!tnapi->tx_ring)
6621                                 goto err_out;
6622                 }
6623
6624                 /*
6625                  * When RSS is enabled, the status block format changes
6626                  * slightly.  The "rx_jumbo_consumer", "reserved",
6627                  * and "rx_mini_consumer" members get mapped to the
6628                  * other three rx return ring producer indexes.
6629                  */
6630                 switch (i) {
6631                 default:
6632                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6633                         break;
6634                 case 2:
6635                         tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6636                         break;
6637                 case 3:
6638                         tnapi->rx_rcb_prod_idx = &sblk->reserved;
6639                         break;
6640                 case 4:
6641                         tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6642                         break;
6643                 }
6644
6645                 /*
6646                  * If multivector RSS is enabled, vector 0 does not handle
6647                  * rx or tx interrupts.  Don't allocate any resources for it.
6648                  */
6649                 if (!i && tg3_flag(tp, ENABLE_RSS))
6650                         continue;
6651
6652                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6653                                                    TG3_RX_RCB_RING_BYTES(tp),
6654                                                    &tnapi->rx_rcb_mapping,
6655                                                    GFP_KERNEL);
6656                 if (!tnapi->rx_rcb)
6657                         goto err_out;
6658
6659                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6660         }
6661
6662         return 0;
6663
6664 err_out:
6665         tg3_free_consistent(tp);
6666         return -ENOMEM;
6667 }
6668
6669 #define MAX_WAIT_CNT 1000
6670
6671 /* To stop a block, clear the enable bit and poll till it
6672  * clears.  tp->lock is held.
6673  */
6674 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6675 {
6676         unsigned int i;
6677         u32 val;
6678
6679         if (tg3_flag(tp, 5705_PLUS)) {
6680                 switch (ofs) {
6681                 case RCVLSC_MODE:
6682                 case DMAC_MODE:
6683                 case MBFREE_MODE:
6684                 case BUFMGR_MODE:
6685                 case MEMARB_MODE:
6686                         /* We can't enable/disable these bits of the
6687                          * 5705/5750, just say success.
6688                          */
6689                         return 0;
6690
6691                 default:
6692                         break;
6693                 }
6694         }
6695
6696         val = tr32(ofs);
6697         val &= ~enable_bit;
6698         tw32_f(ofs, val);
6699
6700         for (i = 0; i < MAX_WAIT_CNT; i++) {
6701                 udelay(100);
6702                 val = tr32(ofs);
6703                 if ((val & enable_bit) == 0)
6704                         break;
6705         }
6706
6707         if (i == MAX_WAIT_CNT && !silent) {
6708                 dev_err(&tp->pdev->dev,
6709                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6710                         ofs, enable_bit);
6711                 return -ENODEV;
6712         }
6713
6714         return 0;
6715 }
6716
6717 /* tp->lock is held. */
6718 static int tg3_abort_hw(struct tg3 *tp, int silent)
6719 {
6720         int i, err;
6721
6722         tg3_disable_ints(tp);
6723
6724         tp->rx_mode &= ~RX_MODE_ENABLE;
6725         tw32_f(MAC_RX_MODE, tp->rx_mode);
6726         udelay(10);
6727
6728         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6729         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6730         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6731         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6732         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6733         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6734
6735         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6736         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6737         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6738         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6739         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6740         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6741         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6742
6743         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6744         tw32_f(MAC_MODE, tp->mac_mode);
6745         udelay(40);
6746
6747         tp->tx_mode &= ~TX_MODE_ENABLE;
6748         tw32_f(MAC_TX_MODE, tp->tx_mode);
6749
6750         for (i = 0; i < MAX_WAIT_CNT; i++) {
6751                 udelay(100);
6752                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6753                         break;
6754         }
6755         if (i >= MAX_WAIT_CNT) {
6756                 dev_err(&tp->pdev->dev,
6757                         "%s timed out, TX_MODE_ENABLE will not clear "
6758                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6759                 err |= -ENODEV;
6760         }
6761
6762         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6763         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6764         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6765
6766         tw32(FTQ_RESET, 0xffffffff);
6767         tw32(FTQ_RESET, 0x00000000);
6768
6769         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6770         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6771
6772         for (i = 0; i < tp->irq_cnt; i++) {
6773                 struct tg3_napi *tnapi = &tp->napi[i];
6774                 if (tnapi->hw_status)
6775                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6776         }
6777         if (tp->hw_stats)
6778                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6779
6780         return err;
6781 }
6782
6783 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6784 {
6785         int i;
6786         u32 apedata;
6787
6788         /* NCSI does not support APE events */
6789         if (tg3_flag(tp, APE_HAS_NCSI))
6790                 return;
6791
6792         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6793         if (apedata != APE_SEG_SIG_MAGIC)
6794                 return;
6795
6796         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6797         if (!(apedata & APE_FW_STATUS_READY))
6798                 return;
6799
6800         /* Wait for up to 1 millisecond for APE to service previous event. */
6801         for (i = 0; i < 10; i++) {
6802                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6803                         return;
6804
6805                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6806
6807                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6808                         tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6809                                         event | APE_EVENT_STATUS_EVENT_PENDING);
6810
6811                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6812
6813                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6814                         break;
6815
6816                 udelay(100);
6817         }
6818
6819         if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6820                 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6821 }
6822
6823 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6824 {
6825         u32 event;
6826         u32 apedata;
6827
6828         if (!tg3_flag(tp, ENABLE_APE))
6829                 return;
6830
6831         switch (kind) {
6832         case RESET_KIND_INIT:
6833                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6834                                 APE_HOST_SEG_SIG_MAGIC);
6835                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6836                                 APE_HOST_SEG_LEN_MAGIC);
6837                 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6838                 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6839                 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6840                         APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6841                 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6842                                 APE_HOST_BEHAV_NO_PHYLOCK);
6843                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
6844                                     TG3_APE_HOST_DRVR_STATE_START);
6845
6846                 event = APE_EVENT_STATUS_STATE_START;
6847                 break;
6848         case RESET_KIND_SHUTDOWN:
6849                 /* With the interface we are currently using,
6850                  * APE does not track driver state.  Wiping
6851                  * out the HOST SEGMENT SIGNATURE forces
6852                  * the APE to assume OS absent status.
6853                  */
6854                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6855
6856                 if (device_may_wakeup(&tp->pdev->dev) &&
6857                     tg3_flag(tp, WOL_ENABLE)) {
6858                         tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
6859                                             TG3_APE_HOST_WOL_SPEED_AUTO);
6860                         apedata = TG3_APE_HOST_DRVR_STATE_WOL;
6861                 } else
6862                         apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
6863
6864                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
6865
6866                 event = APE_EVENT_STATUS_STATE_UNLOAD;
6867                 break;
6868         case RESET_KIND_SUSPEND:
6869                 event = APE_EVENT_STATUS_STATE_SUSPEND;
6870                 break;
6871         default:
6872                 return;
6873         }
6874
6875         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
6876
6877         tg3_ape_send_event(tp, event);
6878 }
6879
6880 /* tp->lock is held. */
6881 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6882 {
6883         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6884                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6885
6886         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6887                 switch (kind) {
6888                 case RESET_KIND_INIT:
6889                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6890                                       DRV_STATE_START);
6891                         break;
6892
6893                 case RESET_KIND_SHUTDOWN:
6894                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6895                                       DRV_STATE_UNLOAD);
6896                         break;
6897
6898                 case RESET_KIND_SUSPEND:
6899                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6900                                       DRV_STATE_SUSPEND);
6901                         break;
6902
6903                 default:
6904                         break;
6905                 }
6906         }
6907
6908         if (kind == RESET_KIND_INIT ||
6909             kind == RESET_KIND_SUSPEND)
6910                 tg3_ape_driver_state_change(tp, kind);
6911 }
6912
6913 /* tp->lock is held. */
6914 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6915 {
6916         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6917                 switch (kind) {
6918                 case RESET_KIND_INIT:
6919                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6920                                       DRV_STATE_START_DONE);
6921                         break;
6922
6923                 case RESET_KIND_SHUTDOWN:
6924                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6925                                       DRV_STATE_UNLOAD_DONE);
6926                         break;
6927
6928                 default:
6929                         break;
6930                 }
6931         }
6932
6933         if (kind == RESET_KIND_SHUTDOWN)
6934                 tg3_ape_driver_state_change(tp, kind);
6935 }
6936
6937 /* tp->lock is held. */
6938 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6939 {
6940         if (tg3_flag(tp, ENABLE_ASF)) {
6941                 switch (kind) {
6942                 case RESET_KIND_INIT:
6943                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6944                                       DRV_STATE_START);
6945                         break;
6946
6947                 case RESET_KIND_SHUTDOWN:
6948                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6949                                       DRV_STATE_UNLOAD);
6950                         break;
6951
6952                 case RESET_KIND_SUSPEND:
6953                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6954                                       DRV_STATE_SUSPEND);
6955                         break;
6956
6957                 default:
6958                         break;
6959                 }
6960         }
6961 }
6962
6963 static int tg3_poll_fw(struct tg3 *tp)
6964 {
6965         int i;
6966         u32 val;
6967
6968         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6969                 /* Wait up to 20ms for init done. */
6970                 for (i = 0; i < 200; i++) {
6971                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
6972                                 return 0;
6973                         udelay(100);
6974                 }
6975                 return -ENODEV;
6976         }
6977
6978         /* Wait for firmware initialization to complete. */
6979         for (i = 0; i < 100000; i++) {
6980                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
6981                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
6982                         break;
6983                 udelay(10);
6984         }
6985
6986         /* Chip might not be fitted with firmware.  Some Sun onboard
6987          * parts are configured like that.  So don't signal the timeout
6988          * of the above loop as an error, but do report the lack of
6989          * running firmware once.
6990          */
6991         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
6992                 tg3_flag_set(tp, NO_FWARE_REPORTED);
6993
6994                 netdev_info(tp->dev, "No firmware running\n");
6995         }
6996
6997         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
6998                 /* The 57765 A0 needs a little more
6999                  * time to do some important work.
7000                  */
7001                 mdelay(10);
7002         }
7003
7004         return 0;
7005 }
7006
7007 /* Save PCI command register before chip reset */
7008 static void tg3_save_pci_state(struct tg3 *tp)
7009 {
7010         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7011 }
7012
7013 /* Restore PCI state after chip reset */
7014 static void tg3_restore_pci_state(struct tg3 *tp)
7015 {
7016         u32 val;
7017
7018         /* Re-enable indirect register accesses. */
7019         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7020                                tp->misc_host_ctrl);
7021
7022         /* Set MAX PCI retry to zero. */
7023         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7024         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7025             tg3_flag(tp, PCIX_MODE))
7026                 val |= PCISTATE_RETRY_SAME_DMA;
7027         /* Allow reads and writes to the APE register and memory space. */
7028         if (tg3_flag(tp, ENABLE_APE))
7029                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7030                        PCISTATE_ALLOW_APE_SHMEM_WR |
7031                        PCISTATE_ALLOW_APE_PSPACE_WR;
7032         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7033
7034         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7035
7036         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7037                 if (tg3_flag(tp, PCI_EXPRESS))
7038                         pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7039                 else {
7040                         pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7041                                               tp->pci_cacheline_sz);
7042                         pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7043                                               tp->pci_lat_timer);
7044                 }
7045         }
7046
7047         /* Make sure PCI-X relaxed ordering bit is clear. */
7048         if (tg3_flag(tp, PCIX_MODE)) {
7049                 u16 pcix_cmd;
7050
7051                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7052                                      &pcix_cmd);
7053                 pcix_cmd &= ~PCI_X_CMD_ERO;
7054                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7055                                       pcix_cmd);
7056         }
7057
7058         if (tg3_flag(tp, 5780_CLASS)) {
7059
7060                 /* Chip reset on 5780 will reset MSI enable bit,
7061                  * so need to restore it.
7062                  */
7063                 if (tg3_flag(tp, USING_MSI)) {
7064                         u16 ctrl;
7065
7066                         pci_read_config_word(tp->pdev,
7067                                              tp->msi_cap + PCI_MSI_FLAGS,
7068                                              &ctrl);
7069                         pci_write_config_word(tp->pdev,
7070                                               tp->msi_cap + PCI_MSI_FLAGS,
7071                                               ctrl | PCI_MSI_FLAGS_ENABLE);
7072                         val = tr32(MSGINT_MODE);
7073                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7074                 }
7075         }
7076 }
7077
7078 static void tg3_stop_fw(struct tg3 *);
7079
7080 /* tp->lock is held. */
7081 static int tg3_chip_reset(struct tg3 *tp)
7082 {
7083         u32 val;
7084         void (*write_op)(struct tg3 *, u32, u32);
7085         int i, err;
7086
7087         tg3_nvram_lock(tp);
7088
7089         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7090
7091         /* No matching tg3_nvram_unlock() after this because
7092          * chip reset below will undo the nvram lock.
7093          */
7094         tp->nvram_lock_cnt = 0;
7095
7096         /* GRC_MISC_CFG core clock reset will clear the memory
7097          * enable bit in PCI register 4 and the MSI enable bit
7098          * on some chips, so we save relevant registers here.
7099          */
7100         tg3_save_pci_state(tp);
7101
7102         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7103             tg3_flag(tp, 5755_PLUS))
7104                 tw32(GRC_FASTBOOT_PC, 0);
7105
7106         /*
7107          * We must avoid the readl() that normally takes place.
7108          * It locks machines, causes machine checks, and other
7109          * fun things.  So, temporarily disable the 5701
7110          * hardware workaround, while we do the reset.
7111          */
7112         write_op = tp->write32;
7113         if (write_op == tg3_write_flush_reg32)
7114                 tp->write32 = tg3_write32;
7115
7116         /* Prevent the irq handler from reading or writing PCI registers
7117          * during chip reset when the memory enable bit in the PCI command
7118          * register may be cleared.  The chip does not generate interrupt
7119          * at this time, but the irq handler may still be called due to irq
7120          * sharing or irqpoll.
7121          */
7122         tg3_flag_set(tp, CHIP_RESETTING);
7123         for (i = 0; i < tp->irq_cnt; i++) {
7124                 struct tg3_napi *tnapi = &tp->napi[i];
7125                 if (tnapi->hw_status) {
7126                         tnapi->hw_status->status = 0;
7127                         tnapi->hw_status->status_tag = 0;
7128                 }
7129                 tnapi->last_tag = 0;
7130                 tnapi->last_irq_tag = 0;
7131         }
7132         smp_mb();
7133
7134         for (i = 0; i < tp->irq_cnt; i++)
7135                 synchronize_irq(tp->napi[i].irq_vec);
7136
7137         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7138                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7139                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7140         }
7141
7142         /* do the reset */
7143         val = GRC_MISC_CFG_CORECLK_RESET;
7144
7145         if (tg3_flag(tp, PCI_EXPRESS)) {
7146                 /* Force PCIe 1.0a mode */
7147                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7148                     !tg3_flag(tp, 57765_PLUS) &&
7149                     tr32(TG3_PCIE_PHY_TSTCTL) ==
7150                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7151                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7152
7153                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7154                         tw32(GRC_MISC_CFG, (1 << 29));
7155                         val |= (1 << 29);
7156                 }
7157         }
7158
7159         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7160                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7161                 tw32(GRC_VCPU_EXT_CTRL,
7162                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7163         }
7164
7165         /* Manage gphy power for all CPMU absent PCIe devices. */
7166         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7167                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7168
7169         tw32(GRC_MISC_CFG, val);
7170
7171         /* restore 5701 hardware bug workaround write method */
7172         tp->write32 = write_op;
7173
7174         /* Unfortunately, we have to delay before the PCI read back.
7175          * Some 575X chips even will not respond to a PCI cfg access
7176          * when the reset command is given to the chip.
7177          *
7178          * How do these hardware designers expect things to work
7179          * properly if the PCI write is posted for a long period
7180          * of time?  It is always necessary to have some method by
7181          * which a register read back can occur to push the write
7182          * out which does the reset.
7183          *
7184          * For most tg3 variants the trick below was working.
7185          * Ho hum...
7186          */
7187         udelay(120);
7188
7189         /* Flush PCI posted writes.  The normal MMIO registers
7190          * are inaccessible at this time so this is the only
7191          * way to make this reliably (actually, this is no longer
7192          * the case, see above).  I tried to use indirect
7193          * register read/write but this upset some 5701 variants.
7194          */
7195         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7196
7197         udelay(120);
7198
7199         if (tg3_flag(tp, PCI_EXPRESS) && tp->pcie_cap) {
7200                 u16 val16;
7201
7202                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7203                         int i;
7204                         u32 cfg_val;
7205
7206                         /* Wait for link training to complete.  */
7207                         for (i = 0; i < 5000; i++)
7208                                 udelay(100);
7209
7210                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7211                         pci_write_config_dword(tp->pdev, 0xc4,
7212                                                cfg_val | (1 << 15));
7213                 }
7214
7215                 /* Clear the "no snoop" and "relaxed ordering" bits. */
7216                 pci_read_config_word(tp->pdev,
7217                                      tp->pcie_cap + PCI_EXP_DEVCTL,
7218                                      &val16);
7219                 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7220                            PCI_EXP_DEVCTL_NOSNOOP_EN);
7221                 /*
7222                  * Older PCIe devices only support the 128 byte
7223                  * MPS setting.  Enforce the restriction.
7224                  */
7225                 if (!tg3_flag(tp, CPMU_PRESENT))
7226                         val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7227                 pci_write_config_word(tp->pdev,
7228                                       tp->pcie_cap + PCI_EXP_DEVCTL,
7229                                       val16);
7230
7231                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7232
7233                 /* Clear error status */
7234                 pci_write_config_word(tp->pdev,
7235                                       tp->pcie_cap + PCI_EXP_DEVSTA,
7236                                       PCI_EXP_DEVSTA_CED |
7237                                       PCI_EXP_DEVSTA_NFED |
7238                                       PCI_EXP_DEVSTA_FED |
7239                                       PCI_EXP_DEVSTA_URD);
7240         }
7241
7242         tg3_restore_pci_state(tp);
7243
7244         tg3_flag_clear(tp, CHIP_RESETTING);
7245         tg3_flag_clear(tp, ERROR_PROCESSED);
7246
7247         val = 0;
7248         if (tg3_flag(tp, 5780_CLASS))
7249                 val = tr32(MEMARB_MODE);
7250         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7251
7252         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7253                 tg3_stop_fw(tp);
7254                 tw32(0x5000, 0x400);
7255         }
7256
7257         tw32(GRC_MODE, tp->grc_mode);
7258
7259         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7260                 val = tr32(0xc4);
7261
7262                 tw32(0xc4, val | (1 << 15));
7263         }
7264
7265         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7266             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7267                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7268                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7269                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7270                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7271         }
7272
7273         if (tg3_flag(tp, ENABLE_APE))
7274                 tp->mac_mode = MAC_MODE_APE_TX_EN |
7275                                MAC_MODE_APE_RX_EN |
7276                                MAC_MODE_TDE_ENABLE;
7277
7278         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7279                 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
7280                 val = tp->mac_mode;
7281         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7282                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7283                 val = tp->mac_mode;
7284         } else
7285                 val = 0;
7286
7287         tw32_f(MAC_MODE, val);
7288         udelay(40);
7289
7290         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7291
7292         err = tg3_poll_fw(tp);
7293         if (err)
7294                 return err;
7295
7296         tg3_mdio_start(tp);
7297
7298         if (tg3_flag(tp, PCI_EXPRESS) &&
7299             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7300             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7301             !tg3_flag(tp, 57765_PLUS)) {
7302                 val = tr32(0x7c00);
7303
7304                 tw32(0x7c00, val | (1 << 25));
7305         }
7306
7307         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7308                 val = tr32(TG3_CPMU_CLCK_ORIDE);
7309                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7310         }
7311
7312         /* Reprobe ASF enable state.  */
7313         tg3_flag_clear(tp, ENABLE_ASF);
7314         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7315         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7316         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7317                 u32 nic_cfg;
7318
7319                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7320                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7321                         tg3_flag_set(tp, ENABLE_ASF);
7322                         tp->last_event_jiffies = jiffies;
7323                         if (tg3_flag(tp, 5750_PLUS))
7324                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7325                 }
7326         }
7327
7328         return 0;
7329 }
7330
7331 /* tp->lock is held. */
7332 static void tg3_stop_fw(struct tg3 *tp)
7333 {
7334         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
7335                 /* Wait for RX cpu to ACK the previous event. */
7336                 tg3_wait_for_event_ack(tp);
7337
7338                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7339
7340                 tg3_generate_fw_event(tp);
7341
7342                 /* Wait for RX cpu to ACK this event. */
7343                 tg3_wait_for_event_ack(tp);
7344         }
7345 }
7346
7347 /* tp->lock is held. */
7348 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7349 {
7350         int err;
7351
7352         tg3_stop_fw(tp);
7353
7354         tg3_write_sig_pre_reset(tp, kind);
7355
7356         tg3_abort_hw(tp, silent);
7357         err = tg3_chip_reset(tp);
7358
7359         __tg3_set_mac_addr(tp, 0);
7360
7361         tg3_write_sig_legacy(tp, kind);
7362         tg3_write_sig_post_reset(tp, kind);
7363
7364         if (err)
7365                 return err;
7366
7367         return 0;
7368 }
7369
7370 #define RX_CPU_SCRATCH_BASE     0x30000
7371 #define RX_CPU_SCRATCH_SIZE     0x04000
7372 #define TX_CPU_SCRATCH_BASE     0x34000
7373 #define TX_CPU_SCRATCH_SIZE     0x04000
7374
7375 /* tp->lock is held. */
7376 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7377 {
7378         int i;
7379
7380         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
7381
7382         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7383                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7384
7385                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7386                 return 0;
7387         }
7388         if (offset == RX_CPU_BASE) {
7389                 for (i = 0; i < 10000; i++) {
7390                         tw32(offset + CPU_STATE, 0xffffffff);
7391                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
7392                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7393                                 break;
7394                 }
7395
7396                 tw32(offset + CPU_STATE, 0xffffffff);
7397                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
7398                 udelay(10);
7399         } else {
7400                 for (i = 0; i < 10000; i++) {
7401                         tw32(offset + CPU_STATE, 0xffffffff);
7402                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
7403                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7404                                 break;
7405                 }
7406         }
7407
7408         if (i >= 10000) {
7409                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7410                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7411                 return -ENODEV;
7412         }
7413
7414         /* Clear firmware's nvram arbitration. */
7415         if (tg3_flag(tp, NVRAM))
7416                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7417         return 0;
7418 }
7419
7420 struct fw_info {
7421         unsigned int fw_base;
7422         unsigned int fw_len;
7423         const __be32 *fw_data;
7424 };
7425
7426 /* tp->lock is held. */
7427 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7428                                  int cpu_scratch_size, struct fw_info *info)
7429 {
7430         int err, lock_err, i;
7431         void (*write_op)(struct tg3 *, u32, u32);
7432
7433         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
7434                 netdev_err(tp->dev,
7435                            "%s: Trying to load TX cpu firmware which is 5705\n",
7436                            __func__);
7437                 return -EINVAL;
7438         }
7439
7440         if (tg3_flag(tp, 5705_PLUS))
7441                 write_op = tg3_write_mem;
7442         else
7443                 write_op = tg3_write_indirect_reg32;
7444
7445         /* It is possible that bootcode is still loading at this point.
7446          * Get the nvram lock first before halting the cpu.
7447          */
7448         lock_err = tg3_nvram_lock(tp);
7449         err = tg3_halt_cpu(tp, cpu_base);
7450         if (!lock_err)
7451                 tg3_nvram_unlock(tp);
7452         if (err)
7453                 goto out;
7454
7455         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7456                 write_op(tp, cpu_scratch_base + i, 0);
7457         tw32(cpu_base + CPU_STATE, 0xffffffff);
7458         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
7459         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7460                 write_op(tp, (cpu_scratch_base +
7461                               (info->fw_base & 0xffff) +
7462                               (i * sizeof(u32))),
7463                               be32_to_cpu(info->fw_data[i]));
7464
7465         err = 0;
7466
7467 out:
7468         return err;
7469 }
7470
7471 /* tp->lock is held. */
7472 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7473 {
7474         struct fw_info info;
7475         const __be32 *fw_data;
7476         int err, i;
7477
7478         fw_data = (void *)tp->fw->data;
7479
7480         /* Firmware blob starts with version numbers, followed by
7481            start address and length. We are setting complete length.
7482            length = end_address_of_bss - start_address_of_text.
7483            Remainder is the blob to be loaded contiguously
7484            from start address. */
7485
7486         info.fw_base = be32_to_cpu(fw_data[1]);
7487         info.fw_len = tp->fw->size - 12;
7488         info.fw_data = &fw_data[3];
7489
7490         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7491                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7492                                     &info);
7493         if (err)
7494                 return err;
7495
7496         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7497                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7498                                     &info);
7499         if (err)
7500                 return err;
7501
7502         /* Now startup only the RX cpu. */
7503         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7504         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7505
7506         for (i = 0; i < 5; i++) {
7507                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7508                         break;
7509                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7510                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
7511                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7512                 udelay(1000);
7513         }
7514         if (i >= 5) {
7515                 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
7516                            "should be %08x\n", __func__,
7517                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7518                 return -ENODEV;
7519         }
7520         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7521         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
7522
7523         return 0;
7524 }
7525
7526 /* tp->lock is held. */
7527 static int tg3_load_tso_firmware(struct tg3 *tp)
7528 {
7529         struct fw_info info;
7530         const __be32 *fw_data;
7531         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7532         int err, i;
7533
7534         if (tg3_flag(tp, HW_TSO_1) ||
7535             tg3_flag(tp, HW_TSO_2) ||
7536             tg3_flag(tp, HW_TSO_3))
7537                 return 0;
7538
7539         fw_data = (void *)tp->fw->data;
7540
7541         /* Firmware blob starts with version numbers, followed by
7542            start address and length. We are setting complete length.
7543            length = end_address_of_bss - start_address_of_text.
7544            Remainder is the blob to be loaded contiguously
7545            from start address. */
7546
7547         info.fw_base = be32_to_cpu(fw_data[1]);
7548         cpu_scratch_size = tp->fw_len;
7549         info.fw_len = tp->fw->size - 12;
7550         info.fw_data = &fw_data[3];
7551
7552         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7553                 cpu_base = RX_CPU_BASE;
7554                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7555         } else {
7556                 cpu_base = TX_CPU_BASE;
7557                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7558                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7559         }
7560
7561         err = tg3_load_firmware_cpu(tp, cpu_base,
7562                                     cpu_scratch_base, cpu_scratch_size,
7563                                     &info);
7564         if (err)
7565                 return err;
7566
7567         /* Now startup the cpu. */
7568         tw32(cpu_base + CPU_STATE, 0xffffffff);
7569         tw32_f(cpu_base + CPU_PC, info.fw_base);
7570
7571         for (i = 0; i < 5; i++) {
7572                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7573                         break;
7574                 tw32(cpu_base + CPU_STATE, 0xffffffff);
7575                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
7576                 tw32_f(cpu_base + CPU_PC, info.fw_base);
7577                 udelay(1000);
7578         }
7579         if (i >= 5) {
7580                 netdev_err(tp->dev,
7581                            "%s fails to set CPU PC, is %08x should be %08x\n",
7582                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7583                 return -ENODEV;
7584         }
7585         tw32(cpu_base + CPU_STATE, 0xffffffff);
7586         tw32_f(cpu_base + CPU_MODE,  0x00000000);
7587         return 0;
7588 }
7589
7590
7591 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7592 {
7593         struct tg3 *tp = netdev_priv(dev);
7594         struct sockaddr *addr = p;
7595         int err = 0, skip_mac_1 = 0;
7596
7597         if (!is_valid_ether_addr(addr->sa_data))
7598                 return -EINVAL;
7599
7600         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7601
7602         if (!netif_running(dev))
7603                 return 0;
7604
7605         if (tg3_flag(tp, ENABLE_ASF)) {
7606                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7607
7608                 addr0_high = tr32(MAC_ADDR_0_HIGH);
7609                 addr0_low = tr32(MAC_ADDR_0_LOW);
7610                 addr1_high = tr32(MAC_ADDR_1_HIGH);
7611                 addr1_low = tr32(MAC_ADDR_1_LOW);
7612
7613                 /* Skip MAC addr 1 if ASF is using it. */
7614                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7615                     !(addr1_high == 0 && addr1_low == 0))
7616                         skip_mac_1 = 1;
7617         }
7618         spin_lock_bh(&tp->lock);
7619         __tg3_set_mac_addr(tp, skip_mac_1);
7620         spin_unlock_bh(&tp->lock);
7621
7622         return err;
7623 }
7624
7625 /* tp->lock is held. */
7626 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7627                            dma_addr_t mapping, u32 maxlen_flags,
7628                            u32 nic_addr)
7629 {
7630         tg3_write_mem(tp,
7631                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7632                       ((u64) mapping >> 32));
7633         tg3_write_mem(tp,
7634                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7635                       ((u64) mapping & 0xffffffff));
7636         tg3_write_mem(tp,
7637                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7638                        maxlen_flags);
7639
7640         if (!tg3_flag(tp, 5705_PLUS))
7641                 tg3_write_mem(tp,
7642                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7643                               nic_addr);
7644 }
7645
7646 static void __tg3_set_rx_mode(struct net_device *);
7647 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7648 {
7649         int i;
7650
7651         if (!tg3_flag(tp, ENABLE_TSS)) {
7652                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7653                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7654                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7655         } else {
7656                 tw32(HOSTCC_TXCOL_TICKS, 0);
7657                 tw32(HOSTCC_TXMAX_FRAMES, 0);
7658                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7659         }
7660
7661         if (!tg3_flag(tp, ENABLE_RSS)) {
7662                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7663                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7664                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7665         } else {
7666                 tw32(HOSTCC_RXCOL_TICKS, 0);
7667                 tw32(HOSTCC_RXMAX_FRAMES, 0);
7668                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7669         }
7670
7671         if (!tg3_flag(tp, 5705_PLUS)) {
7672                 u32 val = ec->stats_block_coalesce_usecs;
7673
7674                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7675                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7676
7677                 if (!netif_carrier_ok(tp->dev))
7678                         val = 0;
7679
7680                 tw32(HOSTCC_STAT_COAL_TICKS, val);
7681         }
7682
7683         for (i = 0; i < tp->irq_cnt - 1; i++) {
7684                 u32 reg;
7685
7686                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7687                 tw32(reg, ec->rx_coalesce_usecs);
7688                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7689                 tw32(reg, ec->rx_max_coalesced_frames);
7690                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7691                 tw32(reg, ec->rx_max_coalesced_frames_irq);
7692
7693                 if (tg3_flag(tp, ENABLE_TSS)) {
7694                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7695                         tw32(reg, ec->tx_coalesce_usecs);
7696                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7697                         tw32(reg, ec->tx_max_coalesced_frames);
7698                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7699                         tw32(reg, ec->tx_max_coalesced_frames_irq);
7700                 }
7701         }
7702
7703         for (; i < tp->irq_max - 1; i++) {
7704                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7705                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7706                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7707
7708                 if (tg3_flag(tp, ENABLE_TSS)) {
7709                         tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7710                         tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7711                         tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7712                 }
7713         }
7714 }
7715
7716 /* tp->lock is held. */
7717 static void tg3_rings_reset(struct tg3 *tp)
7718 {
7719         int i;
7720         u32 stblk, txrcb, rxrcb, limit;
7721         struct tg3_napi *tnapi = &tp->napi[0];
7722
7723         /* Disable all transmit rings but the first. */
7724         if (!tg3_flag(tp, 5705_PLUS))
7725                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7726         else if (tg3_flag(tp, 5717_PLUS))
7727                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7728         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7729                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7730         else
7731                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7732
7733         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7734              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7735                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7736                               BDINFO_FLAGS_DISABLED);
7737
7738
7739         /* Disable all receive return rings but the first. */
7740         if (tg3_flag(tp, 5717_PLUS))
7741                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7742         else if (!tg3_flag(tp, 5705_PLUS))
7743                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7744         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7745                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7746                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7747         else
7748                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7749
7750         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7751              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7752                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7753                               BDINFO_FLAGS_DISABLED);
7754
7755         /* Disable interrupts */
7756         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7757         tp->napi[0].chk_msi_cnt = 0;
7758         tp->napi[0].last_rx_cons = 0;
7759         tp->napi[0].last_tx_cons = 0;
7760
7761         /* Zero mailbox registers. */
7762         if (tg3_flag(tp, SUPPORT_MSIX)) {
7763                 for (i = 1; i < tp->irq_max; i++) {
7764                         tp->napi[i].tx_prod = 0;
7765                         tp->napi[i].tx_cons = 0;
7766                         if (tg3_flag(tp, ENABLE_TSS))
7767                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
7768                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
7769                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7770                         tp->napi[0].chk_msi_cnt = 0;
7771                         tp->napi[i].last_rx_cons = 0;
7772                         tp->napi[i].last_tx_cons = 0;
7773                 }
7774                 if (!tg3_flag(tp, ENABLE_TSS))
7775                         tw32_mailbox(tp->napi[0].prodmbox, 0);
7776         } else {
7777                 tp->napi[0].tx_prod = 0;
7778                 tp->napi[0].tx_cons = 0;
7779                 tw32_mailbox(tp->napi[0].prodmbox, 0);
7780                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7781         }
7782
7783         /* Make sure the NIC-based send BD rings are disabled. */
7784         if (!tg3_flag(tp, 5705_PLUS)) {
7785                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7786                 for (i = 0; i < 16; i++)
7787                         tw32_tx_mbox(mbox + i * 8, 0);
7788         }
7789
7790         txrcb = NIC_SRAM_SEND_RCB;
7791         rxrcb = NIC_SRAM_RCV_RET_RCB;
7792
7793         /* Clear status block in ram. */
7794         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7795
7796         /* Set status block DMA address */
7797         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7798              ((u64) tnapi->status_mapping >> 32));
7799         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7800              ((u64) tnapi->status_mapping & 0xffffffff));
7801
7802         if (tnapi->tx_ring) {
7803                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7804                                (TG3_TX_RING_SIZE <<
7805                                 BDINFO_FLAGS_MAXLEN_SHIFT),
7806                                NIC_SRAM_TX_BUFFER_DESC);
7807                 txrcb += TG3_BDINFO_SIZE;
7808         }
7809
7810         if (tnapi->rx_rcb) {
7811                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7812                                (tp->rx_ret_ring_mask + 1) <<
7813                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
7814                 rxrcb += TG3_BDINFO_SIZE;
7815         }
7816
7817         stblk = HOSTCC_STATBLCK_RING1;
7818
7819         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7820                 u64 mapping = (u64)tnapi->status_mapping;
7821                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7822                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7823
7824                 /* Clear status block in ram. */
7825                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7826
7827                 if (tnapi->tx_ring) {
7828                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7829                                        (TG3_TX_RING_SIZE <<
7830                                         BDINFO_FLAGS_MAXLEN_SHIFT),
7831                                        NIC_SRAM_TX_BUFFER_DESC);
7832                         txrcb += TG3_BDINFO_SIZE;
7833                 }
7834
7835                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7836                                ((tp->rx_ret_ring_mask + 1) <<
7837                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7838
7839                 stblk += 8;
7840                 rxrcb += TG3_BDINFO_SIZE;
7841         }
7842 }
7843
7844 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
7845 {
7846         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
7847
7848         if (!tg3_flag(tp, 5750_PLUS) ||
7849             tg3_flag(tp, 5780_CLASS) ||
7850             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7851             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7852                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
7853         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7854                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
7855                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
7856         else
7857                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
7858
7859         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
7860         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
7861
7862         val = min(nic_rep_thresh, host_rep_thresh);
7863         tw32(RCVBDI_STD_THRESH, val);
7864
7865         if (tg3_flag(tp, 57765_PLUS))
7866                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
7867
7868         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7869                 return;
7870
7871         if (!tg3_flag(tp, 5705_PLUS))
7872                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
7873         else
7874                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
7875
7876         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
7877
7878         val = min(bdcache_maxcnt / 2, host_rep_thresh);
7879         tw32(RCVBDI_JUMBO_THRESH, val);
7880
7881         if (tg3_flag(tp, 57765_PLUS))
7882                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
7883 }
7884
7885 /* tp->lock is held. */
7886 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7887 {
7888         u32 val, rdmac_mode;
7889         int i, err, limit;
7890         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
7891
7892         tg3_disable_ints(tp);
7893
7894         tg3_stop_fw(tp);
7895
7896         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7897
7898         if (tg3_flag(tp, INIT_COMPLETE))
7899                 tg3_abort_hw(tp, 1);
7900
7901         /* Enable MAC control of LPI */
7902         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
7903                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
7904                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
7905                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
7906
7907                 tw32_f(TG3_CPMU_EEE_CTRL,
7908                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
7909
7910                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
7911                       TG3_CPMU_EEEMD_LPI_IN_TX |
7912                       TG3_CPMU_EEEMD_LPI_IN_RX |
7913                       TG3_CPMU_EEEMD_EEE_ENABLE;
7914
7915                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
7916                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
7917
7918                 if (tg3_flag(tp, ENABLE_APE))
7919                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
7920
7921                 tw32_f(TG3_CPMU_EEE_MODE, val);
7922
7923                 tw32_f(TG3_CPMU_EEE_DBTMR1,
7924                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
7925                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
7926
7927                 tw32_f(TG3_CPMU_EEE_DBTMR2,
7928                        TG3_CPMU_DBTMR2_APE_TX_2047US |
7929                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
7930         }
7931
7932         if (reset_phy)
7933                 tg3_phy_reset(tp);
7934
7935         err = tg3_chip_reset(tp);
7936         if (err)
7937                 return err;
7938
7939         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7940
7941         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7942                 val = tr32(TG3_CPMU_CTRL);
7943                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7944                 tw32(TG3_CPMU_CTRL, val);
7945
7946                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7947                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7948                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7949                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7950
7951                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7952                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7953                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7954                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7955
7956                 val = tr32(TG3_CPMU_HST_ACC);
7957                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7958                 val |= CPMU_HST_ACC_MACCLK_6_25;
7959                 tw32(TG3_CPMU_HST_ACC, val);
7960         }
7961
7962         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7963                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
7964                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
7965                        PCIE_PWR_MGMT_L1_THRESH_4MS;
7966                 tw32(PCIE_PWR_MGMT_THRESH, val);
7967
7968                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
7969                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
7970
7971                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
7972
7973                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7974                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7975         }
7976
7977         if (tg3_flag(tp, L1PLLPD_EN)) {
7978                 u32 grc_mode = tr32(GRC_MODE);
7979
7980                 /* Access the lower 1K of PL PCIE block registers. */
7981                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7982                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7983
7984                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
7985                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
7986                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
7987
7988                 tw32(GRC_MODE, grc_mode);
7989         }
7990
7991         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
7992                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7993                         u32 grc_mode = tr32(GRC_MODE);
7994
7995                         /* Access the lower 1K of PL PCIE block registers. */
7996                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7997                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7998
7999                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8000                                    TG3_PCIE_PL_LO_PHYCTL5);
8001                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8002                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8003
8004                         tw32(GRC_MODE, grc_mode);
8005                 }
8006
8007                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8008                         u32 grc_mode = tr32(GRC_MODE);
8009
8010                         /* Access the lower 1K of DL PCIE block registers. */
8011                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8012                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8013
8014                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8015                                    TG3_PCIE_DL_LO_FTSMAX);
8016                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8017                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8018                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8019
8020                         tw32(GRC_MODE, grc_mode);
8021                 }
8022
8023                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8024                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8025                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8026                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8027         }
8028
8029         /* This works around an issue with Athlon chipsets on
8030          * B3 tigon3 silicon.  This bit has no effect on any
8031          * other revision.  But do not set this on PCI Express
8032          * chips and don't even touch the clocks if the CPMU is present.
8033          */
8034         if (!tg3_flag(tp, CPMU_PRESENT)) {
8035                 if (!tg3_flag(tp, PCI_EXPRESS))
8036                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8037                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8038         }
8039
8040         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8041             tg3_flag(tp, PCIX_MODE)) {
8042                 val = tr32(TG3PCI_PCISTATE);
8043                 val |= PCISTATE_RETRY_SAME_DMA;
8044                 tw32(TG3PCI_PCISTATE, val);
8045         }
8046
8047         if (tg3_flag(tp, ENABLE_APE)) {
8048                 /* Allow reads and writes to the
8049                  * APE register and memory space.
8050                  */
8051                 val = tr32(TG3PCI_PCISTATE);
8052                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8053                        PCISTATE_ALLOW_APE_SHMEM_WR |
8054                        PCISTATE_ALLOW_APE_PSPACE_WR;
8055                 tw32(TG3PCI_PCISTATE, val);
8056         }
8057
8058         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8059                 /* Enable some hw fixes.  */
8060                 val = tr32(TG3PCI_MSI_DATA);
8061                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8062                 tw32(TG3PCI_MSI_DATA, val);
8063         }
8064
8065         /* Descriptor ring init may make accesses to the
8066          * NIC SRAM area to setup the TX descriptors, so we
8067          * can only do this after the hardware has been
8068          * successfully reset.
8069          */
8070         err = tg3_init_rings(tp);
8071         if (err)
8072                 return err;
8073
8074         if (tg3_flag(tp, 57765_PLUS)) {
8075                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8076                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8077                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8078                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8079                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8080                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8081                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8082                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8083         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8084                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8085                 /* This value is determined during the probe time DMA
8086                  * engine test, tg3_test_dma.
8087                  */
8088                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8089         }
8090
8091         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8092                           GRC_MODE_4X_NIC_SEND_RINGS |
8093                           GRC_MODE_NO_TX_PHDR_CSUM |
8094                           GRC_MODE_NO_RX_PHDR_CSUM);
8095         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8096
8097         /* Pseudo-header checksum is done by hardware logic and not
8098          * the offload processers, so make the chip do the pseudo-
8099          * header checksums on receive.  For transmit it is more
8100          * convenient to do the pseudo-header checksum in software
8101          * as Linux does that on transmit for us in all cases.
8102          */
8103         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8104
8105         tw32(GRC_MODE,
8106              tp->grc_mode |
8107              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8108
8109         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
8110         val = tr32(GRC_MISC_CFG);
8111         val &= ~0xff;
8112         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8113         tw32(GRC_MISC_CFG, val);
8114
8115         /* Initialize MBUF/DESC pool. */
8116         if (tg3_flag(tp, 5750_PLUS)) {
8117                 /* Do nothing.  */
8118         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8119                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8120                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8121                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8122                 else
8123                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8124                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8125                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8126         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8127                 int fw_len;
8128
8129                 fw_len = tp->fw_len;
8130                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8131                 tw32(BUFMGR_MB_POOL_ADDR,
8132                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8133                 tw32(BUFMGR_MB_POOL_SIZE,
8134                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8135         }
8136
8137         if (tp->dev->mtu <= ETH_DATA_LEN) {
8138                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8139                      tp->bufmgr_config.mbuf_read_dma_low_water);
8140                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8141                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8142                 tw32(BUFMGR_MB_HIGH_WATER,
8143                      tp->bufmgr_config.mbuf_high_water);
8144         } else {
8145                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8146                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8147                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8148                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8149                 tw32(BUFMGR_MB_HIGH_WATER,
8150                      tp->bufmgr_config.mbuf_high_water_jumbo);
8151         }
8152         tw32(BUFMGR_DMA_LOW_WATER,
8153              tp->bufmgr_config.dma_low_water);
8154         tw32(BUFMGR_DMA_HIGH_WATER,
8155              tp->bufmgr_config.dma_high_water);
8156
8157         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8158         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8159                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8160         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8161             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8162             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8163                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8164         tw32(BUFMGR_MODE, val);
8165         for (i = 0; i < 2000; i++) {
8166                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8167                         break;
8168                 udelay(10);
8169         }
8170         if (i >= 2000) {
8171                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8172                 return -ENODEV;
8173         }
8174
8175         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8176                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8177
8178         tg3_setup_rxbd_thresholds(tp);
8179
8180         /* Initialize TG3_BDINFO's at:
8181          *  RCVDBDI_STD_BD:     standard eth size rx ring
8182          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8183          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8184          *
8185          * like so:
8186          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8187          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8188          *                              ring attribute flags
8189          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8190          *
8191          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8192          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8193          *
8194          * The size of each ring is fixed in the firmware, but the location is
8195          * configurable.
8196          */
8197         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8198              ((u64) tpr->rx_std_mapping >> 32));
8199         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8200              ((u64) tpr->rx_std_mapping & 0xffffffff));
8201         if (!tg3_flag(tp, 5717_PLUS))
8202                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8203                      NIC_SRAM_RX_BUFFER_DESC);
8204
8205         /* Disable the mini ring */
8206         if (!tg3_flag(tp, 5705_PLUS))
8207                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8208                      BDINFO_FLAGS_DISABLED);
8209
8210         /* Program the jumbo buffer descriptor ring control
8211          * blocks on those devices that have them.
8212          */
8213         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8214             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8215
8216                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8217                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8218                              ((u64) tpr->rx_jmb_mapping >> 32));
8219                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8220                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8221                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8222                               BDINFO_FLAGS_MAXLEN_SHIFT;
8223                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8224                              val | BDINFO_FLAGS_USE_EXT_RECV);
8225                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8226                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8227                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8228                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8229                 } else {
8230                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8231                              BDINFO_FLAGS_DISABLED);
8232                 }
8233
8234                 if (tg3_flag(tp, 57765_PLUS)) {
8235                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8236                                 val = TG3_RX_STD_MAX_SIZE_5700;
8237                         else
8238                                 val = TG3_RX_STD_MAX_SIZE_5717;
8239                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8240                         val |= (TG3_RX_STD_DMA_SZ << 2);
8241                 } else
8242                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8243         } else
8244                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8245
8246         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8247
8248         tpr->rx_std_prod_idx = tp->rx_pending;
8249         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8250
8251         tpr->rx_jmb_prod_idx =
8252                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8253         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8254
8255         tg3_rings_reset(tp);
8256
8257         /* Initialize MAC address and backoff seed. */
8258         __tg3_set_mac_addr(tp, 0);
8259
8260         /* MTU + ethernet header + FCS + optional VLAN tag */
8261         tw32(MAC_RX_MTU_SIZE,
8262              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8263
8264         /* The slot time is changed by tg3_setup_phy if we
8265          * run at gigabit with half duplex.
8266          */
8267         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8268               (6 << TX_LENGTHS_IPG_SHIFT) |
8269               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8270
8271         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8272                 val |= tr32(MAC_TX_LENGTHS) &
8273                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8274                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8275
8276         tw32(MAC_TX_LENGTHS, val);
8277
8278         /* Receive rules. */
8279         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8280         tw32(RCVLPC_CONFIG, 0x0181);
8281
8282         /* Calculate RDMAC_MODE setting early, we need it to determine
8283          * the RCVLPC_STATE_ENABLE mask.
8284          */
8285         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8286                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8287                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8288                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8289                       RDMAC_MODE_LNGREAD_ENAB);
8290
8291         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8292                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8293
8294         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8295             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8296             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8297                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8298                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8299                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8300
8301         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8302             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8303                 if (tg3_flag(tp, TSO_CAPABLE) &&
8304                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8305                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8306                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8307                            !tg3_flag(tp, IS_5788)) {
8308                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8309                 }
8310         }
8311
8312         if (tg3_flag(tp, PCI_EXPRESS))
8313                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8314
8315         if (tg3_flag(tp, HW_TSO_1) ||
8316             tg3_flag(tp, HW_TSO_2) ||
8317             tg3_flag(tp, HW_TSO_3))
8318                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8319
8320         if (tg3_flag(tp, 57765_PLUS) ||
8321             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8322             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8323                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8324
8325         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8326                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8327
8328         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8329             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8330             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8331             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8332             tg3_flag(tp, 57765_PLUS)) {
8333                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8334                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8335                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8336                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8337                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8338                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8339                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8340                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8341                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8342                 }
8343                 tw32(TG3_RDMA_RSRVCTRL_REG,
8344                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8345         }
8346
8347         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8348             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8349                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8350                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8351                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8352                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8353         }
8354
8355         /* Receive/send statistics. */
8356         if (tg3_flag(tp, 5750_PLUS)) {
8357                 val = tr32(RCVLPC_STATS_ENABLE);
8358                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8359                 tw32(RCVLPC_STATS_ENABLE, val);
8360         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8361                    tg3_flag(tp, TSO_CAPABLE)) {
8362                 val = tr32(RCVLPC_STATS_ENABLE);
8363                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8364                 tw32(RCVLPC_STATS_ENABLE, val);
8365         } else {
8366                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8367         }
8368         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8369         tw32(SNDDATAI_STATSENAB, 0xffffff);
8370         tw32(SNDDATAI_STATSCTRL,
8371              (SNDDATAI_SCTRL_ENABLE |
8372               SNDDATAI_SCTRL_FASTUPD));
8373
8374         /* Setup host coalescing engine. */
8375         tw32(HOSTCC_MODE, 0);
8376         for (i = 0; i < 2000; i++) {
8377                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8378                         break;
8379                 udelay(10);
8380         }
8381
8382         __tg3_set_coalesce(tp, &tp->coal);
8383
8384         if (!tg3_flag(tp, 5705_PLUS)) {
8385                 /* Status/statistics block address.  See tg3_timer,
8386                  * the tg3_periodic_fetch_stats call there, and
8387                  * tg3_get_stats to see how this works for 5705/5750 chips.
8388                  */
8389                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8390                      ((u64) tp->stats_mapping >> 32));
8391                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8392                      ((u64) tp->stats_mapping & 0xffffffff));
8393                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8394
8395                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8396
8397                 /* Clear statistics and status block memory areas */
8398                 for (i = NIC_SRAM_STATS_BLK;
8399                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8400                      i += sizeof(u32)) {
8401                         tg3_write_mem(tp, i, 0);
8402                         udelay(40);
8403                 }
8404         }
8405
8406         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8407
8408         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8409         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8410         if (!tg3_flag(tp, 5705_PLUS))
8411                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8412
8413         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8414                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8415                 /* reset to prevent losing 1st rx packet intermittently */
8416                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8417                 udelay(10);
8418         }
8419
8420         if (tg3_flag(tp, ENABLE_APE))
8421                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8422         else
8423                 tp->mac_mode = 0;
8424         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8425                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
8426         if (!tg3_flag(tp, 5705_PLUS) &&
8427             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8428             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8429                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8430         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8431         udelay(40);
8432
8433         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8434          * If TG3_FLAG_IS_NIC is zero, we should read the
8435          * register to preserve the GPIO settings for LOMs. The GPIOs,
8436          * whether used as inputs or outputs, are set by boot code after
8437          * reset.
8438          */
8439         if (!tg3_flag(tp, IS_NIC)) {
8440                 u32 gpio_mask;
8441
8442                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8443                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8444                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8445
8446                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8447                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8448                                      GRC_LCLCTRL_GPIO_OUTPUT3;
8449
8450                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8451                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8452
8453                 tp->grc_local_ctrl &= ~gpio_mask;
8454                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8455
8456                 /* GPIO1 must be driven high for eeprom write protect */
8457                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8458                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8459                                                GRC_LCLCTRL_GPIO_OUTPUT1);
8460         }
8461         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8462         udelay(100);
8463
8464         if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8465                 val = tr32(MSGINT_MODE);
8466                 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8467                 tw32(MSGINT_MODE, val);
8468         }
8469
8470         if (!tg3_flag(tp, 5705_PLUS)) {
8471                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8472                 udelay(40);
8473         }
8474
8475         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8476                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8477                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8478                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8479                WDMAC_MODE_LNGREAD_ENAB);
8480
8481         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8482             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8483                 if (tg3_flag(tp, TSO_CAPABLE) &&
8484                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8485                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8486                         /* nothing */
8487                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8488                            !tg3_flag(tp, IS_5788)) {
8489                         val |= WDMAC_MODE_RX_ACCEL;
8490                 }
8491         }
8492
8493         /* Enable host coalescing bug fix */
8494         if (tg3_flag(tp, 5755_PLUS))
8495                 val |= WDMAC_MODE_STATUS_TAG_FIX;
8496
8497         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8498                 val |= WDMAC_MODE_BURST_ALL_DATA;
8499
8500         tw32_f(WDMAC_MODE, val);
8501         udelay(40);
8502
8503         if (tg3_flag(tp, PCIX_MODE)) {
8504                 u16 pcix_cmd;
8505
8506                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8507                                      &pcix_cmd);
8508                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8509                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8510                         pcix_cmd |= PCI_X_CMD_READ_2K;
8511                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8512                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8513                         pcix_cmd |= PCI_X_CMD_READ_2K;
8514                 }
8515                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8516                                       pcix_cmd);
8517         }
8518
8519         tw32_f(RDMAC_MODE, rdmac_mode);
8520         udelay(40);
8521
8522         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8523         if (!tg3_flag(tp, 5705_PLUS))
8524                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8525
8526         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8527                 tw32(SNDDATAC_MODE,
8528                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8529         else
8530                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8531
8532         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8533         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8534         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8535         if (tg3_flag(tp, LRG_PROD_RING_CAP))
8536                 val |= RCVDBDI_MODE_LRG_RING_SZ;
8537         tw32(RCVDBDI_MODE, val);
8538         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8539         if (tg3_flag(tp, HW_TSO_1) ||
8540             tg3_flag(tp, HW_TSO_2) ||
8541             tg3_flag(tp, HW_TSO_3))
8542                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8543         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8544         if (tg3_flag(tp, ENABLE_TSS))
8545                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8546         tw32(SNDBDI_MODE, val);
8547         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8548
8549         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8550                 err = tg3_load_5701_a0_firmware_fix(tp);
8551                 if (err)
8552                         return err;
8553         }
8554
8555         if (tg3_flag(tp, TSO_CAPABLE)) {
8556                 err = tg3_load_tso_firmware(tp);
8557                 if (err)
8558                         return err;
8559         }
8560
8561         tp->tx_mode = TX_MODE_ENABLE;
8562
8563         if (tg3_flag(tp, 5755_PLUS) ||
8564             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8565                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8566
8567         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8568                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8569                 tp->tx_mode &= ~val;
8570                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8571         }
8572
8573         tw32_f(MAC_TX_MODE, tp->tx_mode);
8574         udelay(100);
8575
8576         if (tg3_flag(tp, ENABLE_RSS)) {
8577                 u32 reg = MAC_RSS_INDIR_TBL_0;
8578                 u8 *ent = (u8 *)&val;
8579
8580                 /* Setup the indirection table */
8581                 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8582                         int idx = i % sizeof(val);
8583
8584                         ent[idx] = i % (tp->irq_cnt - 1);
8585                         if (idx == sizeof(val) - 1) {
8586                                 tw32(reg, val);
8587                                 reg += 4;
8588                         }
8589                 }
8590
8591                 /* Setup the "secret" hash key. */
8592                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8593                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8594                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8595                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8596                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8597                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8598                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8599                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8600                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8601                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8602         }
8603
8604         tp->rx_mode = RX_MODE_ENABLE;
8605         if (tg3_flag(tp, 5755_PLUS))
8606                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8607
8608         if (tg3_flag(tp, ENABLE_RSS))
8609                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8610                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
8611                                RX_MODE_RSS_IPV6_HASH_EN |
8612                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
8613                                RX_MODE_RSS_IPV4_HASH_EN |
8614                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
8615
8616         tw32_f(MAC_RX_MODE, tp->rx_mode);
8617         udelay(10);
8618
8619         tw32(MAC_LED_CTRL, tp->led_ctrl);
8620
8621         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8622         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8623                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8624                 udelay(10);
8625         }
8626         tw32_f(MAC_RX_MODE, tp->rx_mode);
8627         udelay(10);
8628
8629         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8630                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8631                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8632                         /* Set drive transmission level to 1.2V  */
8633                         /* only if the signal pre-emphasis bit is not set  */
8634                         val = tr32(MAC_SERDES_CFG);
8635                         val &= 0xfffff000;
8636                         val |= 0x880;
8637                         tw32(MAC_SERDES_CFG, val);
8638                 }
8639                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8640                         tw32(MAC_SERDES_CFG, 0x616000);
8641         }
8642
8643         /* Prevent chip from dropping frames when flow control
8644          * is enabled.
8645          */
8646         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8647                 val = 1;
8648         else
8649                 val = 2;
8650         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8651
8652         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8653             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8654                 /* Use hardware link auto-negotiation */
8655                 tg3_flag_set(tp, HW_AUTONEG);
8656         }
8657
8658         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8659             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
8660                 u32 tmp;
8661
8662                 tmp = tr32(SERDES_RX_CTRL);
8663                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8664                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8665                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8666                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8667         }
8668
8669         if (!tg3_flag(tp, USE_PHYLIB)) {
8670                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8671                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8672                         tp->link_config.speed = tp->link_config.orig_speed;
8673                         tp->link_config.duplex = tp->link_config.orig_duplex;
8674                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
8675                 }
8676
8677                 err = tg3_setup_phy(tp, 0);
8678                 if (err)
8679                         return err;
8680
8681                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8682                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8683                         u32 tmp;
8684
8685                         /* Clear CRC stats. */
8686                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8687                                 tg3_writephy(tp, MII_TG3_TEST1,
8688                                              tmp | MII_TG3_TEST1_CRC_EN);
8689                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8690                         }
8691                 }
8692         }
8693
8694         __tg3_set_rx_mode(tp->dev);
8695
8696         /* Initialize receive rules. */
8697         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
8698         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8699         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
8700         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8701
8702         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
8703                 limit = 8;
8704         else
8705                 limit = 16;
8706         if (tg3_flag(tp, ENABLE_ASF))
8707                 limit -= 4;
8708         switch (limit) {
8709         case 16:
8710                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
8711         case 15:
8712                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
8713         case 14:
8714                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
8715         case 13:
8716                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
8717         case 12:
8718                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
8719         case 11:
8720                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
8721         case 10:
8722                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
8723         case 9:
8724                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
8725         case 8:
8726                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
8727         case 7:
8728                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
8729         case 6:
8730                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
8731         case 5:
8732                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
8733         case 4:
8734                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
8735         case 3:
8736                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
8737         case 2:
8738         case 1:
8739
8740         default:
8741                 break;
8742         }
8743
8744         if (tg3_flag(tp, ENABLE_APE))
8745                 /* Write our heartbeat update interval to APE. */
8746                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8747                                 APE_HOST_HEARTBEAT_INT_DISABLE);
8748
8749         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8750
8751         return 0;
8752 }
8753
8754 /* Called at device open time to get the chip ready for
8755  * packet processing.  Invoked with tp->lock held.
8756  */
8757 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8758 {
8759         tg3_switch_clocks(tp);
8760
8761         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8762
8763         return tg3_reset_hw(tp, reset_phy);
8764 }
8765
8766 #define TG3_STAT_ADD32(PSTAT, REG) \
8767 do {    u32 __val = tr32(REG); \
8768         (PSTAT)->low += __val; \
8769         if ((PSTAT)->low < __val) \
8770                 (PSTAT)->high += 1; \
8771 } while (0)
8772
8773 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8774 {
8775         struct tg3_hw_stats *sp = tp->hw_stats;
8776
8777         if (!netif_carrier_ok(tp->dev))
8778                 return;
8779
8780         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8781         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8782         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8783         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8784         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8785         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8786         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8787         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8788         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8789         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8790         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8791         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8792         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8793
8794         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8795         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8796         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8797         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8798         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8799         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8800         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8801         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8802         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8803         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8804         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8805         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8806         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8807         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8808
8809         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
8810         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
8811             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
8812             tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
8813                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8814         } else {
8815                 u32 val = tr32(HOSTCC_FLOW_ATTN);
8816                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
8817                 if (val) {
8818                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
8819                         sp->rx_discards.low += val;
8820                         if (sp->rx_discards.low < val)
8821                                 sp->rx_discards.high += 1;
8822                 }
8823                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
8824         }
8825         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
8826 }
8827
8828 static void tg3_chk_missed_msi(struct tg3 *tp)
8829 {
8830         u32 i;
8831
8832         for (i = 0; i < tp->irq_cnt; i++) {
8833                 struct tg3_napi *tnapi = &tp->napi[i];
8834
8835                 if (tg3_has_work(tnapi)) {
8836                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
8837                             tnapi->last_tx_cons == tnapi->tx_cons) {
8838                                 if (tnapi->chk_msi_cnt < 1) {
8839                                         tnapi->chk_msi_cnt++;
8840                                         return;
8841                                 }
8842                                 tw32_mailbox(tnapi->int_mbox,
8843                                              tnapi->last_tag << 24);
8844                         }
8845                 }
8846                 tnapi->chk_msi_cnt = 0;
8847                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
8848                 tnapi->last_tx_cons = tnapi->tx_cons;
8849         }
8850 }
8851
8852 static void tg3_timer(unsigned long __opaque)
8853 {
8854         struct tg3 *tp = (struct tg3 *) __opaque;
8855
8856         if (tp->irq_sync)
8857                 goto restart_timer;
8858
8859         spin_lock(&tp->lock);
8860
8861         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8862             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8863                 tg3_chk_missed_msi(tp);
8864
8865         if (!tg3_flag(tp, TAGGED_STATUS)) {
8866                 /* All of this garbage is because when using non-tagged
8867                  * IRQ status the mailbox/status_block protocol the chip
8868                  * uses with the cpu is race prone.
8869                  */
8870                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
8871                         tw32(GRC_LOCAL_CTRL,
8872                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
8873                 } else {
8874                         tw32(HOSTCC_MODE, tp->coalesce_mode |
8875                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
8876                 }
8877
8878                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8879                         tg3_flag_set(tp, RESTART_TIMER);
8880                         spin_unlock(&tp->lock);
8881                         schedule_work(&tp->reset_task);
8882                         return;
8883                 }
8884         }
8885
8886         /* This part only runs once per second. */
8887         if (!--tp->timer_counter) {
8888                 if (tg3_flag(tp, 5705_PLUS))
8889                         tg3_periodic_fetch_stats(tp);
8890
8891                 if (tp->setlpicnt && !--tp->setlpicnt)
8892                         tg3_phy_eee_enable(tp);
8893
8894                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
8895                         u32 mac_stat;
8896                         int phy_event;
8897
8898                         mac_stat = tr32(MAC_STATUS);
8899
8900                         phy_event = 0;
8901                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
8902                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
8903                                         phy_event = 1;
8904                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
8905                                 phy_event = 1;
8906
8907                         if (phy_event)
8908                                 tg3_setup_phy(tp, 0);
8909                 } else if (tg3_flag(tp, POLL_SERDES)) {
8910                         u32 mac_stat = tr32(MAC_STATUS);
8911                         int need_setup = 0;
8912
8913                         if (netif_carrier_ok(tp->dev) &&
8914                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
8915                                 need_setup = 1;
8916                         }
8917                         if (!netif_carrier_ok(tp->dev) &&
8918                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
8919                                          MAC_STATUS_SIGNAL_DET))) {
8920                                 need_setup = 1;
8921                         }
8922                         if (need_setup) {
8923                                 if (!tp->serdes_counter) {
8924                                         tw32_f(MAC_MODE,
8925                                              (tp->mac_mode &
8926                                               ~MAC_MODE_PORT_MODE_MASK));
8927                                         udelay(40);
8928                                         tw32_f(MAC_MODE, tp->mac_mode);
8929                                         udelay(40);
8930                                 }
8931                                 tg3_setup_phy(tp, 0);
8932                         }
8933                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8934                            tg3_flag(tp, 5780_CLASS)) {
8935                         tg3_serdes_parallel_detect(tp);
8936                 }
8937
8938                 tp->timer_counter = tp->timer_multiplier;
8939         }
8940
8941         /* Heartbeat is only sent once every 2 seconds.
8942          *
8943          * The heartbeat is to tell the ASF firmware that the host
8944          * driver is still alive.  In the event that the OS crashes,
8945          * ASF needs to reset the hardware to free up the FIFO space
8946          * that may be filled with rx packets destined for the host.
8947          * If the FIFO is full, ASF will no longer function properly.
8948          *
8949          * Unintended resets have been reported on real time kernels
8950          * where the timer doesn't run on time.  Netpoll will also have
8951          * same problem.
8952          *
8953          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
8954          * to check the ring condition when the heartbeat is expiring
8955          * before doing the reset.  This will prevent most unintended
8956          * resets.
8957          */
8958         if (!--tp->asf_counter) {
8959                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
8960                         tg3_wait_for_event_ack(tp);
8961
8962                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
8963                                       FWCMD_NICDRV_ALIVE3);
8964                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
8965                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
8966                                       TG3_FW_UPDATE_TIMEOUT_SEC);
8967
8968                         tg3_generate_fw_event(tp);
8969                 }
8970                 tp->asf_counter = tp->asf_multiplier;
8971         }
8972
8973         spin_unlock(&tp->lock);
8974
8975 restart_timer:
8976         tp->timer.expires = jiffies + tp->timer_offset;
8977         add_timer(&tp->timer);
8978 }
8979
8980 static int tg3_request_irq(struct tg3 *tp, int irq_num)
8981 {
8982         irq_handler_t fn;
8983         unsigned long flags;
8984         char *name;
8985         struct tg3_napi *tnapi = &tp->napi[irq_num];
8986
8987         if (tp->irq_cnt == 1)
8988                 name = tp->dev->name;
8989         else {
8990                 name = &tnapi->irq_lbl[0];
8991                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
8992                 name[IFNAMSIZ-1] = 0;
8993         }
8994
8995         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
8996                 fn = tg3_msi;
8997                 if (tg3_flag(tp, 1SHOT_MSI))
8998                         fn = tg3_msi_1shot;
8999                 flags = 0;
9000         } else {
9001                 fn = tg3_interrupt;
9002                 if (tg3_flag(tp, TAGGED_STATUS))
9003                         fn = tg3_interrupt_tagged;
9004                 flags = IRQF_SHARED;
9005         }
9006
9007         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9008 }
9009
9010 static int tg3_test_interrupt(struct tg3 *tp)
9011 {
9012         struct tg3_napi *tnapi = &tp->napi[0];
9013         struct net_device *dev = tp->dev;
9014         int err, i, intr_ok = 0;
9015         u32 val;
9016
9017         if (!netif_running(dev))
9018                 return -ENODEV;
9019
9020         tg3_disable_ints(tp);
9021
9022         free_irq(tnapi->irq_vec, tnapi);
9023
9024         /*
9025          * Turn off MSI one shot mode.  Otherwise this test has no
9026          * observable way to know whether the interrupt was delivered.
9027          */
9028         if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9029                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9030                 tw32(MSGINT_MODE, val);
9031         }
9032
9033         err = request_irq(tnapi->irq_vec, tg3_test_isr,
9034                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9035         if (err)
9036                 return err;
9037
9038         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9039         tg3_enable_ints(tp);
9040
9041         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9042                tnapi->coal_now);
9043
9044         for (i = 0; i < 5; i++) {
9045                 u32 int_mbox, misc_host_ctrl;
9046
9047                 int_mbox = tr32_mailbox(tnapi->int_mbox);
9048                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9049
9050                 if ((int_mbox != 0) ||
9051                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9052                         intr_ok = 1;
9053                         break;
9054                 }
9055
9056                 msleep(10);
9057         }
9058
9059         tg3_disable_ints(tp);
9060
9061         free_irq(tnapi->irq_vec, tnapi);
9062
9063         err = tg3_request_irq(tp, 0);
9064
9065         if (err)
9066                 return err;
9067
9068         if (intr_ok) {
9069                 /* Reenable MSI one shot mode. */
9070                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9071                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9072                         tw32(MSGINT_MODE, val);
9073                 }
9074                 return 0;
9075         }
9076
9077         return -EIO;
9078 }
9079
9080 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
9081  * successfully restored
9082  */
9083 static int tg3_test_msi(struct tg3 *tp)
9084 {
9085         int err;
9086         u16 pci_cmd;
9087
9088         if (!tg3_flag(tp, USING_MSI))
9089                 return 0;
9090
9091         /* Turn off SERR reporting in case MSI terminates with Master
9092          * Abort.
9093          */
9094         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9095         pci_write_config_word(tp->pdev, PCI_COMMAND,
9096                               pci_cmd & ~PCI_COMMAND_SERR);
9097
9098         err = tg3_test_interrupt(tp);
9099
9100         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9101
9102         if (!err)
9103                 return 0;
9104
9105         /* other failures */
9106         if (err != -EIO)
9107                 return err;
9108
9109         /* MSI test failed, go back to INTx mode */
9110         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9111                     "to INTx mode. Please report this failure to the PCI "
9112                     "maintainer and include system chipset information\n");
9113
9114         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9115
9116         pci_disable_msi(tp->pdev);
9117
9118         tg3_flag_clear(tp, USING_MSI);
9119         tp->napi[0].irq_vec = tp->pdev->irq;
9120
9121         err = tg3_request_irq(tp, 0);
9122         if (err)
9123                 return err;
9124
9125         /* Need to reset the chip because the MSI cycle may have terminated
9126          * with Master Abort.
9127          */
9128         tg3_full_lock(tp, 1);
9129
9130         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9131         err = tg3_init_hw(tp, 1);
9132
9133         tg3_full_unlock(tp);
9134
9135         if (err)
9136                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9137
9138         return err;
9139 }
9140
9141 static int tg3_request_firmware(struct tg3 *tp)
9142 {
9143         const __be32 *fw_data;
9144
9145         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9146                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9147                            tp->fw_needed);
9148                 return -ENOENT;
9149         }
9150
9151         fw_data = (void *)tp->fw->data;
9152
9153         /* Firmware blob starts with version numbers, followed by
9154          * start address and _full_ length including BSS sections
9155          * (which must be longer than the actual data, of course
9156          */
9157
9158         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
9159         if (tp->fw_len < (tp->fw->size - 12)) {
9160                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9161                            tp->fw_len, tp->fw_needed);
9162                 release_firmware(tp->fw);
9163                 tp->fw = NULL;
9164                 return -EINVAL;
9165         }
9166
9167         /* We no longer need firmware; we have it. */
9168         tp->fw_needed = NULL;
9169         return 0;
9170 }
9171
9172 static bool tg3_enable_msix(struct tg3 *tp)
9173 {
9174         int i, rc, cpus = num_online_cpus();
9175         struct msix_entry msix_ent[tp->irq_max];
9176
9177         if (cpus == 1)
9178                 /* Just fallback to the simpler MSI mode. */
9179                 return false;
9180
9181         /*
9182          * We want as many rx rings enabled as there are cpus.
9183          * The first MSIX vector only deals with link interrupts, etc,
9184          * so we add one to the number of vectors we are requesting.
9185          */
9186         tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
9187
9188         for (i = 0; i < tp->irq_max; i++) {
9189                 msix_ent[i].entry  = i;
9190                 msix_ent[i].vector = 0;
9191         }
9192
9193         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9194         if (rc < 0) {
9195                 return false;
9196         } else if (rc != 0) {
9197                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9198                         return false;
9199                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9200                               tp->irq_cnt, rc);
9201                 tp->irq_cnt = rc;
9202         }
9203
9204         for (i = 0; i < tp->irq_max; i++)
9205                 tp->napi[i].irq_vec = msix_ent[i].vector;
9206
9207         netif_set_real_num_tx_queues(tp->dev, 1);
9208         rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9209         if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9210                 pci_disable_msix(tp->pdev);
9211                 return false;
9212         }
9213
9214         if (tp->irq_cnt > 1) {
9215                 tg3_flag_set(tp, ENABLE_RSS);
9216
9217                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9218                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9219                         tg3_flag_set(tp, ENABLE_TSS);
9220                         netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9221                 }
9222         }
9223
9224         return true;
9225 }
9226
9227 static void tg3_ints_init(struct tg3 *tp)
9228 {
9229         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9230             !tg3_flag(tp, TAGGED_STATUS)) {
9231                 /* All MSI supporting chips should support tagged
9232                  * status.  Assert that this is the case.
9233                  */
9234                 netdev_warn(tp->dev,
9235                             "MSI without TAGGED_STATUS? Not using MSI\n");
9236                 goto defcfg;
9237         }
9238
9239         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9240                 tg3_flag_set(tp, USING_MSIX);
9241         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9242                 tg3_flag_set(tp, USING_MSI);
9243
9244         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9245                 u32 msi_mode = tr32(MSGINT_MODE);
9246                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9247                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9248                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9249         }
9250 defcfg:
9251         if (!tg3_flag(tp, USING_MSIX)) {
9252                 tp->irq_cnt = 1;
9253                 tp->napi[0].irq_vec = tp->pdev->irq;
9254                 netif_set_real_num_tx_queues(tp->dev, 1);
9255                 netif_set_real_num_rx_queues(tp->dev, 1);
9256         }
9257 }
9258
9259 static void tg3_ints_fini(struct tg3 *tp)
9260 {
9261         if (tg3_flag(tp, USING_MSIX))
9262                 pci_disable_msix(tp->pdev);
9263         else if (tg3_flag(tp, USING_MSI))
9264                 pci_disable_msi(tp->pdev);
9265         tg3_flag_clear(tp, USING_MSI);
9266         tg3_flag_clear(tp, USING_MSIX);
9267         tg3_flag_clear(tp, ENABLE_RSS);
9268         tg3_flag_clear(tp, ENABLE_TSS);
9269 }
9270
9271 static int tg3_open(struct net_device *dev)
9272 {
9273         struct tg3 *tp = netdev_priv(dev);
9274         int i, err;
9275
9276         if (tp->fw_needed) {
9277                 err = tg3_request_firmware(tp);
9278                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9279                         if (err)
9280                                 return err;
9281                 } else if (err) {
9282                         netdev_warn(tp->dev, "TSO capability disabled\n");
9283                         tg3_flag_clear(tp, TSO_CAPABLE);
9284                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9285                         netdev_notice(tp->dev, "TSO capability restored\n");
9286                         tg3_flag_set(tp, TSO_CAPABLE);
9287                 }
9288         }
9289
9290         netif_carrier_off(tp->dev);
9291
9292         err = tg3_power_up(tp);
9293         if (err)
9294                 return err;
9295
9296         tg3_full_lock(tp, 0);
9297
9298         tg3_disable_ints(tp);
9299         tg3_flag_clear(tp, INIT_COMPLETE);
9300
9301         tg3_full_unlock(tp);
9302
9303         /*
9304          * Setup interrupts first so we know how
9305          * many NAPI resources to allocate
9306          */
9307         tg3_ints_init(tp);
9308
9309         /* The placement of this call is tied
9310          * to the setup and use of Host TX descriptors.
9311          */
9312         err = tg3_alloc_consistent(tp);
9313         if (err)
9314                 goto err_out1;
9315
9316         tg3_napi_init(tp);
9317
9318         tg3_napi_enable(tp);
9319
9320         for (i = 0; i < tp->irq_cnt; i++) {
9321                 struct tg3_napi *tnapi = &tp->napi[i];
9322                 err = tg3_request_irq(tp, i);
9323                 if (err) {
9324                         for (i--; i >= 0; i--)
9325                                 free_irq(tnapi->irq_vec, tnapi);
9326                         break;
9327                 }
9328         }
9329
9330         if (err)
9331                 goto err_out2;
9332
9333         tg3_full_lock(tp, 0);
9334
9335         err = tg3_init_hw(tp, 1);
9336         if (err) {
9337                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9338                 tg3_free_rings(tp);
9339         } else {
9340                 if (tg3_flag(tp, TAGGED_STATUS) &&
9341                         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9342                         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
9343                         tp->timer_offset = HZ;
9344                 else
9345                         tp->timer_offset = HZ / 10;
9346
9347                 BUG_ON(tp->timer_offset > HZ);
9348                 tp->timer_counter = tp->timer_multiplier =
9349                         (HZ / tp->timer_offset);
9350                 tp->asf_counter = tp->asf_multiplier =
9351                         ((HZ / tp->timer_offset) * 2);
9352
9353                 init_timer(&tp->timer);
9354                 tp->timer.expires = jiffies + tp->timer_offset;
9355                 tp->timer.data = (unsigned long) tp;
9356                 tp->timer.function = tg3_timer;
9357         }
9358
9359         tg3_full_unlock(tp);
9360
9361         if (err)
9362                 goto err_out3;
9363
9364         if (tg3_flag(tp, USING_MSI)) {
9365                 err = tg3_test_msi(tp);
9366
9367                 if (err) {
9368                         tg3_full_lock(tp, 0);
9369                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9370                         tg3_free_rings(tp);
9371                         tg3_full_unlock(tp);
9372
9373                         goto err_out2;
9374                 }
9375
9376                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9377                         u32 val = tr32(PCIE_TRANSACTION_CFG);
9378
9379                         tw32(PCIE_TRANSACTION_CFG,
9380                              val | PCIE_TRANS_CFG_1SHOT_MSI);
9381                 }
9382         }
9383
9384         tg3_phy_start(tp);
9385
9386         tg3_full_lock(tp, 0);
9387
9388         add_timer(&tp->timer);
9389         tg3_flag_set(tp, INIT_COMPLETE);
9390         tg3_enable_ints(tp);
9391
9392         tg3_full_unlock(tp);
9393
9394         netif_tx_start_all_queues(dev);
9395
9396         /*
9397          * Reset loopback feature if it was turned on while the device was down
9398          * make sure that it's installed properly now.
9399          */
9400         if (dev->features & NETIF_F_LOOPBACK)
9401                 tg3_set_loopback(dev, dev->features);
9402
9403         return 0;
9404
9405 err_out3:
9406         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9407                 struct tg3_napi *tnapi = &tp->napi[i];
9408                 free_irq(tnapi->irq_vec, tnapi);
9409         }
9410
9411 err_out2:
9412         tg3_napi_disable(tp);
9413         tg3_napi_fini(tp);
9414         tg3_free_consistent(tp);
9415
9416 err_out1:
9417         tg3_ints_fini(tp);
9418         return err;
9419 }
9420
9421 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9422                                                  struct rtnl_link_stats64 *);
9423 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9424
9425 static int tg3_close(struct net_device *dev)
9426 {
9427         int i;
9428         struct tg3 *tp = netdev_priv(dev);
9429
9430         tg3_napi_disable(tp);
9431         cancel_work_sync(&tp->reset_task);
9432
9433         netif_tx_stop_all_queues(dev);
9434
9435         del_timer_sync(&tp->timer);
9436
9437         tg3_phy_stop(tp);
9438
9439         tg3_full_lock(tp, 1);
9440
9441         tg3_disable_ints(tp);
9442
9443         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9444         tg3_free_rings(tp);
9445         tg3_flag_clear(tp, INIT_COMPLETE);
9446
9447         tg3_full_unlock(tp);
9448
9449         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9450                 struct tg3_napi *tnapi = &tp->napi[i];
9451                 free_irq(tnapi->irq_vec, tnapi);
9452         }
9453
9454         tg3_ints_fini(tp);
9455
9456         tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9457
9458         memcpy(&tp->estats_prev, tg3_get_estats(tp),
9459                sizeof(tp->estats_prev));
9460
9461         tg3_napi_fini(tp);
9462
9463         tg3_free_consistent(tp);
9464
9465         tg3_power_down(tp);
9466
9467         netif_carrier_off(tp->dev);
9468
9469         return 0;
9470 }
9471
9472 static inline u64 get_stat64(tg3_stat64_t *val)
9473 {
9474        return ((u64)val->high << 32) | ((u64)val->low);
9475 }
9476
9477 static u64 calc_crc_errors(struct tg3 *tp)
9478 {
9479         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9480
9481         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9482             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9483              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9484                 u32 val;
9485
9486                 spin_lock_bh(&tp->lock);
9487                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9488                         tg3_writephy(tp, MII_TG3_TEST1,
9489                                      val | MII_TG3_TEST1_CRC_EN);
9490                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9491                 } else
9492                         val = 0;
9493                 spin_unlock_bh(&tp->lock);
9494
9495                 tp->phy_crc_errors += val;
9496
9497                 return tp->phy_crc_errors;
9498         }
9499
9500         return get_stat64(&hw_stats->rx_fcs_errors);
9501 }
9502
9503 #define ESTAT_ADD(member) \
9504         estats->member =        old_estats->member + \
9505                                 get_stat64(&hw_stats->member)
9506
9507 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9508 {
9509         struct tg3_ethtool_stats *estats = &tp->estats;
9510         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9511         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9512
9513         if (!hw_stats)
9514                 return old_estats;
9515
9516         ESTAT_ADD(rx_octets);
9517         ESTAT_ADD(rx_fragments);
9518         ESTAT_ADD(rx_ucast_packets);
9519         ESTAT_ADD(rx_mcast_packets);
9520         ESTAT_ADD(rx_bcast_packets);
9521         ESTAT_ADD(rx_fcs_errors);
9522         ESTAT_ADD(rx_align_errors);
9523         ESTAT_ADD(rx_xon_pause_rcvd);
9524         ESTAT_ADD(rx_xoff_pause_rcvd);
9525         ESTAT_ADD(rx_mac_ctrl_rcvd);
9526         ESTAT_ADD(rx_xoff_entered);
9527         ESTAT_ADD(rx_frame_too_long_errors);
9528         ESTAT_ADD(rx_jabbers);
9529         ESTAT_ADD(rx_undersize_packets);
9530         ESTAT_ADD(rx_in_length_errors);
9531         ESTAT_ADD(rx_out_length_errors);
9532         ESTAT_ADD(rx_64_or_less_octet_packets);
9533         ESTAT_ADD(rx_65_to_127_octet_packets);
9534         ESTAT_ADD(rx_128_to_255_octet_packets);
9535         ESTAT_ADD(rx_256_to_511_octet_packets);
9536         ESTAT_ADD(rx_512_to_1023_octet_packets);
9537         ESTAT_ADD(rx_1024_to_1522_octet_packets);
9538         ESTAT_ADD(rx_1523_to_2047_octet_packets);
9539         ESTAT_ADD(rx_2048_to_4095_octet_packets);
9540         ESTAT_ADD(rx_4096_to_8191_octet_packets);
9541         ESTAT_ADD(rx_8192_to_9022_octet_packets);
9542
9543         ESTAT_ADD(tx_octets);
9544         ESTAT_ADD(tx_collisions);
9545         ESTAT_ADD(tx_xon_sent);
9546         ESTAT_ADD(tx_xoff_sent);
9547         ESTAT_ADD(tx_flow_control);
9548         ESTAT_ADD(tx_mac_errors);
9549         ESTAT_ADD(tx_single_collisions);
9550         ESTAT_ADD(tx_mult_collisions);
9551         ESTAT_ADD(tx_deferred);
9552         ESTAT_ADD(tx_excessive_collisions);
9553         ESTAT_ADD(tx_late_collisions);
9554         ESTAT_ADD(tx_collide_2times);
9555         ESTAT_ADD(tx_collide_3times);
9556         ESTAT_ADD(tx_collide_4times);
9557         ESTAT_ADD(tx_collide_5times);
9558         ESTAT_ADD(tx_collide_6times);
9559         ESTAT_ADD(tx_collide_7times);
9560         ESTAT_ADD(tx_collide_8times);
9561         ESTAT_ADD(tx_collide_9times);
9562         ESTAT_ADD(tx_collide_10times);
9563         ESTAT_ADD(tx_collide_11times);
9564         ESTAT_ADD(tx_collide_12times);
9565         ESTAT_ADD(tx_collide_13times);
9566         ESTAT_ADD(tx_collide_14times);
9567         ESTAT_ADD(tx_collide_15times);
9568         ESTAT_ADD(tx_ucast_packets);
9569         ESTAT_ADD(tx_mcast_packets);
9570         ESTAT_ADD(tx_bcast_packets);
9571         ESTAT_ADD(tx_carrier_sense_errors);
9572         ESTAT_ADD(tx_discards);
9573         ESTAT_ADD(tx_errors);
9574
9575         ESTAT_ADD(dma_writeq_full);
9576         ESTAT_ADD(dma_write_prioq_full);
9577         ESTAT_ADD(rxbds_empty);
9578         ESTAT_ADD(rx_discards);
9579         ESTAT_ADD(rx_errors);
9580         ESTAT_ADD(rx_threshold_hit);
9581
9582         ESTAT_ADD(dma_readq_full);
9583         ESTAT_ADD(dma_read_prioq_full);
9584         ESTAT_ADD(tx_comp_queue_full);
9585
9586         ESTAT_ADD(ring_set_send_prod_index);
9587         ESTAT_ADD(ring_status_update);
9588         ESTAT_ADD(nic_irqs);
9589         ESTAT_ADD(nic_avoided_irqs);
9590         ESTAT_ADD(nic_tx_threshold_hit);
9591
9592         ESTAT_ADD(mbuf_lwm_thresh_hit);
9593
9594         return estats;
9595 }
9596
9597 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9598                                                  struct rtnl_link_stats64 *stats)
9599 {
9600         struct tg3 *tp = netdev_priv(dev);
9601         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9602         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9603
9604         if (!hw_stats)
9605                 return old_stats;
9606
9607         stats->rx_packets = old_stats->rx_packets +
9608                 get_stat64(&hw_stats->rx_ucast_packets) +
9609                 get_stat64(&hw_stats->rx_mcast_packets) +
9610                 get_stat64(&hw_stats->rx_bcast_packets);
9611
9612         stats->tx_packets = old_stats->tx_packets +
9613                 get_stat64(&hw_stats->tx_ucast_packets) +
9614                 get_stat64(&hw_stats->tx_mcast_packets) +
9615                 get_stat64(&hw_stats->tx_bcast_packets);
9616
9617         stats->rx_bytes = old_stats->rx_bytes +
9618                 get_stat64(&hw_stats->rx_octets);
9619         stats->tx_bytes = old_stats->tx_bytes +
9620                 get_stat64(&hw_stats->tx_octets);
9621
9622         stats->rx_errors = old_stats->rx_errors +
9623                 get_stat64(&hw_stats->rx_errors);
9624         stats->tx_errors = old_stats->tx_errors +
9625                 get_stat64(&hw_stats->tx_errors) +
9626                 get_stat64(&hw_stats->tx_mac_errors) +
9627                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9628                 get_stat64(&hw_stats->tx_discards);
9629
9630         stats->multicast = old_stats->multicast +
9631                 get_stat64(&hw_stats->rx_mcast_packets);
9632         stats->collisions = old_stats->collisions +
9633                 get_stat64(&hw_stats->tx_collisions);
9634
9635         stats->rx_length_errors = old_stats->rx_length_errors +
9636                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9637                 get_stat64(&hw_stats->rx_undersize_packets);
9638
9639         stats->rx_over_errors = old_stats->rx_over_errors +
9640                 get_stat64(&hw_stats->rxbds_empty);
9641         stats->rx_frame_errors = old_stats->rx_frame_errors +
9642                 get_stat64(&hw_stats->rx_align_errors);
9643         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9644                 get_stat64(&hw_stats->tx_discards);
9645         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9646                 get_stat64(&hw_stats->tx_carrier_sense_errors);
9647
9648         stats->rx_crc_errors = old_stats->rx_crc_errors +
9649                 calc_crc_errors(tp);
9650
9651         stats->rx_missed_errors = old_stats->rx_missed_errors +
9652                 get_stat64(&hw_stats->rx_discards);
9653
9654         stats->rx_dropped = tp->rx_dropped;
9655
9656         return stats;
9657 }
9658
9659 static inline u32 calc_crc(unsigned char *buf, int len)
9660 {
9661         u32 reg;
9662         u32 tmp;
9663         int j, k;
9664
9665         reg = 0xffffffff;
9666
9667         for (j = 0; j < len; j++) {
9668                 reg ^= buf[j];
9669
9670                 for (k = 0; k < 8; k++) {
9671                         tmp = reg & 0x01;
9672
9673                         reg >>= 1;
9674
9675                         if (tmp)
9676                                 reg ^= 0xedb88320;
9677                 }
9678         }
9679
9680         return ~reg;
9681 }
9682
9683 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9684 {
9685         /* accept or reject all multicast frames */
9686         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9687         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9688         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9689         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9690 }
9691
9692 static void __tg3_set_rx_mode(struct net_device *dev)
9693 {
9694         struct tg3 *tp = netdev_priv(dev);
9695         u32 rx_mode;
9696
9697         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9698                                   RX_MODE_KEEP_VLAN_TAG);
9699
9700 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9701         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9702          * flag clear.
9703          */
9704         if (!tg3_flag(tp, ENABLE_ASF))
9705                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9706 #endif
9707
9708         if (dev->flags & IFF_PROMISC) {
9709                 /* Promiscuous mode. */
9710                 rx_mode |= RX_MODE_PROMISC;
9711         } else if (dev->flags & IFF_ALLMULTI) {
9712                 /* Accept all multicast. */
9713                 tg3_set_multi(tp, 1);
9714         } else if (netdev_mc_empty(dev)) {
9715                 /* Reject all multicast. */
9716                 tg3_set_multi(tp, 0);
9717         } else {
9718                 /* Accept one or more multicast(s). */
9719                 struct netdev_hw_addr *ha;
9720                 u32 mc_filter[4] = { 0, };
9721                 u32 regidx;
9722                 u32 bit;
9723                 u32 crc;
9724
9725                 netdev_for_each_mc_addr(ha, dev) {
9726                         crc = calc_crc(ha->addr, ETH_ALEN);
9727                         bit = ~crc & 0x7f;
9728                         regidx = (bit & 0x60) >> 5;
9729                         bit &= 0x1f;
9730                         mc_filter[regidx] |= (1 << bit);
9731                 }
9732
9733                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9734                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9735                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9736                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9737         }
9738
9739         if (rx_mode != tp->rx_mode) {
9740                 tp->rx_mode = rx_mode;
9741                 tw32_f(MAC_RX_MODE, rx_mode);
9742                 udelay(10);
9743         }
9744 }
9745
9746 static void tg3_set_rx_mode(struct net_device *dev)
9747 {
9748         struct tg3 *tp = netdev_priv(dev);
9749
9750         if (!netif_running(dev))
9751                 return;
9752
9753         tg3_full_lock(tp, 0);
9754         __tg3_set_rx_mode(dev);
9755         tg3_full_unlock(tp);
9756 }
9757
9758 static int tg3_get_regs_len(struct net_device *dev)
9759 {
9760         return TG3_REG_BLK_SIZE;
9761 }
9762
9763 static void tg3_get_regs(struct net_device *dev,
9764                 struct ethtool_regs *regs, void *_p)
9765 {
9766         struct tg3 *tp = netdev_priv(dev);
9767
9768         regs->version = 0;
9769
9770         memset(_p, 0, TG3_REG_BLK_SIZE);
9771
9772         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9773                 return;
9774
9775         tg3_full_lock(tp, 0);
9776
9777         tg3_dump_legacy_regs(tp, (u32 *)_p);
9778
9779         tg3_full_unlock(tp);
9780 }
9781
9782 static int tg3_get_eeprom_len(struct net_device *dev)
9783 {
9784         struct tg3 *tp = netdev_priv(dev);
9785
9786         return tp->nvram_size;
9787 }
9788
9789 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9790 {
9791         struct tg3 *tp = netdev_priv(dev);
9792         int ret;
9793         u8  *pd;
9794         u32 i, offset, len, b_offset, b_count;
9795         __be32 val;
9796
9797         if (tg3_flag(tp, NO_NVRAM))
9798                 return -EINVAL;
9799
9800         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9801                 return -EAGAIN;
9802
9803         offset = eeprom->offset;
9804         len = eeprom->len;
9805         eeprom->len = 0;
9806
9807         eeprom->magic = TG3_EEPROM_MAGIC;
9808
9809         if (offset & 3) {
9810                 /* adjustments to start on required 4 byte boundary */
9811                 b_offset = offset & 3;
9812                 b_count = 4 - b_offset;
9813                 if (b_count > len) {
9814                         /* i.e. offset=1 len=2 */
9815                         b_count = len;
9816                 }
9817                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9818                 if (ret)
9819                         return ret;
9820                 memcpy(data, ((char *)&val) + b_offset, b_count);
9821                 len -= b_count;
9822                 offset += b_count;
9823                 eeprom->len += b_count;
9824         }
9825
9826         /* read bytes up to the last 4 byte boundary */
9827         pd = &data[eeprom->len];
9828         for (i = 0; i < (len - (len & 3)); i += 4) {
9829                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9830                 if (ret) {
9831                         eeprom->len += i;
9832                         return ret;
9833                 }
9834                 memcpy(pd + i, &val, 4);
9835         }
9836         eeprom->len += i;
9837
9838         if (len & 3) {
9839                 /* read last bytes not ending on 4 byte boundary */
9840                 pd = &data[eeprom->len];
9841                 b_count = len & 3;
9842                 b_offset = offset + len - b_count;
9843                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9844                 if (ret)
9845                         return ret;
9846                 memcpy(pd, &val, b_count);
9847                 eeprom->len += b_count;
9848         }
9849         return 0;
9850 }
9851
9852 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9853
9854 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9855 {
9856         struct tg3 *tp = netdev_priv(dev);
9857         int ret;
9858         u32 offset, len, b_offset, odd_len;
9859         u8 *buf;
9860         __be32 start, end;
9861
9862         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9863                 return -EAGAIN;
9864
9865         if (tg3_flag(tp, NO_NVRAM) ||
9866             eeprom->magic != TG3_EEPROM_MAGIC)
9867                 return -EINVAL;
9868
9869         offset = eeprom->offset;
9870         len = eeprom->len;
9871
9872         if ((b_offset = (offset & 3))) {
9873                 /* adjustments to start on required 4 byte boundary */
9874                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
9875                 if (ret)
9876                         return ret;
9877                 len += b_offset;
9878                 offset &= ~3;
9879                 if (len < 4)
9880                         len = 4;
9881         }
9882
9883         odd_len = 0;
9884         if (len & 3) {
9885                 /* adjustments to end on required 4 byte boundary */
9886                 odd_len = 1;
9887                 len = (len + 3) & ~3;
9888                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
9889                 if (ret)
9890                         return ret;
9891         }
9892
9893         buf = data;
9894         if (b_offset || odd_len) {
9895                 buf = kmalloc(len, GFP_KERNEL);
9896                 if (!buf)
9897                         return -ENOMEM;
9898                 if (b_offset)
9899                         memcpy(buf, &start, 4);
9900                 if (odd_len)
9901                         memcpy(buf+len-4, &end, 4);
9902                 memcpy(buf + b_offset, data, eeprom->len);
9903         }
9904
9905         ret = tg3_nvram_write_block(tp, offset, len, buf);
9906
9907         if (buf != data)
9908                 kfree(buf);
9909
9910         return ret;
9911 }
9912
9913 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9914 {
9915         struct tg3 *tp = netdev_priv(dev);
9916
9917         if (tg3_flag(tp, USE_PHYLIB)) {
9918                 struct phy_device *phydev;
9919                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9920                         return -EAGAIN;
9921                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9922                 return phy_ethtool_gset(phydev, cmd);
9923         }
9924
9925         cmd->supported = (SUPPORTED_Autoneg);
9926
9927         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9928                 cmd->supported |= (SUPPORTED_1000baseT_Half |
9929                                    SUPPORTED_1000baseT_Full);
9930
9931         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
9932                 cmd->supported |= (SUPPORTED_100baseT_Half |
9933                                   SUPPORTED_100baseT_Full |
9934                                   SUPPORTED_10baseT_Half |
9935                                   SUPPORTED_10baseT_Full |
9936                                   SUPPORTED_TP);
9937                 cmd->port = PORT_TP;
9938         } else {
9939                 cmd->supported |= SUPPORTED_FIBRE;
9940                 cmd->port = PORT_FIBRE;
9941         }
9942
9943         cmd->advertising = tp->link_config.advertising;
9944         if (netif_running(dev)) {
9945                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
9946                 cmd->duplex = tp->link_config.active_duplex;
9947         } else {
9948                 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
9949                 cmd->duplex = DUPLEX_INVALID;
9950         }
9951         cmd->phy_address = tp->phy_addr;
9952         cmd->transceiver = XCVR_INTERNAL;
9953         cmd->autoneg = tp->link_config.autoneg;
9954         cmd->maxtxpkt = 0;
9955         cmd->maxrxpkt = 0;
9956         return 0;
9957 }
9958
9959 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9960 {
9961         struct tg3 *tp = netdev_priv(dev);
9962         u32 speed = ethtool_cmd_speed(cmd);
9963
9964         if (tg3_flag(tp, USE_PHYLIB)) {
9965                 struct phy_device *phydev;
9966                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9967                         return -EAGAIN;
9968                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9969                 return phy_ethtool_sset(phydev, cmd);
9970         }
9971
9972         if (cmd->autoneg != AUTONEG_ENABLE &&
9973             cmd->autoneg != AUTONEG_DISABLE)
9974                 return -EINVAL;
9975
9976         if (cmd->autoneg == AUTONEG_DISABLE &&
9977             cmd->duplex != DUPLEX_FULL &&
9978             cmd->duplex != DUPLEX_HALF)
9979                 return -EINVAL;
9980
9981         if (cmd->autoneg == AUTONEG_ENABLE) {
9982                 u32 mask = ADVERTISED_Autoneg |
9983                            ADVERTISED_Pause |
9984                            ADVERTISED_Asym_Pause;
9985
9986                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9987                         mask |= ADVERTISED_1000baseT_Half |
9988                                 ADVERTISED_1000baseT_Full;
9989
9990                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
9991                         mask |= ADVERTISED_100baseT_Half |
9992                                 ADVERTISED_100baseT_Full |
9993                                 ADVERTISED_10baseT_Half |
9994                                 ADVERTISED_10baseT_Full |
9995                                 ADVERTISED_TP;
9996                 else
9997                         mask |= ADVERTISED_FIBRE;
9998
9999                 if (cmd->advertising & ~mask)
10000                         return -EINVAL;
10001
10002                 mask &= (ADVERTISED_1000baseT_Half |
10003                          ADVERTISED_1000baseT_Full |
10004                          ADVERTISED_100baseT_Half |
10005                          ADVERTISED_100baseT_Full |
10006                          ADVERTISED_10baseT_Half |
10007                          ADVERTISED_10baseT_Full);
10008
10009                 cmd->advertising &= mask;
10010         } else {
10011                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10012                         if (speed != SPEED_1000)
10013                                 return -EINVAL;
10014
10015                         if (cmd->duplex != DUPLEX_FULL)
10016                                 return -EINVAL;
10017                 } else {
10018                         if (speed != SPEED_100 &&
10019                             speed != SPEED_10)
10020                                 return -EINVAL;
10021                 }
10022         }
10023
10024         tg3_full_lock(tp, 0);
10025
10026         tp->link_config.autoneg = cmd->autoneg;
10027         if (cmd->autoneg == AUTONEG_ENABLE) {
10028                 tp->link_config.advertising = (cmd->advertising |
10029                                               ADVERTISED_Autoneg);
10030                 tp->link_config.speed = SPEED_INVALID;
10031                 tp->link_config.duplex = DUPLEX_INVALID;
10032         } else {
10033                 tp->link_config.advertising = 0;
10034                 tp->link_config.speed = speed;
10035                 tp->link_config.duplex = cmd->duplex;
10036         }
10037
10038         tp->link_config.orig_speed = tp->link_config.speed;
10039         tp->link_config.orig_duplex = tp->link_config.duplex;
10040         tp->link_config.orig_autoneg = tp->link_config.autoneg;
10041
10042         if (netif_running(dev))
10043                 tg3_setup_phy(tp, 1);
10044
10045         tg3_full_unlock(tp);
10046
10047         return 0;
10048 }
10049
10050 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10051 {
10052         struct tg3 *tp = netdev_priv(dev);
10053
10054         strcpy(info->driver, DRV_MODULE_NAME);
10055         strcpy(info->version, DRV_MODULE_VERSION);
10056         strcpy(info->fw_version, tp->fw_ver);
10057         strcpy(info->bus_info, pci_name(tp->pdev));
10058 }
10059
10060 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10061 {
10062         struct tg3 *tp = netdev_priv(dev);
10063
10064         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10065                 wol->supported = WAKE_MAGIC;
10066         else
10067                 wol->supported = 0;
10068         wol->wolopts = 0;
10069         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10070                 wol->wolopts = WAKE_MAGIC;
10071         memset(&wol->sopass, 0, sizeof(wol->sopass));
10072 }
10073
10074 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10075 {
10076         struct tg3 *tp = netdev_priv(dev);
10077         struct device *dp = &tp->pdev->dev;
10078
10079         if (wol->wolopts & ~WAKE_MAGIC)
10080                 return -EINVAL;
10081         if ((wol->wolopts & WAKE_MAGIC) &&
10082             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10083                 return -EINVAL;
10084
10085         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10086
10087         spin_lock_bh(&tp->lock);
10088         if (device_may_wakeup(dp))
10089                 tg3_flag_set(tp, WOL_ENABLE);
10090         else
10091                 tg3_flag_clear(tp, WOL_ENABLE);
10092         spin_unlock_bh(&tp->lock);
10093
10094         return 0;
10095 }
10096
10097 static u32 tg3_get_msglevel(struct net_device *dev)
10098 {
10099         struct tg3 *tp = netdev_priv(dev);
10100         return tp->msg_enable;
10101 }
10102
10103 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10104 {
10105         struct tg3 *tp = netdev_priv(dev);
10106         tp->msg_enable = value;
10107 }
10108
10109 static int tg3_nway_reset(struct net_device *dev)
10110 {
10111         struct tg3 *tp = netdev_priv(dev);
10112         int r;
10113
10114         if (!netif_running(dev))
10115                 return -EAGAIN;
10116
10117         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10118                 return -EINVAL;
10119
10120         if (tg3_flag(tp, USE_PHYLIB)) {
10121                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10122                         return -EAGAIN;
10123                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10124         } else {
10125                 u32 bmcr;
10126
10127                 spin_lock_bh(&tp->lock);
10128                 r = -EINVAL;
10129                 tg3_readphy(tp, MII_BMCR, &bmcr);
10130                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10131                     ((bmcr & BMCR_ANENABLE) ||
10132                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10133                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10134                                                    BMCR_ANENABLE);
10135                         r = 0;
10136                 }
10137                 spin_unlock_bh(&tp->lock);
10138         }
10139
10140         return r;
10141 }
10142
10143 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10144 {
10145         struct tg3 *tp = netdev_priv(dev);
10146
10147         ering->rx_max_pending = tp->rx_std_ring_mask;
10148         ering->rx_mini_max_pending = 0;
10149         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10150                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10151         else
10152                 ering->rx_jumbo_max_pending = 0;
10153
10154         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10155
10156         ering->rx_pending = tp->rx_pending;
10157         ering->rx_mini_pending = 0;
10158         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10159                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10160         else
10161                 ering->rx_jumbo_pending = 0;
10162
10163         ering->tx_pending = tp->napi[0].tx_pending;
10164 }
10165
10166 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10167 {
10168         struct tg3 *tp = netdev_priv(dev);
10169         int i, irq_sync = 0, err = 0;
10170
10171         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10172             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10173             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10174             (ering->tx_pending <= MAX_SKB_FRAGS) ||
10175             (tg3_flag(tp, TSO_BUG) &&
10176              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10177                 return -EINVAL;
10178
10179         if (netif_running(dev)) {
10180                 tg3_phy_stop(tp);
10181                 tg3_netif_stop(tp);
10182                 irq_sync = 1;
10183         }
10184
10185         tg3_full_lock(tp, irq_sync);
10186
10187         tp->rx_pending = ering->rx_pending;
10188
10189         if (tg3_flag(tp, MAX_RXPEND_64) &&
10190             tp->rx_pending > 63)
10191                 tp->rx_pending = 63;
10192         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10193
10194         for (i = 0; i < tp->irq_max; i++)
10195                 tp->napi[i].tx_pending = ering->tx_pending;
10196
10197         if (netif_running(dev)) {
10198                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10199                 err = tg3_restart_hw(tp, 1);
10200                 if (!err)
10201                         tg3_netif_start(tp);
10202         }
10203
10204         tg3_full_unlock(tp);
10205
10206         if (irq_sync && !err)
10207                 tg3_phy_start(tp);
10208
10209         return err;
10210 }
10211
10212 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10213 {
10214         struct tg3 *tp = netdev_priv(dev);
10215
10216         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10217
10218         if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10219                 epause->rx_pause = 1;
10220         else
10221                 epause->rx_pause = 0;
10222
10223         if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10224                 epause->tx_pause = 1;
10225         else
10226                 epause->tx_pause = 0;
10227 }
10228
10229 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10230 {
10231         struct tg3 *tp = netdev_priv(dev);
10232         int err = 0;
10233
10234         if (tg3_flag(tp, USE_PHYLIB)) {
10235                 u32 newadv;
10236                 struct phy_device *phydev;
10237
10238                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10239
10240                 if (!(phydev->supported & SUPPORTED_Pause) ||
10241                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10242                      (epause->rx_pause != epause->tx_pause)))
10243                         return -EINVAL;
10244
10245                 tp->link_config.flowctrl = 0;
10246                 if (epause->rx_pause) {
10247                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10248
10249                         if (epause->tx_pause) {
10250                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10251                                 newadv = ADVERTISED_Pause;
10252                         } else
10253                                 newadv = ADVERTISED_Pause |
10254                                          ADVERTISED_Asym_Pause;
10255                 } else if (epause->tx_pause) {
10256                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10257                         newadv = ADVERTISED_Asym_Pause;
10258                 } else
10259                         newadv = 0;
10260
10261                 if (epause->autoneg)
10262                         tg3_flag_set(tp, PAUSE_AUTONEG);
10263                 else
10264                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10265
10266                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10267                         u32 oldadv = phydev->advertising &
10268                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10269                         if (oldadv != newadv) {
10270                                 phydev->advertising &=
10271                                         ~(ADVERTISED_Pause |
10272                                           ADVERTISED_Asym_Pause);
10273                                 phydev->advertising |= newadv;
10274                                 if (phydev->autoneg) {
10275                                         /*
10276                                          * Always renegotiate the link to
10277                                          * inform our link partner of our
10278                                          * flow control settings, even if the
10279                                          * flow control is forced.  Let
10280                                          * tg3_adjust_link() do the final
10281                                          * flow control setup.
10282                                          */
10283                                         return phy_start_aneg(phydev);
10284                                 }
10285                         }
10286
10287                         if (!epause->autoneg)
10288                                 tg3_setup_flow_control(tp, 0, 0);
10289                 } else {
10290                         tp->link_config.orig_advertising &=
10291                                         ~(ADVERTISED_Pause |
10292                                           ADVERTISED_Asym_Pause);
10293                         tp->link_config.orig_advertising |= newadv;
10294                 }
10295         } else {
10296                 int irq_sync = 0;
10297
10298                 if (netif_running(dev)) {
10299                         tg3_netif_stop(tp);
10300                         irq_sync = 1;
10301                 }
10302
10303                 tg3_full_lock(tp, irq_sync);
10304
10305                 if (epause->autoneg)
10306                         tg3_flag_set(tp, PAUSE_AUTONEG);
10307                 else
10308                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10309                 if (epause->rx_pause)
10310                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10311                 else
10312                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10313                 if (epause->tx_pause)
10314                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10315                 else
10316                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10317
10318                 if (netif_running(dev)) {
10319                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10320                         err = tg3_restart_hw(tp, 1);
10321                         if (!err)
10322                                 tg3_netif_start(tp);
10323                 }
10324
10325                 tg3_full_unlock(tp);
10326         }
10327
10328         return err;
10329 }
10330
10331 static int tg3_get_sset_count(struct net_device *dev, int sset)
10332 {
10333         switch (sset) {
10334         case ETH_SS_TEST:
10335                 return TG3_NUM_TEST;
10336         case ETH_SS_STATS:
10337                 return TG3_NUM_STATS;
10338         default:
10339                 return -EOPNOTSUPP;
10340         }
10341 }
10342
10343 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10344 {
10345         switch (stringset) {
10346         case ETH_SS_STATS:
10347                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10348                 break;
10349         case ETH_SS_TEST:
10350                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10351                 break;
10352         default:
10353                 WARN_ON(1);     /* we need a WARN() */
10354                 break;
10355         }
10356 }
10357
10358 static int tg3_set_phys_id(struct net_device *dev,
10359                             enum ethtool_phys_id_state state)
10360 {
10361         struct tg3 *tp = netdev_priv(dev);
10362
10363         if (!netif_running(tp->dev))
10364                 return -EAGAIN;
10365
10366         switch (state) {
10367         case ETHTOOL_ID_ACTIVE:
10368                 return 1;       /* cycle on/off once per second */
10369
10370         case ETHTOOL_ID_ON:
10371                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10372                      LED_CTRL_1000MBPS_ON |
10373                      LED_CTRL_100MBPS_ON |
10374                      LED_CTRL_10MBPS_ON |
10375                      LED_CTRL_TRAFFIC_OVERRIDE |
10376                      LED_CTRL_TRAFFIC_BLINK |
10377                      LED_CTRL_TRAFFIC_LED);
10378                 break;
10379
10380         case ETHTOOL_ID_OFF:
10381                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10382                      LED_CTRL_TRAFFIC_OVERRIDE);
10383                 break;
10384
10385         case ETHTOOL_ID_INACTIVE:
10386                 tw32(MAC_LED_CTRL, tp->led_ctrl);
10387                 break;
10388         }
10389
10390         return 0;
10391 }
10392
10393 static void tg3_get_ethtool_stats(struct net_device *dev,
10394                                    struct ethtool_stats *estats, u64 *tmp_stats)
10395 {
10396         struct tg3 *tp = netdev_priv(dev);
10397         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10398 }
10399
10400 static __be32 * tg3_vpd_readblock(struct tg3 *tp)
10401 {
10402         int i;
10403         __be32 *buf;
10404         u32 offset = 0, len = 0;
10405         u32 magic, val;
10406
10407         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10408                 return NULL;
10409
10410         if (magic == TG3_EEPROM_MAGIC) {
10411                 for (offset = TG3_NVM_DIR_START;
10412                      offset < TG3_NVM_DIR_END;
10413                      offset += TG3_NVM_DIRENT_SIZE) {
10414                         if (tg3_nvram_read(tp, offset, &val))
10415                                 return NULL;
10416
10417                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10418                             TG3_NVM_DIRTYPE_EXTVPD)
10419                                 break;
10420                 }
10421
10422                 if (offset != TG3_NVM_DIR_END) {
10423                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10424                         if (tg3_nvram_read(tp, offset + 4, &offset))
10425                                 return NULL;
10426
10427                         offset = tg3_nvram_logical_addr(tp, offset);
10428                 }
10429         }
10430
10431         if (!offset || !len) {
10432                 offset = TG3_NVM_VPD_OFF;
10433                 len = TG3_NVM_VPD_LEN;
10434         }
10435
10436         buf = kmalloc(len, GFP_KERNEL);
10437         if (buf == NULL)
10438                 return NULL;
10439
10440         if (magic == TG3_EEPROM_MAGIC) {
10441                 for (i = 0; i < len; i += 4) {
10442                         /* The data is in little-endian format in NVRAM.
10443                          * Use the big-endian read routines to preserve
10444                          * the byte order as it exists in NVRAM.
10445                          */
10446                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10447                                 goto error;
10448                 }
10449         } else {
10450                 u8 *ptr;
10451                 ssize_t cnt;
10452                 unsigned int pos = 0;
10453
10454                 ptr = (u8 *)&buf[0];
10455                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10456                         cnt = pci_read_vpd(tp->pdev, pos,
10457                                            len - pos, ptr);
10458                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
10459                                 cnt = 0;
10460                         else if (cnt < 0)
10461                                 goto error;
10462                 }
10463                 if (pos != len)
10464                         goto error;
10465         }
10466
10467         return buf;
10468
10469 error:
10470         kfree(buf);
10471         return NULL;
10472 }
10473
10474 #define NVRAM_TEST_SIZE 0x100
10475 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
10476 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
10477 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
10478 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10479 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10480
10481 static int tg3_test_nvram(struct tg3 *tp)
10482 {
10483         u32 csum, magic;
10484         __be32 *buf;
10485         int i, j, k, err = 0, size;
10486
10487         if (tg3_flag(tp, NO_NVRAM))
10488                 return 0;
10489
10490         if (tg3_nvram_read(tp, 0, &magic) != 0)
10491                 return -EIO;
10492
10493         if (magic == TG3_EEPROM_MAGIC)
10494                 size = NVRAM_TEST_SIZE;
10495         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10496                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10497                     TG3_EEPROM_SB_FORMAT_1) {
10498                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10499                         case TG3_EEPROM_SB_REVISION_0:
10500                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10501                                 break;
10502                         case TG3_EEPROM_SB_REVISION_2:
10503                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10504                                 break;
10505                         case TG3_EEPROM_SB_REVISION_3:
10506                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10507                                 break;
10508                         default:
10509                                 return 0;
10510                         }
10511                 } else
10512                         return 0;
10513         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10514                 size = NVRAM_SELFBOOT_HW_SIZE;
10515         else
10516                 return -EIO;
10517
10518         buf = kmalloc(size, GFP_KERNEL);
10519         if (buf == NULL)
10520                 return -ENOMEM;
10521
10522         err = -EIO;
10523         for (i = 0, j = 0; i < size; i += 4, j++) {
10524                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10525                 if (err)
10526                         break;
10527         }
10528         if (i < size)
10529                 goto out;
10530
10531         /* Selfboot format */
10532         magic = be32_to_cpu(buf[0]);
10533         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10534             TG3_EEPROM_MAGIC_FW) {
10535                 u8 *buf8 = (u8 *) buf, csum8 = 0;
10536
10537                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10538                     TG3_EEPROM_SB_REVISION_2) {
10539                         /* For rev 2, the csum doesn't include the MBA. */
10540                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10541                                 csum8 += buf8[i];
10542                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10543                                 csum8 += buf8[i];
10544                 } else {
10545                         for (i = 0; i < size; i++)
10546                                 csum8 += buf8[i];
10547                 }
10548
10549                 if (csum8 == 0) {
10550                         err = 0;
10551                         goto out;
10552                 }
10553
10554                 err = -EIO;
10555                 goto out;
10556         }
10557
10558         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10559             TG3_EEPROM_MAGIC_HW) {
10560                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10561                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10562                 u8 *buf8 = (u8 *) buf;
10563
10564                 /* Separate the parity bits and the data bytes.  */
10565                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10566                         if ((i == 0) || (i == 8)) {
10567                                 int l;
10568                                 u8 msk;
10569
10570                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10571                                         parity[k++] = buf8[i] & msk;
10572                                 i++;
10573                         } else if (i == 16) {
10574                                 int l;
10575                                 u8 msk;
10576
10577                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10578                                         parity[k++] = buf8[i] & msk;
10579                                 i++;
10580
10581                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10582                                         parity[k++] = buf8[i] & msk;
10583                                 i++;
10584                         }
10585                         data[j++] = buf8[i];
10586                 }
10587
10588                 err = -EIO;
10589                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10590                         u8 hw8 = hweight8(data[i]);
10591
10592                         if ((hw8 & 0x1) && parity[i])
10593                                 goto out;
10594                         else if (!(hw8 & 0x1) && !parity[i])
10595                                 goto out;
10596                 }
10597                 err = 0;
10598                 goto out;
10599         }
10600
10601         err = -EIO;
10602
10603         /* Bootstrap checksum at offset 0x10 */
10604         csum = calc_crc((unsigned char *) buf, 0x10);
10605         if (csum != le32_to_cpu(buf[0x10/4]))
10606                 goto out;
10607
10608         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10609         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10610         if (csum != le32_to_cpu(buf[0xfc/4]))
10611                 goto out;
10612
10613         kfree(buf);
10614
10615         buf = tg3_vpd_readblock(tp);
10616         if (!buf)
10617                 return -ENOMEM;
10618
10619         i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
10620                              PCI_VPD_LRDT_RO_DATA);
10621         if (i > 0) {
10622                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
10623                 if (j < 0)
10624                         goto out;
10625
10626                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN)
10627                         goto out;
10628
10629                 i += PCI_VPD_LRDT_TAG_SIZE;
10630                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
10631                                               PCI_VPD_RO_KEYWORD_CHKSUM);
10632                 if (j > 0) {
10633                         u8 csum8 = 0;
10634
10635                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
10636
10637                         for (i = 0; i <= j; i++)
10638                                 csum8 += ((u8 *)buf)[i];
10639
10640                         if (csum8)
10641                                 goto out;
10642                 }
10643         }
10644
10645         err = 0;
10646
10647 out:
10648         kfree(buf);
10649         return err;
10650 }
10651
10652 #define TG3_SERDES_TIMEOUT_SEC  2
10653 #define TG3_COPPER_TIMEOUT_SEC  6
10654
10655 static int tg3_test_link(struct tg3 *tp)
10656 {
10657         int i, max;
10658
10659         if (!netif_running(tp->dev))
10660                 return -ENODEV;
10661
10662         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10663                 max = TG3_SERDES_TIMEOUT_SEC;
10664         else
10665                 max = TG3_COPPER_TIMEOUT_SEC;
10666
10667         for (i = 0; i < max; i++) {
10668                 if (netif_carrier_ok(tp->dev))
10669                         return 0;
10670
10671                 if (msleep_interruptible(1000))
10672                         break;
10673         }
10674
10675         return -EIO;
10676 }
10677
10678 /* Only test the commonly used registers */
10679 static int tg3_test_registers(struct tg3 *tp)
10680 {
10681         int i, is_5705, is_5750;
10682         u32 offset, read_mask, write_mask, val, save_val, read_val;
10683         static struct {
10684                 u16 offset;
10685                 u16 flags;
10686 #define TG3_FL_5705     0x1
10687 #define TG3_FL_NOT_5705 0x2
10688 #define TG3_FL_NOT_5788 0x4
10689 #define TG3_FL_NOT_5750 0x8
10690                 u32 read_mask;
10691                 u32 write_mask;
10692         } reg_tbl[] = {
10693                 /* MAC Control Registers */
10694                 { MAC_MODE, TG3_FL_NOT_5705,
10695                         0x00000000, 0x00ef6f8c },
10696                 { MAC_MODE, TG3_FL_5705,
10697                         0x00000000, 0x01ef6b8c },
10698                 { MAC_STATUS, TG3_FL_NOT_5705,
10699                         0x03800107, 0x00000000 },
10700                 { MAC_STATUS, TG3_FL_5705,
10701                         0x03800100, 0x00000000 },
10702                 { MAC_ADDR_0_HIGH, 0x0000,
10703                         0x00000000, 0x0000ffff },
10704                 { MAC_ADDR_0_LOW, 0x0000,
10705                         0x00000000, 0xffffffff },
10706                 { MAC_RX_MTU_SIZE, 0x0000,
10707                         0x00000000, 0x0000ffff },
10708                 { MAC_TX_MODE, 0x0000,
10709                         0x00000000, 0x00000070 },
10710                 { MAC_TX_LENGTHS, 0x0000,
10711                         0x00000000, 0x00003fff },
10712                 { MAC_RX_MODE, TG3_FL_NOT_5705,
10713                         0x00000000, 0x000007fc },
10714                 { MAC_RX_MODE, TG3_FL_5705,
10715                         0x00000000, 0x000007dc },
10716                 { MAC_HASH_REG_0, 0x0000,
10717                         0x00000000, 0xffffffff },
10718                 { MAC_HASH_REG_1, 0x0000,
10719                         0x00000000, 0xffffffff },
10720                 { MAC_HASH_REG_2, 0x0000,
10721                         0x00000000, 0xffffffff },
10722                 { MAC_HASH_REG_3, 0x0000,
10723                         0x00000000, 0xffffffff },
10724
10725                 /* Receive Data and Receive BD Initiator Control Registers. */
10726                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10727                         0x00000000, 0xffffffff },
10728                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10729                         0x00000000, 0xffffffff },
10730                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10731                         0x00000000, 0x00000003 },
10732                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10733                         0x00000000, 0xffffffff },
10734                 { RCVDBDI_STD_BD+0, 0x0000,
10735                         0x00000000, 0xffffffff },
10736                 { RCVDBDI_STD_BD+4, 0x0000,
10737                         0x00000000, 0xffffffff },
10738                 { RCVDBDI_STD_BD+8, 0x0000,
10739                         0x00000000, 0xffff0002 },
10740                 { RCVDBDI_STD_BD+0xc, 0x0000,
10741                         0x00000000, 0xffffffff },
10742
10743                 /* Receive BD Initiator Control Registers. */
10744                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10745                         0x00000000, 0xffffffff },
10746                 { RCVBDI_STD_THRESH, TG3_FL_5705,
10747                         0x00000000, 0x000003ff },
10748                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10749                         0x00000000, 0xffffffff },
10750
10751                 /* Host Coalescing Control Registers. */
10752                 { HOSTCC_MODE, TG3_FL_NOT_5705,
10753                         0x00000000, 0x00000004 },
10754                 { HOSTCC_MODE, TG3_FL_5705,
10755                         0x00000000, 0x000000f6 },
10756                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10757                         0x00000000, 0xffffffff },
10758                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10759                         0x00000000, 0x000003ff },
10760                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10761                         0x00000000, 0xffffffff },
10762                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10763                         0x00000000, 0x000003ff },
10764                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10765                         0x00000000, 0xffffffff },
10766                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10767                         0x00000000, 0x000000ff },
10768                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10769                         0x00000000, 0xffffffff },
10770                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10771                         0x00000000, 0x000000ff },
10772                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10773                         0x00000000, 0xffffffff },
10774                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10775                         0x00000000, 0xffffffff },
10776                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10777                         0x00000000, 0xffffffff },
10778                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10779                         0x00000000, 0x000000ff },
10780                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10781                         0x00000000, 0xffffffff },
10782                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10783                         0x00000000, 0x000000ff },
10784                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10785                         0x00000000, 0xffffffff },
10786                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10787                         0x00000000, 0xffffffff },
10788                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10789                         0x00000000, 0xffffffff },
10790                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10791                         0x00000000, 0xffffffff },
10792                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10793                         0x00000000, 0xffffffff },
10794                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10795                         0xffffffff, 0x00000000 },
10796                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10797                         0xffffffff, 0x00000000 },
10798
10799                 /* Buffer Manager Control Registers. */
10800                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10801                         0x00000000, 0x007fff80 },
10802                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10803                         0x00000000, 0x007fffff },
10804                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10805                         0x00000000, 0x0000003f },
10806                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10807                         0x00000000, 0x000001ff },
10808                 { BUFMGR_MB_HIGH_WATER, 0x0000,
10809                         0x00000000, 0x000001ff },
10810                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10811                         0xffffffff, 0x00000000 },
10812                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10813                         0xffffffff, 0x00000000 },
10814
10815                 /* Mailbox Registers */
10816                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10817                         0x00000000, 0x000001ff },
10818                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10819                         0x00000000, 0x000001ff },
10820                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10821                         0x00000000, 0x000007ff },
10822                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10823                         0x00000000, 0x000001ff },
10824
10825                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10826         };
10827
10828         is_5705 = is_5750 = 0;
10829         if (tg3_flag(tp, 5705_PLUS)) {
10830                 is_5705 = 1;
10831                 if (tg3_flag(tp, 5750_PLUS))
10832                         is_5750 = 1;
10833         }
10834
10835         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
10836                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
10837                         continue;
10838
10839                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
10840                         continue;
10841
10842                 if (tg3_flag(tp, IS_5788) &&
10843                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
10844                         continue;
10845
10846                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
10847                         continue;
10848
10849                 offset = (u32) reg_tbl[i].offset;
10850                 read_mask = reg_tbl[i].read_mask;
10851                 write_mask = reg_tbl[i].write_mask;
10852
10853                 /* Save the original register content */
10854                 save_val = tr32(offset);
10855
10856                 /* Determine the read-only value. */
10857                 read_val = save_val & read_mask;
10858
10859                 /* Write zero to the register, then make sure the read-only bits
10860                  * are not changed and the read/write bits are all zeros.
10861                  */
10862                 tw32(offset, 0);
10863
10864                 val = tr32(offset);
10865
10866                 /* Test the read-only and read/write bits. */
10867                 if (((val & read_mask) != read_val) || (val & write_mask))
10868                         goto out;
10869
10870                 /* Write ones to all the bits defined by RdMask and WrMask, then
10871                  * make sure the read-only bits are not changed and the
10872                  * read/write bits are all ones.
10873                  */
10874                 tw32(offset, read_mask | write_mask);
10875
10876                 val = tr32(offset);
10877
10878                 /* Test the read-only bits. */
10879                 if ((val & read_mask) != read_val)
10880                         goto out;
10881
10882                 /* Test the read/write bits. */
10883                 if ((val & write_mask) != write_mask)
10884                         goto out;
10885
10886                 tw32(offset, save_val);
10887         }
10888
10889         return 0;
10890
10891 out:
10892         if (netif_msg_hw(tp))
10893                 netdev_err(tp->dev,
10894                            "Register test failed at offset %x\n", offset);
10895         tw32(offset, save_val);
10896         return -EIO;
10897 }
10898
10899 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10900 {
10901         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
10902         int i;
10903         u32 j;
10904
10905         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10906                 for (j = 0; j < len; j += 4) {
10907                         u32 val;
10908
10909                         tg3_write_mem(tp, offset + j, test_pattern[i]);
10910                         tg3_read_mem(tp, offset + j, &val);
10911                         if (val != test_pattern[i])
10912                                 return -EIO;
10913                 }
10914         }
10915         return 0;
10916 }
10917
10918 static int tg3_test_memory(struct tg3 *tp)
10919 {
10920         static struct mem_entry {
10921                 u32 offset;
10922                 u32 len;
10923         } mem_tbl_570x[] = {
10924                 { 0x00000000, 0x00b50},
10925                 { 0x00002000, 0x1c000},
10926                 { 0xffffffff, 0x00000}
10927         }, mem_tbl_5705[] = {
10928                 { 0x00000100, 0x0000c},
10929                 { 0x00000200, 0x00008},
10930                 { 0x00004000, 0x00800},
10931                 { 0x00006000, 0x01000},
10932                 { 0x00008000, 0x02000},
10933                 { 0x00010000, 0x0e000},
10934                 { 0xffffffff, 0x00000}
10935         }, mem_tbl_5755[] = {
10936                 { 0x00000200, 0x00008},
10937                 { 0x00004000, 0x00800},
10938                 { 0x00006000, 0x00800},
10939                 { 0x00008000, 0x02000},
10940                 { 0x00010000, 0x0c000},
10941                 { 0xffffffff, 0x00000}
10942         }, mem_tbl_5906[] = {
10943                 { 0x00000200, 0x00008},
10944                 { 0x00004000, 0x00400},
10945                 { 0x00006000, 0x00400},
10946                 { 0x00008000, 0x01000},
10947                 { 0x00010000, 0x01000},
10948                 { 0xffffffff, 0x00000}
10949         }, mem_tbl_5717[] = {
10950                 { 0x00000200, 0x00008},
10951                 { 0x00010000, 0x0a000},
10952                 { 0x00020000, 0x13c00},
10953                 { 0xffffffff, 0x00000}
10954         }, mem_tbl_57765[] = {
10955                 { 0x00000200, 0x00008},
10956                 { 0x00004000, 0x00800},
10957                 { 0x00006000, 0x09800},
10958                 { 0x00010000, 0x0a000},
10959                 { 0xffffffff, 0x00000}
10960         };
10961         struct mem_entry *mem_tbl;
10962         int err = 0;
10963         int i;
10964
10965         if (tg3_flag(tp, 5717_PLUS))
10966                 mem_tbl = mem_tbl_5717;
10967         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
10968                 mem_tbl = mem_tbl_57765;
10969         else if (tg3_flag(tp, 5755_PLUS))
10970                 mem_tbl = mem_tbl_5755;
10971         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10972                 mem_tbl = mem_tbl_5906;
10973         else if (tg3_flag(tp, 5705_PLUS))
10974                 mem_tbl = mem_tbl_5705;
10975         else
10976                 mem_tbl = mem_tbl_570x;
10977
10978         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10979                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
10980                 if (err)
10981                         break;
10982         }
10983
10984         return err;
10985 }
10986
10987 #define TG3_MAC_LOOPBACK        0
10988 #define TG3_PHY_LOOPBACK        1
10989 #define TG3_TSO_LOOPBACK        2
10990
10991 #define TG3_TSO_MSS             500
10992
10993 #define TG3_TSO_IP_HDR_LEN      20
10994 #define TG3_TSO_TCP_HDR_LEN     20
10995 #define TG3_TSO_TCP_OPT_LEN     12
10996
10997 static const u8 tg3_tso_header[] = {
10998 0x08, 0x00,
10999 0x45, 0x00, 0x00, 0x00,
11000 0x00, 0x00, 0x40, 0x00,
11001 0x40, 0x06, 0x00, 0x00,
11002 0x0a, 0x00, 0x00, 0x01,
11003 0x0a, 0x00, 0x00, 0x02,
11004 0x0d, 0x00, 0xe0, 0x00,
11005 0x00, 0x00, 0x01, 0x00,
11006 0x00, 0x00, 0x02, 0x00,
11007 0x80, 0x10, 0x10, 0x00,
11008 0x14, 0x09, 0x00, 0x00,
11009 0x01, 0x01, 0x08, 0x0a,
11010 0x11, 0x11, 0x11, 0x11,
11011 0x11, 0x11, 0x11, 0x11,
11012 };
11013
11014 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
11015 {
11016         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
11017         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11018         struct sk_buff *skb, *rx_skb;
11019         u8 *tx_data;
11020         dma_addr_t map;
11021         int num_pkts, tx_len, rx_len, i, err;
11022         struct tg3_rx_buffer_desc *desc;
11023         struct tg3_napi *tnapi, *rnapi;
11024         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11025
11026         tnapi = &tp->napi[0];
11027         rnapi = &tp->napi[0];
11028         if (tp->irq_cnt > 1) {
11029                 if (tg3_flag(tp, ENABLE_RSS))
11030                         rnapi = &tp->napi[1];
11031                 if (tg3_flag(tp, ENABLE_TSS))
11032                         tnapi = &tp->napi[1];
11033         }
11034         coal_now = tnapi->coal_now | rnapi->coal_now;
11035
11036         if (loopback_mode == TG3_MAC_LOOPBACK) {
11037                 /* HW errata - mac loopback fails in some cases on 5780.
11038                  * Normal traffic and PHY loopback are not affected by
11039                  * errata.  Also, the MAC loopback test is deprecated for
11040                  * all newer ASIC revisions.
11041                  */
11042                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11043                     tg3_flag(tp, CPMU_PRESENT))
11044                         return 0;
11045
11046                 mac_mode = tp->mac_mode &
11047                            ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11048                 mac_mode |= MAC_MODE_PORT_INT_LPBACK;
11049                 if (!tg3_flag(tp, 5705_PLUS))
11050                         mac_mode |= MAC_MODE_LINK_POLARITY;
11051                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
11052                         mac_mode |= MAC_MODE_PORT_MODE_MII;
11053                 else
11054                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
11055                 tw32(MAC_MODE, mac_mode);
11056         } else {
11057                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11058                         tg3_phy_fet_toggle_apd(tp, false);
11059                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
11060                 } else
11061                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
11062
11063                 tg3_phy_toggle_automdix(tp, 0);
11064
11065                 tg3_writephy(tp, MII_BMCR, val);
11066                 udelay(40);
11067
11068                 mac_mode = tp->mac_mode &
11069                            ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11070                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11071                         tg3_writephy(tp, MII_TG3_FET_PTEST,
11072                                      MII_TG3_FET_PTEST_FRC_TX_LINK |
11073                                      MII_TG3_FET_PTEST_FRC_TX_LOCK);
11074                         /* The write needs to be flushed for the AC131 */
11075                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11076                                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
11077                         mac_mode |= MAC_MODE_PORT_MODE_MII;
11078                 } else
11079                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
11080
11081                 /* reset to prevent losing 1st rx packet intermittently */
11082                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
11083                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
11084                         udelay(10);
11085                         tw32_f(MAC_RX_MODE, tp->rx_mode);
11086                 }
11087                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
11088                         u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
11089                         if (masked_phy_id == TG3_PHY_ID_BCM5401)
11090                                 mac_mode &= ~MAC_MODE_LINK_POLARITY;
11091                         else if (masked_phy_id == TG3_PHY_ID_BCM5411)
11092                                 mac_mode |= MAC_MODE_LINK_POLARITY;
11093                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
11094                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
11095                 }
11096                 tw32(MAC_MODE, mac_mode);
11097
11098                 /* Wait for link */
11099                 for (i = 0; i < 100; i++) {
11100                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11101                                 break;
11102                         mdelay(1);
11103                 }
11104         }
11105
11106         err = -EIO;
11107
11108         tx_len = pktsz;
11109         skb = netdev_alloc_skb(tp->dev, tx_len);
11110         if (!skb)
11111                 return -ENOMEM;
11112
11113         tx_data = skb_put(skb, tx_len);
11114         memcpy(tx_data, tp->dev->dev_addr, 6);
11115         memset(tx_data + 6, 0x0, 8);
11116
11117         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11118
11119         if (loopback_mode == TG3_TSO_LOOPBACK) {
11120                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11121
11122                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11123                               TG3_TSO_TCP_OPT_LEN;
11124
11125                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11126                        sizeof(tg3_tso_header));
11127                 mss = TG3_TSO_MSS;
11128
11129                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11130                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11131
11132                 /* Set the total length field in the IP header */
11133                 iph->tot_len = htons((u16)(mss + hdr_len));
11134
11135                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11136                               TXD_FLAG_CPU_POST_DMA);
11137
11138                 if (tg3_flag(tp, HW_TSO_1) ||
11139                     tg3_flag(tp, HW_TSO_2) ||
11140                     tg3_flag(tp, HW_TSO_3)) {
11141                         struct tcphdr *th;
11142                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11143                         th = (struct tcphdr *)&tx_data[val];
11144                         th->check = 0;
11145                 } else
11146                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
11147
11148                 if (tg3_flag(tp, HW_TSO_3)) {
11149                         mss |= (hdr_len & 0xc) << 12;
11150                         if (hdr_len & 0x10)
11151                                 base_flags |= 0x00000010;
11152                         base_flags |= (hdr_len & 0x3e0) << 5;
11153                 } else if (tg3_flag(tp, HW_TSO_2))
11154                         mss |= hdr_len << 9;
11155                 else if (tg3_flag(tp, HW_TSO_1) ||
11156                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11157                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11158                 } else {
11159                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11160                 }
11161
11162                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11163         } else {
11164                 num_pkts = 1;
11165                 data_off = ETH_HLEN;
11166         }
11167
11168         for (i = data_off; i < tx_len; i++)
11169                 tx_data[i] = (u8) (i & 0xff);
11170
11171         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11172         if (pci_dma_mapping_error(tp->pdev, map)) {
11173                 dev_kfree_skb(skb);
11174                 return -EIO;
11175         }
11176
11177         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11178                rnapi->coal_now);
11179
11180         udelay(10);
11181
11182         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11183
11184         tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len,
11185                     base_flags, (mss << 1) | 1);
11186
11187         tnapi->tx_prod++;
11188
11189         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11190         tr32_mailbox(tnapi->prodmbox);
11191
11192         udelay(10);
11193
11194         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
11195         for (i = 0; i < 35; i++) {
11196                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11197                        coal_now);
11198
11199                 udelay(10);
11200
11201                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11202                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11203                 if ((tx_idx == tnapi->tx_prod) &&
11204                     (rx_idx == (rx_start_idx + num_pkts)))
11205                         break;
11206         }
11207
11208         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
11209         dev_kfree_skb(skb);
11210
11211         if (tx_idx != tnapi->tx_prod)
11212                 goto out;
11213
11214         if (rx_idx != rx_start_idx + num_pkts)
11215                 goto out;
11216
11217         val = data_off;
11218         while (rx_idx != rx_start_idx) {
11219                 desc = &rnapi->rx_rcb[rx_start_idx++];
11220                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11221                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11222
11223                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11224                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11225                         goto out;
11226
11227                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11228                          - ETH_FCS_LEN;
11229
11230                 if (loopback_mode != TG3_TSO_LOOPBACK) {
11231                         if (rx_len != tx_len)
11232                                 goto out;
11233
11234                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11235                                 if (opaque_key != RXD_OPAQUE_RING_STD)
11236                                         goto out;
11237                         } else {
11238                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11239                                         goto out;
11240                         }
11241                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11242                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11243                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
11244                         goto out;
11245                 }
11246
11247                 if (opaque_key == RXD_OPAQUE_RING_STD) {
11248                         rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11249                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11250                                              mapping);
11251                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11252                         rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11253                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11254                                              mapping);
11255                 } else
11256                         goto out;
11257
11258                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11259                                             PCI_DMA_FROMDEVICE);
11260
11261                 for (i = data_off; i < rx_len; i++, val++) {
11262                         if (*(rx_skb->data + i) != (u8) (val & 0xff))
11263                                 goto out;
11264                 }
11265         }
11266
11267         err = 0;
11268
11269         /* tg3_free_rings will unmap and free the rx_skb */
11270 out:
11271         return err;
11272 }
11273
11274 #define TG3_STD_LOOPBACK_FAILED         1
11275 #define TG3_JMB_LOOPBACK_FAILED         2
11276 #define TG3_TSO_LOOPBACK_FAILED         4
11277
11278 #define TG3_MAC_LOOPBACK_SHIFT          0
11279 #define TG3_PHY_LOOPBACK_SHIFT          4
11280 #define TG3_LOOPBACK_FAILED             0x00000077
11281
11282 static int tg3_test_loopback(struct tg3 *tp)
11283 {
11284         int err = 0;
11285         u32 eee_cap, cpmuctrl = 0;
11286
11287         if (!netif_running(tp->dev))
11288                 return TG3_LOOPBACK_FAILED;
11289
11290         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11291         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11292
11293         err = tg3_reset_hw(tp, 1);
11294         if (err) {
11295                 err = TG3_LOOPBACK_FAILED;
11296                 goto done;
11297         }
11298
11299         if (tg3_flag(tp, ENABLE_RSS)) {
11300                 int i;
11301
11302                 /* Reroute all rx packets to the 1st queue */
11303                 for (i = MAC_RSS_INDIR_TBL_0;
11304                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11305                         tw32(i, 0x0);
11306         }
11307
11308         /* Turn off gphy autopowerdown. */
11309         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11310                 tg3_phy_toggle_apd(tp, false);
11311
11312         if (tg3_flag(tp, CPMU_PRESENT)) {
11313                 int i;
11314                 u32 status;
11315
11316                 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
11317
11318                 /* Wait for up to 40 microseconds to acquire lock. */
11319                 for (i = 0; i < 4; i++) {
11320                         status = tr32(TG3_CPMU_MUTEX_GNT);
11321                         if (status == CPMU_MUTEX_GNT_DRIVER)
11322                                 break;
11323                         udelay(10);
11324                 }
11325
11326                 if (status != CPMU_MUTEX_GNT_DRIVER) {
11327                         err = TG3_LOOPBACK_FAILED;
11328                         goto done;
11329                 }
11330
11331                 /* Turn off link-based power management. */
11332                 cpmuctrl = tr32(TG3_CPMU_CTRL);
11333                 tw32(TG3_CPMU_CTRL,
11334                      cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
11335                                   CPMU_CTRL_LINK_AWARE_MODE));
11336         }
11337
11338         if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
11339                 err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11340
11341         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11342             tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
11343                 err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11344
11345         if (tg3_flag(tp, CPMU_PRESENT)) {
11346                 tw32(TG3_CPMU_CTRL, cpmuctrl);
11347
11348                 /* Release the mutex */
11349                 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
11350         }
11351
11352         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11353             !tg3_flag(tp, USE_PHYLIB)) {
11354                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
11355                         err |= TG3_STD_LOOPBACK_FAILED <<
11356                                TG3_PHY_LOOPBACK_SHIFT;
11357                 if (tg3_flag(tp, TSO_CAPABLE) &&
11358                     tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
11359                         err |= TG3_TSO_LOOPBACK_FAILED <<
11360                                TG3_PHY_LOOPBACK_SHIFT;
11361                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11362                     tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
11363                         err |= TG3_JMB_LOOPBACK_FAILED <<
11364                                TG3_PHY_LOOPBACK_SHIFT;
11365         }
11366
11367         /* Re-enable gphy autopowerdown. */
11368         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11369                 tg3_phy_toggle_apd(tp, true);
11370
11371 done:
11372         tp->phy_flags |= eee_cap;
11373
11374         return err;
11375 }
11376
11377 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11378                           u64 *data)
11379 {
11380         struct tg3 *tp = netdev_priv(dev);
11381
11382         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11383                 tg3_power_up(tp);
11384
11385         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11386
11387         if (tg3_test_nvram(tp) != 0) {
11388                 etest->flags |= ETH_TEST_FL_FAILED;
11389                 data[0] = 1;
11390         }
11391         if (tg3_test_link(tp) != 0) {
11392                 etest->flags |= ETH_TEST_FL_FAILED;
11393                 data[1] = 1;
11394         }
11395         if (etest->flags & ETH_TEST_FL_OFFLINE) {
11396                 int err, err2 = 0, irq_sync = 0;
11397
11398                 if (netif_running(dev)) {
11399                         tg3_phy_stop(tp);
11400                         tg3_netif_stop(tp);
11401                         irq_sync = 1;
11402                 }
11403
11404                 tg3_full_lock(tp, irq_sync);
11405
11406                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11407                 err = tg3_nvram_lock(tp);
11408                 tg3_halt_cpu(tp, RX_CPU_BASE);
11409                 if (!tg3_flag(tp, 5705_PLUS))
11410                         tg3_halt_cpu(tp, TX_CPU_BASE);
11411                 if (!err)
11412                         tg3_nvram_unlock(tp);
11413
11414                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11415                         tg3_phy_reset(tp);
11416
11417                 if (tg3_test_registers(tp) != 0) {
11418                         etest->flags |= ETH_TEST_FL_FAILED;
11419                         data[2] = 1;
11420                 }
11421                 if (tg3_test_memory(tp) != 0) {
11422                         etest->flags |= ETH_TEST_FL_FAILED;
11423                         data[3] = 1;
11424                 }
11425                 if ((data[4] = tg3_test_loopback(tp)) != 0)
11426                         etest->flags |= ETH_TEST_FL_FAILED;
11427
11428                 tg3_full_unlock(tp);
11429
11430                 if (tg3_test_interrupt(tp) != 0) {
11431                         etest->flags |= ETH_TEST_FL_FAILED;
11432                         data[5] = 1;
11433                 }
11434
11435                 tg3_full_lock(tp, 0);
11436
11437                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11438                 if (netif_running(dev)) {
11439                         tg3_flag_set(tp, INIT_COMPLETE);
11440                         err2 = tg3_restart_hw(tp, 1);
11441                         if (!err2)
11442                                 tg3_netif_start(tp);
11443                 }
11444
11445                 tg3_full_unlock(tp);
11446
11447                 if (irq_sync && !err2)
11448                         tg3_phy_start(tp);
11449         }
11450         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11451                 tg3_power_down(tp);
11452
11453 }
11454
11455 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11456 {
11457         struct mii_ioctl_data *data = if_mii(ifr);
11458         struct tg3 *tp = netdev_priv(dev);
11459         int err;
11460
11461         if (tg3_flag(tp, USE_PHYLIB)) {
11462                 struct phy_device *phydev;
11463                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11464                         return -EAGAIN;
11465                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11466                 return phy_mii_ioctl(phydev, ifr, cmd);
11467         }
11468
11469         switch (cmd) {
11470         case SIOCGMIIPHY:
11471                 data->phy_id = tp->phy_addr;
11472
11473                 /* fallthru */
11474         case SIOCGMIIREG: {
11475                 u32 mii_regval;
11476
11477                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11478                         break;                  /* We have no PHY */
11479
11480                 if (!netif_running(dev))
11481                         return -EAGAIN;
11482
11483                 spin_lock_bh(&tp->lock);
11484                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11485                 spin_unlock_bh(&tp->lock);
11486
11487                 data->val_out = mii_regval;
11488
11489                 return err;
11490         }
11491
11492         case SIOCSMIIREG:
11493                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11494                         break;                  /* We have no PHY */
11495
11496                 if (!netif_running(dev))
11497                         return -EAGAIN;
11498
11499                 spin_lock_bh(&tp->lock);
11500                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11501                 spin_unlock_bh(&tp->lock);
11502
11503                 return err;
11504
11505         default:
11506                 /* do nothing */
11507                 break;
11508         }
11509         return -EOPNOTSUPP;
11510 }
11511
11512 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11513 {
11514         struct tg3 *tp = netdev_priv(dev);
11515
11516         memcpy(ec, &tp->coal, sizeof(*ec));
11517         return 0;
11518 }
11519
11520 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11521 {
11522         struct tg3 *tp = netdev_priv(dev);
11523         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11524         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11525
11526         if (!tg3_flag(tp, 5705_PLUS)) {
11527                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11528                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11529                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11530                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11531         }
11532
11533         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11534             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11535             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11536             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11537             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11538             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11539             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11540             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11541             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11542             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11543                 return -EINVAL;
11544
11545         /* No rx interrupts will be generated if both are zero */
11546         if ((ec->rx_coalesce_usecs == 0) &&
11547             (ec->rx_max_coalesced_frames == 0))
11548                 return -EINVAL;
11549
11550         /* No tx interrupts will be generated if both are zero */
11551         if ((ec->tx_coalesce_usecs == 0) &&
11552             (ec->tx_max_coalesced_frames == 0))
11553                 return -EINVAL;
11554
11555         /* Only copy relevant parameters, ignore all others. */
11556         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11557         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11558         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11559         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11560         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11561         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11562         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11563         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11564         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11565
11566         if (netif_running(dev)) {
11567                 tg3_full_lock(tp, 0);
11568                 __tg3_set_coalesce(tp, &tp->coal);
11569                 tg3_full_unlock(tp);
11570         }
11571         return 0;
11572 }
11573
11574 static const struct ethtool_ops tg3_ethtool_ops = {
11575         .get_settings           = tg3_get_settings,
11576         .set_settings           = tg3_set_settings,
11577         .get_drvinfo            = tg3_get_drvinfo,
11578         .get_regs_len           = tg3_get_regs_len,
11579         .get_regs               = tg3_get_regs,
11580         .get_wol                = tg3_get_wol,
11581         .set_wol                = tg3_set_wol,
11582         .get_msglevel           = tg3_get_msglevel,
11583         .set_msglevel           = tg3_set_msglevel,
11584         .nway_reset             = tg3_nway_reset,
11585         .get_link               = ethtool_op_get_link,
11586         .get_eeprom_len         = tg3_get_eeprom_len,
11587         .get_eeprom             = tg3_get_eeprom,
11588         .set_eeprom             = tg3_set_eeprom,
11589         .get_ringparam          = tg3_get_ringparam,
11590         .set_ringparam          = tg3_set_ringparam,
11591         .get_pauseparam         = tg3_get_pauseparam,
11592         .set_pauseparam         = tg3_set_pauseparam,
11593         .self_test              = tg3_self_test,
11594         .get_strings            = tg3_get_strings,
11595         .set_phys_id            = tg3_set_phys_id,
11596         .get_ethtool_stats      = tg3_get_ethtool_stats,
11597         .get_coalesce           = tg3_get_coalesce,
11598         .set_coalesce           = tg3_set_coalesce,
11599         .get_sset_count         = tg3_get_sset_count,
11600 };
11601
11602 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11603 {
11604         u32 cursize, val, magic;
11605
11606         tp->nvram_size = EEPROM_CHIP_SIZE;
11607
11608         if (tg3_nvram_read(tp, 0, &magic) != 0)
11609                 return;
11610
11611         if ((magic != TG3_EEPROM_MAGIC) &&
11612             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11613             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11614                 return;
11615
11616         /*
11617          * Size the chip by reading offsets at increasing powers of two.
11618          * When we encounter our validation signature, we know the addressing
11619          * has wrapped around, and thus have our chip size.
11620          */
11621         cursize = 0x10;
11622
11623         while (cursize < tp->nvram_size) {
11624                 if (tg3_nvram_read(tp, cursize, &val) != 0)
11625                         return;
11626
11627                 if (val == magic)
11628                         break;
11629
11630                 cursize <<= 1;
11631         }
11632
11633         tp->nvram_size = cursize;
11634 }
11635
11636 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11637 {
11638         u32 val;
11639
11640         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11641                 return;
11642
11643         /* Selfboot format */
11644         if (val != TG3_EEPROM_MAGIC) {
11645                 tg3_get_eeprom_size(tp);
11646                 return;
11647         }
11648
11649         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11650                 if (val != 0) {
11651                         /* This is confusing.  We want to operate on the
11652                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
11653                          * call will read from NVRAM and byteswap the data
11654                          * according to the byteswapping settings for all
11655                          * other register accesses.  This ensures the data we
11656                          * want will always reside in the lower 16-bits.
11657                          * However, the data in NVRAM is in LE format, which
11658                          * means the data from the NVRAM read will always be
11659                          * opposite the endianness of the CPU.  The 16-bit
11660                          * byteswap then brings the data to CPU endianness.
11661                          */
11662                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11663                         return;
11664                 }
11665         }
11666         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11667 }
11668
11669 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11670 {
11671         u32 nvcfg1;
11672
11673         nvcfg1 = tr32(NVRAM_CFG1);
11674         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11675                 tg3_flag_set(tp, FLASH);
11676         } else {
11677                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11678                 tw32(NVRAM_CFG1, nvcfg1);
11679         }
11680
11681         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11682             tg3_flag(tp, 5780_CLASS)) {
11683                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11684                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11685                         tp->nvram_jedecnum = JEDEC_ATMEL;
11686                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11687                         tg3_flag_set(tp, NVRAM_BUFFERED);
11688                         break;
11689                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11690                         tp->nvram_jedecnum = JEDEC_ATMEL;
11691                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11692                         break;
11693                 case FLASH_VENDOR_ATMEL_EEPROM:
11694                         tp->nvram_jedecnum = JEDEC_ATMEL;
11695                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11696                         tg3_flag_set(tp, NVRAM_BUFFERED);
11697                         break;
11698                 case FLASH_VENDOR_ST:
11699                         tp->nvram_jedecnum = JEDEC_ST;
11700                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11701                         tg3_flag_set(tp, NVRAM_BUFFERED);
11702                         break;
11703                 case FLASH_VENDOR_SAIFUN:
11704                         tp->nvram_jedecnum = JEDEC_SAIFUN;
11705                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11706                         break;
11707                 case FLASH_VENDOR_SST_SMALL:
11708                 case FLASH_VENDOR_SST_LARGE:
11709                         tp->nvram_jedecnum = JEDEC_SST;
11710                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11711                         break;
11712                 }
11713         } else {
11714                 tp->nvram_jedecnum = JEDEC_ATMEL;
11715                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11716                 tg3_flag_set(tp, NVRAM_BUFFERED);
11717         }
11718 }
11719
11720 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11721 {
11722         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11723         case FLASH_5752PAGE_SIZE_256:
11724                 tp->nvram_pagesize = 256;
11725                 break;
11726         case FLASH_5752PAGE_SIZE_512:
11727                 tp->nvram_pagesize = 512;
11728                 break;
11729         case FLASH_5752PAGE_SIZE_1K:
11730                 tp->nvram_pagesize = 1024;
11731                 break;
11732         case FLASH_5752PAGE_SIZE_2K:
11733                 tp->nvram_pagesize = 2048;
11734                 break;
11735         case FLASH_5752PAGE_SIZE_4K:
11736                 tp->nvram_pagesize = 4096;
11737                 break;
11738         case FLASH_5752PAGE_SIZE_264:
11739                 tp->nvram_pagesize = 264;
11740                 break;
11741         case FLASH_5752PAGE_SIZE_528:
11742                 tp->nvram_pagesize = 528;
11743                 break;
11744         }
11745 }
11746
11747 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11748 {
11749         u32 nvcfg1;
11750
11751         nvcfg1 = tr32(NVRAM_CFG1);
11752
11753         /* NVRAM protection for TPM */
11754         if (nvcfg1 & (1 << 27))
11755                 tg3_flag_set(tp, PROTECTED_NVRAM);
11756
11757         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11758         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11759         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11760                 tp->nvram_jedecnum = JEDEC_ATMEL;
11761                 tg3_flag_set(tp, NVRAM_BUFFERED);
11762                 break;
11763         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11764                 tp->nvram_jedecnum = JEDEC_ATMEL;
11765                 tg3_flag_set(tp, NVRAM_BUFFERED);
11766                 tg3_flag_set(tp, FLASH);
11767                 break;
11768         case FLASH_5752VENDOR_ST_M45PE10:
11769         case FLASH_5752VENDOR_ST_M45PE20:
11770         case FLASH_5752VENDOR_ST_M45PE40:
11771                 tp->nvram_jedecnum = JEDEC_ST;
11772                 tg3_flag_set(tp, NVRAM_BUFFERED);
11773                 tg3_flag_set(tp, FLASH);
11774                 break;
11775         }
11776
11777         if (tg3_flag(tp, FLASH)) {
11778                 tg3_nvram_get_pagesize(tp, nvcfg1);
11779         } else {
11780                 /* For eeprom, set pagesize to maximum eeprom size */
11781                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11782
11783                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11784                 tw32(NVRAM_CFG1, nvcfg1);
11785         }
11786 }
11787
11788 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11789 {
11790         u32 nvcfg1, protect = 0;
11791
11792         nvcfg1 = tr32(NVRAM_CFG1);
11793
11794         /* NVRAM protection for TPM */
11795         if (nvcfg1 & (1 << 27)) {
11796                 tg3_flag_set(tp, PROTECTED_NVRAM);
11797                 protect = 1;
11798         }
11799
11800         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11801         switch (nvcfg1) {
11802         case FLASH_5755VENDOR_ATMEL_FLASH_1:
11803         case FLASH_5755VENDOR_ATMEL_FLASH_2:
11804         case FLASH_5755VENDOR_ATMEL_FLASH_3:
11805         case FLASH_5755VENDOR_ATMEL_FLASH_5:
11806                 tp->nvram_jedecnum = JEDEC_ATMEL;
11807                 tg3_flag_set(tp, NVRAM_BUFFERED);
11808                 tg3_flag_set(tp, FLASH);
11809                 tp->nvram_pagesize = 264;
11810                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11811                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11812                         tp->nvram_size = (protect ? 0x3e200 :
11813                                           TG3_NVRAM_SIZE_512KB);
11814                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11815                         tp->nvram_size = (protect ? 0x1f200 :
11816                                           TG3_NVRAM_SIZE_256KB);
11817                 else
11818                         tp->nvram_size = (protect ? 0x1f200 :
11819                                           TG3_NVRAM_SIZE_128KB);
11820                 break;
11821         case FLASH_5752VENDOR_ST_M45PE10:
11822         case FLASH_5752VENDOR_ST_M45PE20:
11823         case FLASH_5752VENDOR_ST_M45PE40:
11824                 tp->nvram_jedecnum = JEDEC_ST;
11825                 tg3_flag_set(tp, NVRAM_BUFFERED);
11826                 tg3_flag_set(tp, FLASH);
11827                 tp->nvram_pagesize = 256;
11828                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
11829                         tp->nvram_size = (protect ?
11830                                           TG3_NVRAM_SIZE_64KB :
11831                                           TG3_NVRAM_SIZE_128KB);
11832                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
11833                         tp->nvram_size = (protect ?
11834                                           TG3_NVRAM_SIZE_64KB :
11835                                           TG3_NVRAM_SIZE_256KB);
11836                 else
11837                         tp->nvram_size = (protect ?
11838                                           TG3_NVRAM_SIZE_128KB :
11839                                           TG3_NVRAM_SIZE_512KB);
11840                 break;
11841         }
11842 }
11843
11844 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
11845 {
11846         u32 nvcfg1;
11847
11848         nvcfg1 = tr32(NVRAM_CFG1);
11849
11850         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11851         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
11852         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11853         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
11854         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11855                 tp->nvram_jedecnum = JEDEC_ATMEL;
11856                 tg3_flag_set(tp, NVRAM_BUFFERED);
11857                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11858
11859                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11860                 tw32(NVRAM_CFG1, nvcfg1);
11861                 break;
11862         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11863         case FLASH_5755VENDOR_ATMEL_FLASH_1:
11864         case FLASH_5755VENDOR_ATMEL_FLASH_2:
11865         case FLASH_5755VENDOR_ATMEL_FLASH_3:
11866                 tp->nvram_jedecnum = JEDEC_ATMEL;
11867                 tg3_flag_set(tp, NVRAM_BUFFERED);
11868                 tg3_flag_set(tp, FLASH);
11869                 tp->nvram_pagesize = 264;
11870                 break;
11871         case FLASH_5752VENDOR_ST_M45PE10:
11872         case FLASH_5752VENDOR_ST_M45PE20:
11873         case FLASH_5752VENDOR_ST_M45PE40:
11874                 tp->nvram_jedecnum = JEDEC_ST;
11875                 tg3_flag_set(tp, NVRAM_BUFFERED);
11876                 tg3_flag_set(tp, FLASH);
11877                 tp->nvram_pagesize = 256;
11878                 break;
11879         }
11880 }
11881
11882 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11883 {
11884         u32 nvcfg1, protect = 0;
11885
11886         nvcfg1 = tr32(NVRAM_CFG1);
11887
11888         /* NVRAM protection for TPM */
11889         if (nvcfg1 & (1 << 27)) {
11890                 tg3_flag_set(tp, PROTECTED_NVRAM);
11891                 protect = 1;
11892         }
11893
11894         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11895         switch (nvcfg1) {
11896         case FLASH_5761VENDOR_ATMEL_ADB021D:
11897         case FLASH_5761VENDOR_ATMEL_ADB041D:
11898         case FLASH_5761VENDOR_ATMEL_ADB081D:
11899         case FLASH_5761VENDOR_ATMEL_ADB161D:
11900         case FLASH_5761VENDOR_ATMEL_MDB021D:
11901         case FLASH_5761VENDOR_ATMEL_MDB041D:
11902         case FLASH_5761VENDOR_ATMEL_MDB081D:
11903         case FLASH_5761VENDOR_ATMEL_MDB161D:
11904                 tp->nvram_jedecnum = JEDEC_ATMEL;
11905                 tg3_flag_set(tp, NVRAM_BUFFERED);
11906                 tg3_flag_set(tp, FLASH);
11907                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
11908                 tp->nvram_pagesize = 256;
11909                 break;
11910         case FLASH_5761VENDOR_ST_A_M45PE20:
11911         case FLASH_5761VENDOR_ST_A_M45PE40:
11912         case FLASH_5761VENDOR_ST_A_M45PE80:
11913         case FLASH_5761VENDOR_ST_A_M45PE16:
11914         case FLASH_5761VENDOR_ST_M_M45PE20:
11915         case FLASH_5761VENDOR_ST_M_M45PE40:
11916         case FLASH_5761VENDOR_ST_M_M45PE80:
11917         case FLASH_5761VENDOR_ST_M_M45PE16:
11918                 tp->nvram_jedecnum = JEDEC_ST;
11919                 tg3_flag_set(tp, NVRAM_BUFFERED);
11920                 tg3_flag_set(tp, FLASH);
11921                 tp->nvram_pagesize = 256;
11922                 break;
11923         }
11924
11925         if (protect) {
11926                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
11927         } else {
11928                 switch (nvcfg1) {
11929                 case FLASH_5761VENDOR_ATMEL_ADB161D:
11930                 case FLASH_5761VENDOR_ATMEL_MDB161D:
11931                 case FLASH_5761VENDOR_ST_A_M45PE16:
11932                 case FLASH_5761VENDOR_ST_M_M45PE16:
11933                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
11934                         break;
11935                 case FLASH_5761VENDOR_ATMEL_ADB081D:
11936                 case FLASH_5761VENDOR_ATMEL_MDB081D:
11937                 case FLASH_5761VENDOR_ST_A_M45PE80:
11938                 case FLASH_5761VENDOR_ST_M_M45PE80:
11939                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
11940                         break;
11941                 case FLASH_5761VENDOR_ATMEL_ADB041D:
11942                 case FLASH_5761VENDOR_ATMEL_MDB041D:
11943                 case FLASH_5761VENDOR_ST_A_M45PE40:
11944                 case FLASH_5761VENDOR_ST_M_M45PE40:
11945                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11946                         break;
11947                 case FLASH_5761VENDOR_ATMEL_ADB021D:
11948                 case FLASH_5761VENDOR_ATMEL_MDB021D:
11949                 case FLASH_5761VENDOR_ST_A_M45PE20:
11950                 case FLASH_5761VENDOR_ST_M_M45PE20:
11951                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11952                         break;
11953                 }
11954         }
11955 }
11956
11957 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
11958 {
11959         tp->nvram_jedecnum = JEDEC_ATMEL;
11960         tg3_flag_set(tp, NVRAM_BUFFERED);
11961         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11962 }
11963
11964 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
11965 {
11966         u32 nvcfg1;
11967
11968         nvcfg1 = tr32(NVRAM_CFG1);
11969
11970         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11971         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11972         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11973                 tp->nvram_jedecnum = JEDEC_ATMEL;
11974                 tg3_flag_set(tp, NVRAM_BUFFERED);
11975                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11976
11977                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11978                 tw32(NVRAM_CFG1, nvcfg1);
11979                 return;
11980         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11981         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11982         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11983         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11984         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11985         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11986         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11987                 tp->nvram_jedecnum = JEDEC_ATMEL;
11988                 tg3_flag_set(tp, NVRAM_BUFFERED);
11989                 tg3_flag_set(tp, FLASH);
11990
11991                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11992                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11993                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11994                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11995                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11996                         break;
11997                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11998                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11999                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12000                         break;
12001                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12002                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12003                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12004                         break;
12005                 }
12006                 break;
12007         case FLASH_5752VENDOR_ST_M45PE10:
12008         case FLASH_5752VENDOR_ST_M45PE20:
12009         case FLASH_5752VENDOR_ST_M45PE40:
12010                 tp->nvram_jedecnum = JEDEC_ST;
12011                 tg3_flag_set(tp, NVRAM_BUFFERED);
12012                 tg3_flag_set(tp, FLASH);
12013
12014                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12015                 case FLASH_5752VENDOR_ST_M45PE10:
12016                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12017                         break;
12018                 case FLASH_5752VENDOR_ST_M45PE20:
12019                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12020                         break;
12021                 case FLASH_5752VENDOR_ST_M45PE40:
12022                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12023                         break;
12024                 }
12025                 break;
12026         default:
12027                 tg3_flag_set(tp, NO_NVRAM);
12028                 return;
12029         }
12030
12031         tg3_nvram_get_pagesize(tp, nvcfg1);
12032         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12033                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12034 }
12035
12036
12037 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12038 {
12039         u32 nvcfg1;
12040
12041         nvcfg1 = tr32(NVRAM_CFG1);
12042
12043         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12044         case FLASH_5717VENDOR_ATMEL_EEPROM:
12045         case FLASH_5717VENDOR_MICRO_EEPROM:
12046                 tp->nvram_jedecnum = JEDEC_ATMEL;
12047                 tg3_flag_set(tp, NVRAM_BUFFERED);
12048                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12049
12050                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12051                 tw32(NVRAM_CFG1, nvcfg1);
12052                 return;
12053         case FLASH_5717VENDOR_ATMEL_MDB011D:
12054         case FLASH_5717VENDOR_ATMEL_ADB011B:
12055         case FLASH_5717VENDOR_ATMEL_ADB011D:
12056         case FLASH_5717VENDOR_ATMEL_MDB021D:
12057         case FLASH_5717VENDOR_ATMEL_ADB021B:
12058         case FLASH_5717VENDOR_ATMEL_ADB021D:
12059         case FLASH_5717VENDOR_ATMEL_45USPT:
12060                 tp->nvram_jedecnum = JEDEC_ATMEL;
12061                 tg3_flag_set(tp, NVRAM_BUFFERED);
12062                 tg3_flag_set(tp, FLASH);
12063
12064                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12065                 case FLASH_5717VENDOR_ATMEL_MDB021D:
12066                         /* Detect size with tg3_nvram_get_size() */
12067                         break;
12068                 case FLASH_5717VENDOR_ATMEL_ADB021B:
12069                 case FLASH_5717VENDOR_ATMEL_ADB021D:
12070                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12071                         break;
12072                 default:
12073                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12074                         break;
12075                 }
12076                 break;
12077         case FLASH_5717VENDOR_ST_M_M25PE10:
12078         case FLASH_5717VENDOR_ST_A_M25PE10:
12079         case FLASH_5717VENDOR_ST_M_M45PE10:
12080         case FLASH_5717VENDOR_ST_A_M45PE10:
12081         case FLASH_5717VENDOR_ST_M_M25PE20:
12082         case FLASH_5717VENDOR_ST_A_M25PE20:
12083         case FLASH_5717VENDOR_ST_M_M45PE20:
12084         case FLASH_5717VENDOR_ST_A_M45PE20:
12085         case FLASH_5717VENDOR_ST_25USPT:
12086         case FLASH_5717VENDOR_ST_45USPT:
12087                 tp->nvram_jedecnum = JEDEC_ST;
12088                 tg3_flag_set(tp, NVRAM_BUFFERED);
12089                 tg3_flag_set(tp, FLASH);
12090
12091                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12092                 case FLASH_5717VENDOR_ST_M_M25PE20:
12093                 case FLASH_5717VENDOR_ST_M_M45PE20:
12094                         /* Detect size with tg3_nvram_get_size() */
12095                         break;
12096                 case FLASH_5717VENDOR_ST_A_M25PE20:
12097                 case FLASH_5717VENDOR_ST_A_M45PE20:
12098                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12099                         break;
12100                 default:
12101                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12102                         break;
12103                 }
12104                 break;
12105         default:
12106                 tg3_flag_set(tp, NO_NVRAM);
12107                 return;
12108         }
12109
12110         tg3_nvram_get_pagesize(tp, nvcfg1);
12111         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12112                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12113 }
12114
12115 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12116 {
12117         u32 nvcfg1, nvmpinstrp;
12118
12119         nvcfg1 = tr32(NVRAM_CFG1);
12120         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12121
12122         switch (nvmpinstrp) {
12123         case FLASH_5720_EEPROM_HD:
12124         case FLASH_5720_EEPROM_LD:
12125                 tp->nvram_jedecnum = JEDEC_ATMEL;
12126                 tg3_flag_set(tp, NVRAM_BUFFERED);
12127
12128                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12129                 tw32(NVRAM_CFG1, nvcfg1);
12130                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12131                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12132                 else
12133                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12134                 return;
12135         case FLASH_5720VENDOR_M_ATMEL_DB011D:
12136         case FLASH_5720VENDOR_A_ATMEL_DB011B:
12137         case FLASH_5720VENDOR_A_ATMEL_DB011D:
12138         case FLASH_5720VENDOR_M_ATMEL_DB021D:
12139         case FLASH_5720VENDOR_A_ATMEL_DB021B:
12140         case FLASH_5720VENDOR_A_ATMEL_DB021D:
12141         case FLASH_5720VENDOR_M_ATMEL_DB041D:
12142         case FLASH_5720VENDOR_A_ATMEL_DB041B:
12143         case FLASH_5720VENDOR_A_ATMEL_DB041D:
12144         case FLASH_5720VENDOR_M_ATMEL_DB081D:
12145         case FLASH_5720VENDOR_A_ATMEL_DB081D:
12146         case FLASH_5720VENDOR_ATMEL_45USPT:
12147                 tp->nvram_jedecnum = JEDEC_ATMEL;
12148                 tg3_flag_set(tp, NVRAM_BUFFERED);
12149                 tg3_flag_set(tp, FLASH);
12150
12151                 switch (nvmpinstrp) {
12152                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12153                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12154                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12155                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12156                         break;
12157                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12158                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12159                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12160                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12161                         break;
12162                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12163                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12164                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12165                         break;
12166                 default:
12167                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12168                         break;
12169                 }
12170                 break;
12171         case FLASH_5720VENDOR_M_ST_M25PE10:
12172         case FLASH_5720VENDOR_M_ST_M45PE10:
12173         case FLASH_5720VENDOR_A_ST_M25PE10:
12174         case FLASH_5720VENDOR_A_ST_M45PE10:
12175         case FLASH_5720VENDOR_M_ST_M25PE20:
12176         case FLASH_5720VENDOR_M_ST_M45PE20:
12177         case FLASH_5720VENDOR_A_ST_M25PE20:
12178         case FLASH_5720VENDOR_A_ST_M45PE20:
12179         case FLASH_5720VENDOR_M_ST_M25PE40:
12180         case FLASH_5720VENDOR_M_ST_M45PE40:
12181         case FLASH_5720VENDOR_A_ST_M25PE40:
12182         case FLASH_5720VENDOR_A_ST_M45PE40:
12183         case FLASH_5720VENDOR_M_ST_M25PE80:
12184         case FLASH_5720VENDOR_M_ST_M45PE80:
12185         case FLASH_5720VENDOR_A_ST_M25PE80:
12186         case FLASH_5720VENDOR_A_ST_M45PE80:
12187         case FLASH_5720VENDOR_ST_25USPT:
12188         case FLASH_5720VENDOR_ST_45USPT:
12189                 tp->nvram_jedecnum = JEDEC_ST;
12190                 tg3_flag_set(tp, NVRAM_BUFFERED);
12191                 tg3_flag_set(tp, FLASH);
12192
12193                 switch (nvmpinstrp) {
12194                 case FLASH_5720VENDOR_M_ST_M25PE20:
12195                 case FLASH_5720VENDOR_M_ST_M45PE20:
12196                 case FLASH_5720VENDOR_A_ST_M25PE20:
12197                 case FLASH_5720VENDOR_A_ST_M45PE20:
12198                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12199                         break;
12200                 case FLASH_5720VENDOR_M_ST_M25PE40:
12201                 case FLASH_5720VENDOR_M_ST_M45PE40:
12202                 case FLASH_5720VENDOR_A_ST_M25PE40:
12203                 case FLASH_5720VENDOR_A_ST_M45PE40:
12204                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12205                         break;
12206                 case FLASH_5720VENDOR_M_ST_M25PE80:
12207                 case FLASH_5720VENDOR_M_ST_M45PE80:
12208                 case FLASH_5720VENDOR_A_ST_M25PE80:
12209                 case FLASH_5720VENDOR_A_ST_M45PE80:
12210                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12211                         break;
12212                 default:
12213                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12214                         break;
12215                 }
12216                 break;
12217         default:
12218                 tg3_flag_set(tp, NO_NVRAM);
12219                 return;
12220         }
12221
12222         tg3_nvram_get_pagesize(tp, nvcfg1);
12223         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12224                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12225 }
12226
12227 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12228 static void __devinit tg3_nvram_init(struct tg3 *tp)
12229 {
12230         tw32_f(GRC_EEPROM_ADDR,
12231              (EEPROM_ADDR_FSM_RESET |
12232               (EEPROM_DEFAULT_CLOCK_PERIOD <<
12233                EEPROM_ADDR_CLKPERD_SHIFT)));
12234
12235         msleep(1);
12236
12237         /* Enable seeprom accesses. */
12238         tw32_f(GRC_LOCAL_CTRL,
12239              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12240         udelay(100);
12241
12242         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12243             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12244                 tg3_flag_set(tp, NVRAM);
12245
12246                 if (tg3_nvram_lock(tp)) {
12247                         netdev_warn(tp->dev,
12248                                     "Cannot get nvram lock, %s failed\n",
12249                                     __func__);
12250                         return;
12251                 }
12252                 tg3_enable_nvram_access(tp);
12253
12254                 tp->nvram_size = 0;
12255
12256                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12257                         tg3_get_5752_nvram_info(tp);
12258                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12259                         tg3_get_5755_nvram_info(tp);
12260                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12261                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12262                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12263                         tg3_get_5787_nvram_info(tp);
12264                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12265                         tg3_get_5761_nvram_info(tp);
12266                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12267                         tg3_get_5906_nvram_info(tp);
12268                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12269                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12270                         tg3_get_57780_nvram_info(tp);
12271                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12272                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12273                         tg3_get_5717_nvram_info(tp);
12274                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12275                         tg3_get_5720_nvram_info(tp);
12276                 else
12277                         tg3_get_nvram_info(tp);
12278
12279                 if (tp->nvram_size == 0)
12280                         tg3_get_nvram_size(tp);
12281
12282                 tg3_disable_nvram_access(tp);
12283                 tg3_nvram_unlock(tp);
12284
12285         } else {
12286                 tg3_flag_clear(tp, NVRAM);
12287                 tg3_flag_clear(tp, NVRAM_BUFFERED);
12288
12289                 tg3_get_eeprom_size(tp);
12290         }
12291 }
12292
12293 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12294                                     u32 offset, u32 len, u8 *buf)
12295 {
12296         int i, j, rc = 0;
12297         u32 val;
12298
12299         for (i = 0; i < len; i += 4) {
12300                 u32 addr;
12301                 __be32 data;
12302
12303                 addr = offset + i;
12304
12305                 memcpy(&data, buf + i, 4);
12306
12307                 /*
12308                  * The SEEPROM interface expects the data to always be opposite
12309                  * the native endian format.  We accomplish this by reversing
12310                  * all the operations that would have been performed on the
12311                  * data from a call to tg3_nvram_read_be32().
12312                  */
12313                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12314
12315                 val = tr32(GRC_EEPROM_ADDR);
12316                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12317
12318                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12319                         EEPROM_ADDR_READ);
12320                 tw32(GRC_EEPROM_ADDR, val |
12321                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
12322                         (addr & EEPROM_ADDR_ADDR_MASK) |
12323                         EEPROM_ADDR_START |
12324                         EEPROM_ADDR_WRITE);
12325
12326                 for (j = 0; j < 1000; j++) {
12327                         val = tr32(GRC_EEPROM_ADDR);
12328
12329                         if (val & EEPROM_ADDR_COMPLETE)
12330                                 break;
12331                         msleep(1);
12332                 }
12333                 if (!(val & EEPROM_ADDR_COMPLETE)) {
12334                         rc = -EBUSY;
12335                         break;
12336                 }
12337         }
12338
12339         return rc;
12340 }
12341
12342 /* offset and length are dword aligned */
12343 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12344                 u8 *buf)
12345 {
12346         int ret = 0;
12347         u32 pagesize = tp->nvram_pagesize;
12348         u32 pagemask = pagesize - 1;
12349         u32 nvram_cmd;
12350         u8 *tmp;
12351
12352         tmp = kmalloc(pagesize, GFP_KERNEL);
12353         if (tmp == NULL)
12354                 return -ENOMEM;
12355
12356         while (len) {
12357                 int j;
12358                 u32 phy_addr, page_off, size;
12359
12360                 phy_addr = offset & ~pagemask;
12361
12362                 for (j = 0; j < pagesize; j += 4) {
12363                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
12364                                                   (__be32 *) (tmp + j));
12365                         if (ret)
12366                                 break;
12367                 }
12368                 if (ret)
12369                         break;
12370
12371                 page_off = offset & pagemask;
12372                 size = pagesize;
12373                 if (len < size)
12374                         size = len;
12375
12376                 len -= size;
12377
12378                 memcpy(tmp + page_off, buf, size);
12379
12380                 offset = offset + (pagesize - page_off);
12381
12382                 tg3_enable_nvram_access(tp);
12383
12384                 /*
12385                  * Before we can erase the flash page, we need
12386                  * to issue a special "write enable" command.
12387                  */
12388                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12389
12390                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12391                         break;
12392
12393                 /* Erase the target page */
12394                 tw32(NVRAM_ADDR, phy_addr);
12395
12396                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12397                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12398
12399                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12400                         break;
12401
12402                 /* Issue another write enable to start the write. */
12403                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12404
12405                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12406                         break;
12407
12408                 for (j = 0; j < pagesize; j += 4) {
12409                         __be32 data;
12410
12411                         data = *((__be32 *) (tmp + j));
12412
12413                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
12414
12415                         tw32(NVRAM_ADDR, phy_addr + j);
12416
12417                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12418                                 NVRAM_CMD_WR;
12419
12420                         if (j == 0)
12421                                 nvram_cmd |= NVRAM_CMD_FIRST;
12422                         else if (j == (pagesize - 4))
12423                                 nvram_cmd |= NVRAM_CMD_LAST;
12424
12425                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12426                                 break;
12427                 }
12428                 if (ret)
12429                         break;
12430         }
12431
12432         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12433         tg3_nvram_exec_cmd(tp, nvram_cmd);
12434
12435         kfree(tmp);
12436
12437         return ret;
12438 }
12439
12440 /* offset and length are dword aligned */
12441 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12442                 u8 *buf)
12443 {
12444         int i, ret = 0;
12445
12446         for (i = 0; i < len; i += 4, offset += 4) {
12447                 u32 page_off, phy_addr, nvram_cmd;
12448                 __be32 data;
12449
12450                 memcpy(&data, buf + i, 4);
12451                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12452
12453                 page_off = offset % tp->nvram_pagesize;
12454
12455                 phy_addr = tg3_nvram_phys_addr(tp, offset);
12456
12457                 tw32(NVRAM_ADDR, phy_addr);
12458
12459                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12460
12461                 if (page_off == 0 || i == 0)
12462                         nvram_cmd |= NVRAM_CMD_FIRST;
12463                 if (page_off == (tp->nvram_pagesize - 4))
12464                         nvram_cmd |= NVRAM_CMD_LAST;
12465
12466                 if (i == (len - 4))
12467                         nvram_cmd |= NVRAM_CMD_LAST;
12468
12469                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12470                     !tg3_flag(tp, 5755_PLUS) &&
12471                     (tp->nvram_jedecnum == JEDEC_ST) &&
12472                     (nvram_cmd & NVRAM_CMD_FIRST)) {
12473
12474                         if ((ret = tg3_nvram_exec_cmd(tp,
12475                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12476                                 NVRAM_CMD_DONE)))
12477
12478                                 break;
12479                 }
12480                 if (!tg3_flag(tp, FLASH)) {
12481                         /* We always do complete word writes to eeprom. */
12482                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12483                 }
12484
12485                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12486                         break;
12487         }
12488         return ret;
12489 }
12490
12491 /* offset and length are dword aligned */
12492 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12493 {
12494         int ret;
12495
12496         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12497                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12498                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
12499                 udelay(40);
12500         }
12501
12502         if (!tg3_flag(tp, NVRAM)) {
12503                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12504         } else {
12505                 u32 grc_mode;
12506
12507                 ret = tg3_nvram_lock(tp);
12508                 if (ret)
12509                         return ret;
12510
12511                 tg3_enable_nvram_access(tp);
12512                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12513                         tw32(NVRAM_WRITE1, 0x406);
12514
12515                 grc_mode = tr32(GRC_MODE);
12516                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12517
12518                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12519                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
12520                                 buf);
12521                 } else {
12522                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12523                                 buf);
12524                 }
12525
12526                 grc_mode = tr32(GRC_MODE);
12527                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12528
12529                 tg3_disable_nvram_access(tp);
12530                 tg3_nvram_unlock(tp);
12531         }
12532
12533         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12534                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12535                 udelay(40);
12536         }
12537
12538         return ret;
12539 }
12540
12541 struct subsys_tbl_ent {
12542         u16 subsys_vendor, subsys_devid;
12543         u32 phy_id;
12544 };
12545
12546 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12547         /* Broadcom boards. */
12548         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12549           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12550         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12551           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12552         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12553           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12554         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12555           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12556         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12557           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12558         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12559           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12560         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12561           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12562         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12563           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12564         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12565           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12566         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12567           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12568         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12569           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12570
12571         /* 3com boards. */
12572         { TG3PCI_SUBVENDOR_ID_3COM,
12573           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12574         { TG3PCI_SUBVENDOR_ID_3COM,
12575           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12576         { TG3PCI_SUBVENDOR_ID_3COM,
12577           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12578         { TG3PCI_SUBVENDOR_ID_3COM,
12579           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12580         { TG3PCI_SUBVENDOR_ID_3COM,
12581           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12582
12583         /* DELL boards. */
12584         { TG3PCI_SUBVENDOR_ID_DELL,
12585           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12586         { TG3PCI_SUBVENDOR_ID_DELL,
12587           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12588         { TG3PCI_SUBVENDOR_ID_DELL,
12589           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12590         { TG3PCI_SUBVENDOR_ID_DELL,
12591           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12592
12593         /* Compaq boards. */
12594         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12595           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12596         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12597           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12598         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12599           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12600         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12601           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12602         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12603           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12604
12605         /* IBM boards. */
12606         { TG3PCI_SUBVENDOR_ID_IBM,
12607           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12608 };
12609
12610 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12611 {
12612         int i;
12613
12614         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12615                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12616                      tp->pdev->subsystem_vendor) &&
12617                     (subsys_id_to_phy_id[i].subsys_devid ==
12618                      tp->pdev->subsystem_device))
12619                         return &subsys_id_to_phy_id[i];
12620         }
12621         return NULL;
12622 }
12623
12624 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12625 {
12626         u32 val;
12627         u16 pmcsr;
12628
12629         /* On some early chips the SRAM cannot be accessed in D3hot state,
12630          * so need make sure we're in D0.
12631          */
12632         pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
12633         pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
12634         pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
12635         msleep(1);
12636
12637         /* Make sure register accesses (indirect or otherwise)
12638          * will function correctly.
12639          */
12640         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12641                                tp->misc_host_ctrl);
12642
12643         /* The memory arbiter has to be enabled in order for SRAM accesses
12644          * to succeed.  Normally on powerup the tg3 chip firmware will make
12645          * sure it is enabled, but other entities such as system netboot
12646          * code might disable it.
12647          */
12648         val = tr32(MEMARB_MODE);
12649         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
12650
12651         tp->phy_id = TG3_PHY_ID_INVALID;
12652         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12653
12654         /* Assume an onboard device and WOL capable by default.  */
12655         tg3_flag_set(tp, EEPROM_WRITE_PROT);
12656         tg3_flag_set(tp, WOL_CAP);
12657
12658         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12659                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12660                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12661                         tg3_flag_set(tp, IS_NIC);
12662                 }
12663                 val = tr32(VCPU_CFGSHDW);
12664                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12665                         tg3_flag_set(tp, ASPM_WORKAROUND);
12666                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12667                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
12668                         tg3_flag_set(tp, WOL_ENABLE);
12669                         device_set_wakeup_enable(&tp->pdev->dev, true);
12670                 }
12671                 goto done;
12672         }
12673
12674         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12675         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12676                 u32 nic_cfg, led_cfg;
12677                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12678                 int eeprom_phy_serdes = 0;
12679
12680                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12681                 tp->nic_sram_data_cfg = nic_cfg;
12682
12683                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12684                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12685                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12686                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12687                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
12688                     (ver > 0) && (ver < 0x100))
12689                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12690
12691                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12692                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12693
12694                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12695                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12696                         eeprom_phy_serdes = 1;
12697
12698                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12699                 if (nic_phy_id != 0) {
12700                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12701                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
12702
12703                         eeprom_phy_id  = (id1 >> 16) << 10;
12704                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
12705                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
12706                 } else
12707                         eeprom_phy_id = 0;
12708
12709                 tp->phy_id = eeprom_phy_id;
12710                 if (eeprom_phy_serdes) {
12711                         if (!tg3_flag(tp, 5705_PLUS))
12712                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12713                         else
12714                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
12715                 }
12716
12717                 if (tg3_flag(tp, 5750_PLUS))
12718                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12719                                     SHASTA_EXT_LED_MODE_MASK);
12720                 else
12721                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12722
12723                 switch (led_cfg) {
12724                 default:
12725                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12726                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12727                         break;
12728
12729                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12730                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12731                         break;
12732
12733                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12734                         tp->led_ctrl = LED_CTRL_MODE_MAC;
12735
12736                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
12737                          * read on some older 5700/5701 bootcode.
12738                          */
12739                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12740                             ASIC_REV_5700 ||
12741                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
12742                             ASIC_REV_5701)
12743                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12744
12745                         break;
12746
12747                 case SHASTA_EXT_LED_SHARED:
12748                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
12749                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12750                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12751                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12752                                                  LED_CTRL_MODE_PHY_2);
12753                         break;
12754
12755                 case SHASTA_EXT_LED_MAC:
12756                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12757                         break;
12758
12759                 case SHASTA_EXT_LED_COMBO:
12760                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
12761                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12762                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12763                                                  LED_CTRL_MODE_PHY_2);
12764                         break;
12765
12766                 }
12767
12768                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12769                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12770                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12771                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12772
12773                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12774                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12775
12776                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12777                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
12778                         if ((tp->pdev->subsystem_vendor ==
12779                              PCI_VENDOR_ID_ARIMA) &&
12780                             (tp->pdev->subsystem_device == 0x205a ||
12781                              tp->pdev->subsystem_device == 0x2063))
12782                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12783                 } else {
12784                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12785                         tg3_flag_set(tp, IS_NIC);
12786                 }
12787
12788                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12789                         tg3_flag_set(tp, ENABLE_ASF);
12790                         if (tg3_flag(tp, 5750_PLUS))
12791                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
12792                 }
12793
12794                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12795                     tg3_flag(tp, 5750_PLUS))
12796                         tg3_flag_set(tp, ENABLE_APE);
12797
12798                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
12799                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12800                         tg3_flag_clear(tp, WOL_CAP);
12801
12802                 if (tg3_flag(tp, WOL_CAP) &&
12803                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
12804                         tg3_flag_set(tp, WOL_ENABLE);
12805                         device_set_wakeup_enable(&tp->pdev->dev, true);
12806                 }
12807
12808                 if (cfg2 & (1 << 17))
12809                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
12810
12811                 /* serdes signal pre-emphasis in register 0x590 set by */
12812                 /* bootcode if bit 18 is set */
12813                 if (cfg2 & (1 << 18))
12814                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
12815
12816                 if ((tg3_flag(tp, 57765_PLUS) ||
12817                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12818                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
12819                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12820                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
12821
12822                 if (tg3_flag(tp, PCI_EXPRESS) &&
12823                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12824                     !tg3_flag(tp, 57765_PLUS)) {
12825                         u32 cfg3;
12826
12827                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
12828                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
12829                                 tg3_flag_set(tp, ASPM_WORKAROUND);
12830                 }
12831
12832                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
12833                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
12834                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
12835                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
12836                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
12837                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
12838         }
12839 done:
12840         if (tg3_flag(tp, WOL_CAP))
12841                 device_set_wakeup_enable(&tp->pdev->dev,
12842                                          tg3_flag(tp, WOL_ENABLE));
12843         else
12844                 device_set_wakeup_capable(&tp->pdev->dev, false);
12845 }
12846
12847 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
12848 {
12849         int i;
12850         u32 val;
12851
12852         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
12853         tw32(OTP_CTRL, cmd);
12854
12855         /* Wait for up to 1 ms for command to execute. */
12856         for (i = 0; i < 100; i++) {
12857                 val = tr32(OTP_STATUS);
12858                 if (val & OTP_STATUS_CMD_DONE)
12859                         break;
12860                 udelay(10);
12861         }
12862
12863         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
12864 }
12865
12866 /* Read the gphy configuration from the OTP region of the chip.  The gphy
12867  * configuration is a 32-bit value that straddles the alignment boundary.
12868  * We do two 32-bit reads and then shift and merge the results.
12869  */
12870 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
12871 {
12872         u32 bhalf_otp, thalf_otp;
12873
12874         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
12875
12876         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
12877                 return 0;
12878
12879         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
12880
12881         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12882                 return 0;
12883
12884         thalf_otp = tr32(OTP_READ_DATA);
12885
12886         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
12887
12888         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12889                 return 0;
12890
12891         bhalf_otp = tr32(OTP_READ_DATA);
12892
12893         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
12894 }
12895
12896 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
12897 {
12898         u32 adv = ADVERTISED_Autoneg |
12899                   ADVERTISED_Pause;
12900
12901         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12902                 adv |= ADVERTISED_1000baseT_Half |
12903                        ADVERTISED_1000baseT_Full;
12904
12905         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12906                 adv |= ADVERTISED_100baseT_Half |
12907                        ADVERTISED_100baseT_Full |
12908                        ADVERTISED_10baseT_Half |
12909                        ADVERTISED_10baseT_Full |
12910                        ADVERTISED_TP;
12911         else
12912                 adv |= ADVERTISED_FIBRE;
12913
12914         tp->link_config.advertising = adv;
12915         tp->link_config.speed = SPEED_INVALID;
12916         tp->link_config.duplex = DUPLEX_INVALID;
12917         tp->link_config.autoneg = AUTONEG_ENABLE;
12918         tp->link_config.active_speed = SPEED_INVALID;
12919         tp->link_config.active_duplex = DUPLEX_INVALID;
12920         tp->link_config.orig_speed = SPEED_INVALID;
12921         tp->link_config.orig_duplex = DUPLEX_INVALID;
12922         tp->link_config.orig_autoneg = AUTONEG_INVALID;
12923 }
12924
12925 static int __devinit tg3_phy_probe(struct tg3 *tp)
12926 {
12927         u32 hw_phy_id_1, hw_phy_id_2;
12928         u32 hw_phy_id, hw_phy_id_masked;
12929         int err;
12930
12931         /* flow control autonegotiation is default behavior */
12932         tg3_flag_set(tp, PAUSE_AUTONEG);
12933         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
12934
12935         if (tg3_flag(tp, USE_PHYLIB))
12936                 return tg3_phy_init(tp);
12937
12938         /* Reading the PHY ID register can conflict with ASF
12939          * firmware access to the PHY hardware.
12940          */
12941         err = 0;
12942         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
12943                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
12944         } else {
12945                 /* Now read the physical PHY_ID from the chip and verify
12946                  * that it is sane.  If it doesn't look good, we fall back
12947                  * to either the hard-coded table based PHY_ID and failing
12948                  * that the value found in the eeprom area.
12949                  */
12950                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
12951                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
12952
12953                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
12954                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
12955                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
12956
12957                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
12958         }
12959
12960         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
12961                 tp->phy_id = hw_phy_id;
12962                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
12963                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12964                 else
12965                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
12966         } else {
12967                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
12968                         /* Do nothing, phy ID already set up in
12969                          * tg3_get_eeprom_hw_cfg().
12970                          */
12971                 } else {
12972                         struct subsys_tbl_ent *p;
12973
12974                         /* No eeprom signature?  Try the hardcoded
12975                          * subsys device table.
12976                          */
12977                         p = tg3_lookup_by_subsys(tp);
12978                         if (!p)
12979                                 return -ENODEV;
12980
12981                         tp->phy_id = p->phy_id;
12982                         if (!tp->phy_id ||
12983                             tp->phy_id == TG3_PHY_ID_BCM8002)
12984                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12985                 }
12986         }
12987
12988         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12989             ((tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
12990               tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
12991              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12992               tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
12993                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
12994
12995         tg3_phy_init_link_config(tp);
12996
12997         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12998             !tg3_flag(tp, ENABLE_APE) &&
12999             !tg3_flag(tp, ENABLE_ASF)) {
13000                 u32 bmsr, mask;
13001
13002                 tg3_readphy(tp, MII_BMSR, &bmsr);
13003                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13004                     (bmsr & BMSR_LSTATUS))
13005                         goto skip_phy_reset;
13006
13007                 err = tg3_phy_reset(tp);
13008                 if (err)
13009                         return err;
13010
13011                 tg3_phy_set_wirespeed(tp);
13012
13013                 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13014                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13015                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
13016                 if (!tg3_copper_is_advertising_all(tp, mask)) {
13017                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13018                                             tp->link_config.flowctrl);
13019
13020                         tg3_writephy(tp, MII_BMCR,
13021                                      BMCR_ANENABLE | BMCR_ANRESTART);
13022                 }
13023         }
13024
13025 skip_phy_reset:
13026         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13027                 err = tg3_init_5401phy_dsp(tp);
13028                 if (err)
13029                         return err;
13030
13031                 err = tg3_init_5401phy_dsp(tp);
13032         }
13033
13034         return err;
13035 }
13036
13037 static void __devinit tg3_read_vpd(struct tg3 *tp)
13038 {
13039         u8 *vpd_data;
13040         unsigned int block_end, rosize, len;
13041         int j, i = 0;
13042
13043         vpd_data = (u8 *)tg3_vpd_readblock(tp);
13044         if (!vpd_data)
13045                 goto out_no_vpd;
13046
13047         i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
13048                              PCI_VPD_LRDT_RO_DATA);
13049         if (i < 0)
13050                 goto out_not_found;
13051
13052         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13053         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13054         i += PCI_VPD_LRDT_TAG_SIZE;
13055
13056         if (block_end > TG3_NVM_VPD_LEN)
13057                 goto out_not_found;
13058
13059         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13060                                       PCI_VPD_RO_KEYWORD_MFR_ID);
13061         if (j > 0) {
13062                 len = pci_vpd_info_field_size(&vpd_data[j]);
13063
13064                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13065                 if (j + len > block_end || len != 4 ||
13066                     memcmp(&vpd_data[j], "1028", 4))
13067                         goto partno;
13068
13069                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13070                                               PCI_VPD_RO_KEYWORD_VENDOR0);
13071                 if (j < 0)
13072                         goto partno;
13073
13074                 len = pci_vpd_info_field_size(&vpd_data[j]);
13075
13076                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13077                 if (j + len > block_end)
13078                         goto partno;
13079
13080                 memcpy(tp->fw_ver, &vpd_data[j], len);
13081                 strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
13082         }
13083
13084 partno:
13085         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13086                                       PCI_VPD_RO_KEYWORD_PARTNO);
13087         if (i < 0)
13088                 goto out_not_found;
13089
13090         len = pci_vpd_info_field_size(&vpd_data[i]);
13091
13092         i += PCI_VPD_INFO_FLD_HDR_SIZE;
13093         if (len > TG3_BPN_SIZE ||
13094             (len + i) > TG3_NVM_VPD_LEN)
13095                 goto out_not_found;
13096
13097         memcpy(tp->board_part_number, &vpd_data[i], len);
13098
13099 out_not_found:
13100         kfree(vpd_data);
13101         if (tp->board_part_number[0])
13102                 return;
13103
13104 out_no_vpd:
13105         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13106                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13107                         strcpy(tp->board_part_number, "BCM5717");
13108                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13109                         strcpy(tp->board_part_number, "BCM5718");
13110                 else
13111                         goto nomatch;
13112         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13113                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13114                         strcpy(tp->board_part_number, "BCM57780");
13115                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13116                         strcpy(tp->board_part_number, "BCM57760");
13117                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13118                         strcpy(tp->board_part_number, "BCM57790");
13119                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13120                         strcpy(tp->board_part_number, "BCM57788");
13121                 else
13122                         goto nomatch;
13123         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13124                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13125                         strcpy(tp->board_part_number, "BCM57761");
13126                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13127                         strcpy(tp->board_part_number, "BCM57765");
13128                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13129                         strcpy(tp->board_part_number, "BCM57781");
13130                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13131                         strcpy(tp->board_part_number, "BCM57785");
13132                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13133                         strcpy(tp->board_part_number, "BCM57791");
13134                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13135                         strcpy(tp->board_part_number, "BCM57795");
13136                 else
13137                         goto nomatch;
13138         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13139                 strcpy(tp->board_part_number, "BCM95906");
13140         } else {
13141 nomatch:
13142                 strcpy(tp->board_part_number, "none");
13143         }
13144 }
13145
13146 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13147 {
13148         u32 val;
13149
13150         if (tg3_nvram_read(tp, offset, &val) ||
13151             (val & 0xfc000000) != 0x0c000000 ||
13152             tg3_nvram_read(tp, offset + 4, &val) ||
13153             val != 0)
13154                 return 0;
13155
13156         return 1;
13157 }
13158
13159 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13160 {
13161         u32 val, offset, start, ver_offset;
13162         int i, dst_off;
13163         bool newver = false;
13164
13165         if (tg3_nvram_read(tp, 0xc, &offset) ||
13166             tg3_nvram_read(tp, 0x4, &start))
13167                 return;
13168
13169         offset = tg3_nvram_logical_addr(tp, offset);
13170
13171         if (tg3_nvram_read(tp, offset, &val))
13172                 return;
13173
13174         if ((val & 0xfc000000) == 0x0c000000) {
13175                 if (tg3_nvram_read(tp, offset + 4, &val))
13176                         return;
13177
13178                 if (val == 0)
13179                         newver = true;
13180         }
13181
13182         dst_off = strlen(tp->fw_ver);
13183
13184         if (newver) {
13185                 if (TG3_VER_SIZE - dst_off < 16 ||
13186                     tg3_nvram_read(tp, offset + 8, &ver_offset))
13187                         return;
13188
13189                 offset = offset + ver_offset - start;
13190                 for (i = 0; i < 16; i += 4) {
13191                         __be32 v;
13192                         if (tg3_nvram_read_be32(tp, offset + i, &v))
13193                                 return;
13194
13195                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13196                 }
13197         } else {
13198                 u32 major, minor;
13199
13200                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13201                         return;
13202
13203                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13204                         TG3_NVM_BCVER_MAJSFT;
13205                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13206                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13207                          "v%d.%02d", major, minor);
13208         }
13209 }
13210
13211 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13212 {
13213         u32 val, major, minor;
13214
13215         /* Use native endian representation */
13216         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13217                 return;
13218
13219         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13220                 TG3_NVM_HWSB_CFG1_MAJSFT;
13221         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13222                 TG3_NVM_HWSB_CFG1_MINSFT;
13223
13224         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13225 }
13226
13227 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13228 {
13229         u32 offset, major, minor, build;
13230
13231         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13232
13233         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13234                 return;
13235
13236         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13237         case TG3_EEPROM_SB_REVISION_0:
13238                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13239                 break;
13240         case TG3_EEPROM_SB_REVISION_2:
13241                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13242                 break;
13243         case TG3_EEPROM_SB_REVISION_3:
13244                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13245                 break;
13246         case TG3_EEPROM_SB_REVISION_4:
13247                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13248                 break;
13249         case TG3_EEPROM_SB_REVISION_5:
13250                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13251                 break;
13252         case TG3_EEPROM_SB_REVISION_6:
13253                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13254                 break;
13255         default:
13256                 return;
13257         }
13258
13259         if (tg3_nvram_read(tp, offset, &val))
13260                 return;
13261
13262         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13263                 TG3_EEPROM_SB_EDH_BLD_SHFT;
13264         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13265                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13266         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
13267
13268         if (minor > 99 || build > 26)
13269                 return;
13270
13271         offset = strlen(tp->fw_ver);
13272         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13273                  " v%d.%02d", major, minor);
13274
13275         if (build > 0) {
13276                 offset = strlen(tp->fw_ver);
13277                 if (offset < TG3_VER_SIZE - 1)
13278                         tp->fw_ver[offset] = 'a' + build - 1;
13279         }
13280 }
13281
13282 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13283 {
13284         u32 val, offset, start;
13285         int i, vlen;
13286
13287         for (offset = TG3_NVM_DIR_START;
13288              offset < TG3_NVM_DIR_END;
13289              offset += TG3_NVM_DIRENT_SIZE) {
13290                 if (tg3_nvram_read(tp, offset, &val))
13291                         return;
13292
13293                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13294                         break;
13295         }
13296
13297         if (offset == TG3_NVM_DIR_END)
13298                 return;
13299
13300         if (!tg3_flag(tp, 5705_PLUS))
13301                 start = 0x08000000;
13302         else if (tg3_nvram_read(tp, offset - 4, &start))
13303                 return;
13304
13305         if (tg3_nvram_read(tp, offset + 4, &offset) ||
13306             !tg3_fw_img_is_valid(tp, offset) ||
13307             tg3_nvram_read(tp, offset + 8, &val))
13308                 return;
13309
13310         offset += val - start;
13311
13312         vlen = strlen(tp->fw_ver);
13313
13314         tp->fw_ver[vlen++] = ',';
13315         tp->fw_ver[vlen++] = ' ';
13316
13317         for (i = 0; i < 4; i++) {
13318                 __be32 v;
13319                 if (tg3_nvram_read_be32(tp, offset, &v))
13320                         return;
13321
13322                 offset += sizeof(v);
13323
13324                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13325                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13326                         break;
13327                 }
13328
13329                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13330                 vlen += sizeof(v);
13331         }
13332 }
13333
13334 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13335 {
13336         int vlen;
13337         u32 apedata;
13338         char *fwtype;
13339
13340         if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13341                 return;
13342
13343         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13344         if (apedata != APE_SEG_SIG_MAGIC)
13345                 return;
13346
13347         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13348         if (!(apedata & APE_FW_STATUS_READY))
13349                 return;
13350
13351         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13352
13353         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13354                 tg3_flag_set(tp, APE_HAS_NCSI);
13355                 fwtype = "NCSI";
13356         } else {
13357                 fwtype = "DASH";
13358         }
13359
13360         vlen = strlen(tp->fw_ver);
13361
13362         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13363                  fwtype,
13364                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13365                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13366                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13367                  (apedata & APE_FW_VERSION_BLDMSK));
13368 }
13369
13370 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13371 {
13372         u32 val;
13373         bool vpd_vers = false;
13374
13375         if (tp->fw_ver[0] != 0)
13376                 vpd_vers = true;
13377
13378         if (tg3_flag(tp, NO_NVRAM)) {
13379                 strcat(tp->fw_ver, "sb");
13380                 return;
13381         }
13382
13383         if (tg3_nvram_read(tp, 0, &val))
13384                 return;
13385
13386         if (val == TG3_EEPROM_MAGIC)
13387                 tg3_read_bc_ver(tp);
13388         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13389                 tg3_read_sb_ver(tp, val);
13390         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13391                 tg3_read_hwsb_ver(tp);
13392         else
13393                 return;
13394
13395         if (!tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || vpd_vers)
13396                 goto done;
13397
13398         tg3_read_mgmtfw_ver(tp);
13399
13400 done:
13401         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13402 }
13403
13404 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13405
13406 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13407 {
13408         if (tg3_flag(tp, LRG_PROD_RING_CAP))
13409                 return TG3_RX_RET_MAX_SIZE_5717;
13410         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13411                 return TG3_RX_RET_MAX_SIZE_5700;
13412         else
13413                 return TG3_RX_RET_MAX_SIZE_5705;
13414 }
13415
13416 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13417         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13418         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13419         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13420         { },
13421 };
13422
13423 static int __devinit tg3_get_invariants(struct tg3 *tp)
13424 {
13425         u32 misc_ctrl_reg;
13426         u32 pci_state_reg, grc_misc_cfg;
13427         u32 val;
13428         u16 pci_cmd;
13429         int err;
13430
13431         /* Force memory write invalidate off.  If we leave it on,
13432          * then on 5700_BX chips we have to enable a workaround.
13433          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13434          * to match the cacheline size.  The Broadcom driver have this
13435          * workaround but turns MWI off all the times so never uses
13436          * it.  This seems to suggest that the workaround is insufficient.
13437          */
13438         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13439         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13440         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13441
13442         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
13443          * has the register indirect write enable bit set before
13444          * we try to access any of the MMIO registers.  It is also
13445          * critical that the PCI-X hw workaround situation is decided
13446          * before that as well.
13447          */
13448         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13449                               &misc_ctrl_reg);
13450
13451         tp->pci_chip_rev_id = (misc_ctrl_reg >>
13452                                MISC_HOST_CTRL_CHIPREV_SHIFT);
13453         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13454                 u32 prod_id_asic_rev;
13455
13456                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13457                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13458                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13459                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13460                         pci_read_config_dword(tp->pdev,
13461                                               TG3PCI_GEN2_PRODID_ASICREV,
13462                                               &prod_id_asic_rev);
13463                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13464                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13465                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13466                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13467                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13468                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13469                         pci_read_config_dword(tp->pdev,
13470                                               TG3PCI_GEN15_PRODID_ASICREV,
13471                                               &prod_id_asic_rev);
13472                 else
13473                         pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13474                                               &prod_id_asic_rev);
13475
13476                 tp->pci_chip_rev_id = prod_id_asic_rev;
13477         }
13478
13479         /* Wrong chip ID in 5752 A0. This code can be removed later
13480          * as A0 is not in production.
13481          */
13482         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13483                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13484
13485         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13486          * we need to disable memory and use config. cycles
13487          * only to access all registers. The 5702/03 chips
13488          * can mistakenly decode the special cycles from the
13489          * ICH chipsets as memory write cycles, causing corruption
13490          * of register and memory space. Only certain ICH bridges
13491          * will drive special cycles with non-zero data during the
13492          * address phase which can fall within the 5703's address
13493          * range. This is not an ICH bug as the PCI spec allows
13494          * non-zero address during special cycles. However, only
13495          * these ICH bridges are known to drive non-zero addresses
13496          * during special cycles.
13497          *
13498          * Since special cycles do not cross PCI bridges, we only
13499          * enable this workaround if the 5703 is on the secondary
13500          * bus of these ICH bridges.
13501          */
13502         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13503             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13504                 static struct tg3_dev_id {
13505                         u32     vendor;
13506                         u32     device;
13507                         u32     rev;
13508                 } ich_chipsets[] = {
13509                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13510                           PCI_ANY_ID },
13511                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13512                           PCI_ANY_ID },
13513                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13514                           0xa },
13515                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13516                           PCI_ANY_ID },
13517                         { },
13518                 };
13519                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13520                 struct pci_dev *bridge = NULL;
13521
13522                 while (pci_id->vendor != 0) {
13523                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
13524                                                 bridge);
13525                         if (!bridge) {
13526                                 pci_id++;
13527                                 continue;
13528                         }
13529                         if (pci_id->rev != PCI_ANY_ID) {
13530                                 if (bridge->revision > pci_id->rev)
13531                                         continue;
13532                         }
13533                         if (bridge->subordinate &&
13534                             (bridge->subordinate->number ==
13535                              tp->pdev->bus->number)) {
13536                                 tg3_flag_set(tp, ICH_WORKAROUND);
13537                                 pci_dev_put(bridge);
13538                                 break;
13539                         }
13540                 }
13541         }
13542
13543         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13544                 static struct tg3_dev_id {
13545                         u32     vendor;
13546                         u32     device;
13547                 } bridge_chipsets[] = {
13548                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13549                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13550                         { },
13551                 };
13552                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13553                 struct pci_dev *bridge = NULL;
13554
13555                 while (pci_id->vendor != 0) {
13556                         bridge = pci_get_device(pci_id->vendor,
13557                                                 pci_id->device,
13558                                                 bridge);
13559                         if (!bridge) {
13560                                 pci_id++;
13561                                 continue;
13562                         }
13563                         if (bridge->subordinate &&
13564                             (bridge->subordinate->number <=
13565                              tp->pdev->bus->number) &&
13566                             (bridge->subordinate->subordinate >=
13567                              tp->pdev->bus->number)) {
13568                                 tg3_flag_set(tp, 5701_DMA_BUG);
13569                                 pci_dev_put(bridge);
13570                                 break;
13571                         }
13572                 }
13573         }
13574
13575         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13576          * DMA addresses > 40-bit. This bridge may have other additional
13577          * 57xx devices behind it in some 4-port NIC designs for example.
13578          * Any tg3 device found behind the bridge will also need the 40-bit
13579          * DMA workaround.
13580          */
13581         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13582             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13583                 tg3_flag_set(tp, 5780_CLASS);
13584                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13585                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13586         } else {
13587                 struct pci_dev *bridge = NULL;
13588
13589                 do {
13590                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13591                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
13592                                                 bridge);
13593                         if (bridge && bridge->subordinate &&
13594                             (bridge->subordinate->number <=
13595                              tp->pdev->bus->number) &&
13596                             (bridge->subordinate->subordinate >=
13597                              tp->pdev->bus->number)) {
13598                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13599                                 pci_dev_put(bridge);
13600                                 break;
13601                         }
13602                 } while (bridge);
13603         }
13604
13605         /* Initialize misc host control in PCI block. */
13606         tp->misc_host_ctrl |= (misc_ctrl_reg &
13607                                MISC_HOST_CTRL_CHIPREV);
13608         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13609                                tp->misc_host_ctrl);
13610
13611         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13612             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
13613             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13614             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13615                 tp->pdev_peer = tg3_find_peer(tp);
13616
13617         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13618             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13619             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13620                 tg3_flag_set(tp, 5717_PLUS);
13621
13622         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13623             tg3_flag(tp, 5717_PLUS))
13624                 tg3_flag_set(tp, 57765_PLUS);
13625
13626         /* Intentionally exclude ASIC_REV_5906 */
13627         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13628             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13629             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13630             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13631             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13632             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13633             tg3_flag(tp, 57765_PLUS))
13634                 tg3_flag_set(tp, 5755_PLUS);
13635
13636         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13637             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13638             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13639             tg3_flag(tp, 5755_PLUS) ||
13640             tg3_flag(tp, 5780_CLASS))
13641                 tg3_flag_set(tp, 5750_PLUS);
13642
13643         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13644             tg3_flag(tp, 5750_PLUS))
13645                 tg3_flag_set(tp, 5705_PLUS);
13646
13647         /* Determine TSO capabilities */
13648         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13649                 ; /* Do nothing. HW bug. */
13650         else if (tg3_flag(tp, 57765_PLUS))
13651                 tg3_flag_set(tp, HW_TSO_3);
13652         else if (tg3_flag(tp, 5755_PLUS) ||
13653                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13654                 tg3_flag_set(tp, HW_TSO_2);
13655         else if (tg3_flag(tp, 5750_PLUS)) {
13656                 tg3_flag_set(tp, HW_TSO_1);
13657                 tg3_flag_set(tp, TSO_BUG);
13658                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13659                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13660                         tg3_flag_clear(tp, TSO_BUG);
13661         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13662                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13663                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13664                         tg3_flag_set(tp, TSO_BUG);
13665                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13666                         tp->fw_needed = FIRMWARE_TG3TSO5;
13667                 else
13668                         tp->fw_needed = FIRMWARE_TG3TSO;
13669         }
13670
13671         /* Selectively allow TSO based on operating conditions */
13672         if (tg3_flag(tp, HW_TSO_1) ||
13673             tg3_flag(tp, HW_TSO_2) ||
13674             tg3_flag(tp, HW_TSO_3) ||
13675             (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
13676                 tg3_flag_set(tp, TSO_CAPABLE);
13677         else {
13678                 tg3_flag_clear(tp, TSO_CAPABLE);
13679                 tg3_flag_clear(tp, TSO_BUG);
13680                 tp->fw_needed = NULL;
13681         }
13682
13683         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
13684                 tp->fw_needed = FIRMWARE_TG3;
13685
13686         tp->irq_max = 1;
13687
13688         if (tg3_flag(tp, 5750_PLUS)) {
13689                 tg3_flag_set(tp, SUPPORT_MSI);
13690                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13691                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13692                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13693                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13694                      tp->pdev_peer == tp->pdev))
13695                         tg3_flag_clear(tp, SUPPORT_MSI);
13696
13697                 if (tg3_flag(tp, 5755_PLUS) ||
13698                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13699                         tg3_flag_set(tp, 1SHOT_MSI);
13700                 }
13701
13702                 if (tg3_flag(tp, 57765_PLUS)) {
13703                         tg3_flag_set(tp, SUPPORT_MSIX);
13704                         tp->irq_max = TG3_IRQ_MAX_VECS;
13705                 }
13706         }
13707
13708         /* All chips can get confused if TX buffers
13709          * straddle the 4GB address boundary.
13710          */
13711         tg3_flag_set(tp, 4G_DMA_BNDRY_BUG);
13712
13713         if (tg3_flag(tp, 5755_PLUS))
13714                 tg3_flag_set(tp, SHORT_DMA_BUG);
13715         else
13716                 tg3_flag_set(tp, 40BIT_DMA_LIMIT_BUG);
13717
13718         if (tg3_flag(tp, 5717_PLUS))
13719                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
13720
13721         if (tg3_flag(tp, 57765_PLUS) &&
13722             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13723                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
13724
13725         if (!tg3_flag(tp, 5705_PLUS) ||
13726             tg3_flag(tp, 5780_CLASS) ||
13727             tg3_flag(tp, USE_JUMBO_BDFLAG))
13728                 tg3_flag_set(tp, JUMBO_CAPABLE);
13729
13730         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13731                               &pci_state_reg);
13732
13733         tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
13734         if (tp->pcie_cap != 0) {
13735                 u16 lnkctl;
13736
13737                 tg3_flag_set(tp, PCI_EXPRESS);
13738
13739                 tp->pcie_readrq = 4096;
13740                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13741                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13742                         tp->pcie_readrq = 2048;
13743
13744                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13745
13746                 pci_read_config_word(tp->pdev,
13747                                      tp->pcie_cap + PCI_EXP_LNKCTL,
13748                                      &lnkctl);
13749                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13750                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13751                             ASIC_REV_5906) {
13752                                 tg3_flag_clear(tp, HW_TSO_2);
13753                                 tg3_flag_clear(tp, TSO_CAPABLE);
13754                         }
13755                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13756                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13757                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13758                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13759                                 tg3_flag_set(tp, CLKREQ_BUG);
13760                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13761                         tg3_flag_set(tp, L1PLLPD_EN);
13762                 }
13763         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13764                 tg3_flag_set(tp, PCI_EXPRESS);
13765         } else if (!tg3_flag(tp, 5705_PLUS) ||
13766                    tg3_flag(tp, 5780_CLASS)) {
13767                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13768                 if (!tp->pcix_cap) {
13769                         dev_err(&tp->pdev->dev,
13770                                 "Cannot find PCI-X capability, aborting\n");
13771                         return -EIO;
13772                 }
13773
13774                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13775                         tg3_flag_set(tp, PCIX_MODE);
13776         }
13777
13778         /* If we have an AMD 762 or VIA K8T800 chipset, write
13779          * reordering to the mailbox registers done by the host
13780          * controller can cause major troubles.  We read back from
13781          * every mailbox register write to force the writes to be
13782          * posted to the chip in order.
13783          */
13784         if (pci_dev_present(tg3_write_reorder_chipsets) &&
13785             !tg3_flag(tp, PCI_EXPRESS))
13786                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
13787
13788         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13789                              &tp->pci_cacheline_sz);
13790         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13791                              &tp->pci_lat_timer);
13792         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13793             tp->pci_lat_timer < 64) {
13794                 tp->pci_lat_timer = 64;
13795                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13796                                       tp->pci_lat_timer);
13797         }
13798
13799         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13800                 /* 5700 BX chips need to have their TX producer index
13801                  * mailboxes written twice to workaround a bug.
13802                  */
13803                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
13804
13805                 /* If we are in PCI-X mode, enable register write workaround.
13806                  *
13807                  * The workaround is to use indirect register accesses
13808                  * for all chip writes not to mailbox registers.
13809                  */
13810                 if (tg3_flag(tp, PCIX_MODE)) {
13811                         u32 pm_reg;
13812
13813                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
13814
13815                         /* The chip can have it's power management PCI config
13816                          * space registers clobbered due to this bug.
13817                          * So explicitly force the chip into D0 here.
13818                          */
13819                         pci_read_config_dword(tp->pdev,
13820                                               tp->pm_cap + PCI_PM_CTRL,
13821                                               &pm_reg);
13822                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13823                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13824                         pci_write_config_dword(tp->pdev,
13825                                                tp->pm_cap + PCI_PM_CTRL,
13826                                                pm_reg);
13827
13828                         /* Also, force SERR#/PERR# in PCI command. */
13829                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13830                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13831                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13832                 }
13833         }
13834
13835         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13836                 tg3_flag_set(tp, PCI_HIGH_SPEED);
13837         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13838                 tg3_flag_set(tp, PCI_32BIT);
13839
13840         /* Chip-specific fixup from Broadcom driver */
13841         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13842             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
13843                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
13844                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
13845         }
13846
13847         /* Default fast path register access methods */
13848         tp->read32 = tg3_read32;
13849         tp->write32 = tg3_write32;
13850         tp->read32_mbox = tg3_read32;
13851         tp->write32_mbox = tg3_write32;
13852         tp->write32_tx_mbox = tg3_write32;
13853         tp->write32_rx_mbox = tg3_write32;
13854
13855         /* Various workaround register access methods */
13856         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
13857                 tp->write32 = tg3_write_indirect_reg32;
13858         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13859                  (tg3_flag(tp, PCI_EXPRESS) &&
13860                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
13861                 /*
13862                  * Back to back register writes can cause problems on these
13863                  * chips, the workaround is to read back all reg writes
13864                  * except those to mailbox regs.
13865                  *
13866                  * See tg3_write_indirect_reg32().
13867                  */
13868                 tp->write32 = tg3_write_flush_reg32;
13869         }
13870
13871         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
13872                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
13873                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
13874                         tp->write32_rx_mbox = tg3_write_flush_reg32;
13875         }
13876
13877         if (tg3_flag(tp, ICH_WORKAROUND)) {
13878                 tp->read32 = tg3_read_indirect_reg32;
13879                 tp->write32 = tg3_write_indirect_reg32;
13880                 tp->read32_mbox = tg3_read_indirect_mbox;
13881                 tp->write32_mbox = tg3_write_indirect_mbox;
13882                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
13883                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
13884
13885                 iounmap(tp->regs);
13886                 tp->regs = NULL;
13887
13888                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13889                 pci_cmd &= ~PCI_COMMAND_MEMORY;
13890                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13891         }
13892         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13893                 tp->read32_mbox = tg3_read32_mbox_5906;
13894                 tp->write32_mbox = tg3_write32_mbox_5906;
13895                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
13896                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
13897         }
13898
13899         if (tp->write32 == tg3_write_indirect_reg32 ||
13900             (tg3_flag(tp, PCIX_MODE) &&
13901              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13902               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
13903                 tg3_flag_set(tp, SRAM_USE_CONFIG);
13904
13905         /* Get eeprom hw config before calling tg3_set_power_state().
13906          * In particular, the TG3_FLAG_IS_NIC flag must be
13907          * determined before calling tg3_set_power_state() so that
13908          * we know whether or not to switch out of Vaux power.
13909          * When the flag is set, it means that GPIO1 is used for eeprom
13910          * write protect and also implies that it is a LOM where GPIOs
13911          * are not used to switch power.
13912          */
13913         tg3_get_eeprom_hw_cfg(tp);
13914
13915         if (tg3_flag(tp, ENABLE_APE)) {
13916                 /* Allow reads and writes to the
13917                  * APE register and memory space.
13918                  */
13919                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
13920                                  PCISTATE_ALLOW_APE_SHMEM_WR |
13921                                  PCISTATE_ALLOW_APE_PSPACE_WR;
13922                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
13923                                        pci_state_reg);
13924         }
13925
13926         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13927             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13928             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13929             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13930             tg3_flag(tp, 57765_PLUS))
13931                 tg3_flag_set(tp, CPMU_PRESENT);
13932
13933         /* Set up tp->grc_local_ctrl before calling tg3_power_up().
13934          * GPIO1 driven high will bring 5700's external PHY out of reset.
13935          * It is also used as eeprom write protect on LOMs.
13936          */
13937         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
13938         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13939             tg3_flag(tp, EEPROM_WRITE_PROT))
13940                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
13941                                        GRC_LCLCTRL_GPIO_OUTPUT1);
13942         /* Unused GPIO3 must be driven as output on 5752 because there
13943          * are no pull-up resistors on unused GPIO pins.
13944          */
13945         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13946                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
13947
13948         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13949             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13950             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13951                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13952
13953         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
13954             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
13955                 /* Turn off the debug UART. */
13956                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13957                 if (tg3_flag(tp, IS_NIC))
13958                         /* Keep VMain power. */
13959                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
13960                                               GRC_LCLCTRL_GPIO_OUTPUT0;
13961         }
13962
13963         /* Force the chip into D0. */
13964         err = tg3_power_up(tp);
13965         if (err) {
13966                 dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
13967                 return err;
13968         }
13969
13970         /* Derive initial jumbo mode from MTU assigned in
13971          * ether_setup() via the alloc_etherdev() call
13972          */
13973         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
13974                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13975
13976         /* Determine WakeOnLan speed to use. */
13977         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13978             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
13979             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
13980             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
13981                 tg3_flag_clear(tp, WOL_SPEED_100MB);
13982         } else {
13983                 tg3_flag_set(tp, WOL_SPEED_100MB);
13984         }
13985
13986         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13987                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
13988
13989         /* A few boards don't want Ethernet@WireSpeed phy feature */
13990         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13991             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
13992              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
13993              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
13994             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
13995             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13996                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
13997
13998         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
13999             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14000                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14001         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14002                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14003
14004         if (tg3_flag(tp, 5705_PLUS) &&
14005             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14006             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14007             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14008             !tg3_flag(tp, 57765_PLUS)) {
14009                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14010                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14011                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14012                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14013                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14014                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14015                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14016                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14017                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14018                 } else
14019                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14020         }
14021
14022         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14023             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14024                 tp->phy_otp = tg3_read_otp_phycfg(tp);
14025                 if (tp->phy_otp == 0)
14026                         tp->phy_otp = TG3_OTP_DEFAULT;
14027         }
14028
14029         if (tg3_flag(tp, CPMU_PRESENT))
14030                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14031         else
14032                 tp->mi_mode = MAC_MI_MODE_BASE;
14033
14034         tp->coalesce_mode = 0;
14035         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14036             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14037                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14038
14039         /* Set these bits to enable statistics workaround. */
14040         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14041             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14042             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14043                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14044                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14045         }
14046
14047         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14048             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14049                 tg3_flag_set(tp, USE_PHYLIB);
14050
14051         err = tg3_mdio_init(tp);
14052         if (err)
14053                 return err;
14054
14055         /* Initialize data/descriptor byte/word swapping. */
14056         val = tr32(GRC_MODE);
14057         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14058                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14059                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14060                         GRC_MODE_B2HRX_ENABLE |
14061                         GRC_MODE_HTX2B_ENABLE |
14062                         GRC_MODE_HOST_STACKUP);
14063         else
14064                 val &= GRC_MODE_HOST_STACKUP;
14065
14066         tw32(GRC_MODE, val | tp->grc_mode);
14067
14068         tg3_switch_clocks(tp);
14069
14070         /* Clear this out for sanity. */
14071         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14072
14073         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14074                               &pci_state_reg);
14075         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14076             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14077                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14078
14079                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14080                     chiprevid == CHIPREV_ID_5701_B0 ||
14081                     chiprevid == CHIPREV_ID_5701_B2 ||
14082                     chiprevid == CHIPREV_ID_5701_B5) {
14083                         void __iomem *sram_base;
14084
14085                         /* Write some dummy words into the SRAM status block
14086                          * area, see if it reads back correctly.  If the return
14087                          * value is bad, force enable the PCIX workaround.
14088                          */
14089                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14090
14091                         writel(0x00000000, sram_base);
14092                         writel(0x00000000, sram_base + 4);
14093                         writel(0xffffffff, sram_base + 4);
14094                         if (readl(sram_base) != 0x00000000)
14095                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14096                 }
14097         }
14098
14099         udelay(50);
14100         tg3_nvram_init(tp);
14101
14102         grc_misc_cfg = tr32(GRC_MISC_CFG);
14103         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14104
14105         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14106             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14107              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14108                 tg3_flag_set(tp, IS_5788);
14109
14110         if (!tg3_flag(tp, IS_5788) &&
14111             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14112                 tg3_flag_set(tp, TAGGED_STATUS);
14113         if (tg3_flag(tp, TAGGED_STATUS)) {
14114                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14115                                       HOSTCC_MODE_CLRTICK_TXBD);
14116
14117                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14118                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14119                                        tp->misc_host_ctrl);
14120         }
14121
14122         /* Preserve the APE MAC_MODE bits */
14123         if (tg3_flag(tp, ENABLE_APE))
14124                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14125         else
14126                 tp->mac_mode = TG3_DEF_MAC_MODE;
14127
14128         /* these are limited to 10/100 only */
14129         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14130              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14131             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14132              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14133              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14134               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14135               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14136             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14137              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14138               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14139               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14140             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14141             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14142             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14143             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14144                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14145
14146         err = tg3_phy_probe(tp);
14147         if (err) {
14148                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14149                 /* ... but do not return immediately ... */
14150                 tg3_mdio_fini(tp);
14151         }
14152
14153         tg3_read_vpd(tp);
14154         tg3_read_fw_ver(tp);
14155
14156         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14157                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14158         } else {
14159                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14160                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14161                 else
14162                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14163         }
14164
14165         /* 5700 {AX,BX} chips have a broken status block link
14166          * change bit implementation, so we must use the
14167          * status register in those cases.
14168          */
14169         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14170                 tg3_flag_set(tp, USE_LINKCHG_REG);
14171         else
14172                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14173
14174         /* The led_ctrl is set during tg3_phy_probe, here we might
14175          * have to force the link status polling mechanism based
14176          * upon subsystem IDs.
14177          */
14178         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14179             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14180             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14181                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14182                 tg3_flag_set(tp, USE_LINKCHG_REG);
14183         }
14184
14185         /* For all SERDES we poll the MAC status register. */
14186         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14187                 tg3_flag_set(tp, POLL_SERDES);
14188         else
14189                 tg3_flag_clear(tp, POLL_SERDES);
14190
14191         tp->rx_offset = NET_IP_ALIGN;
14192         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14193         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14194             tg3_flag(tp, PCIX_MODE)) {
14195                 tp->rx_offset = 0;
14196 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14197                 tp->rx_copy_thresh = ~(u16)0;
14198 #endif
14199         }
14200
14201         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14202         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14203         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14204
14205         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14206
14207         /* Increment the rx prod index on the rx std ring by at most
14208          * 8 for these chips to workaround hw errata.
14209          */
14210         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14211             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14212             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14213                 tp->rx_std_max_post = 8;
14214
14215         if (tg3_flag(tp, ASPM_WORKAROUND))
14216                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14217                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14218
14219         return err;
14220 }
14221
14222 #ifdef CONFIG_SPARC
14223 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14224 {
14225         struct net_device *dev = tp->dev;
14226         struct pci_dev *pdev = tp->pdev;
14227         struct device_node *dp = pci_device_to_OF_node(pdev);
14228         const unsigned char *addr;
14229         int len;
14230
14231         addr = of_get_property(dp, "local-mac-address", &len);
14232         if (addr && len == 6) {
14233                 memcpy(dev->dev_addr, addr, 6);
14234                 memcpy(dev->perm_addr, dev->dev_addr, 6);
14235                 return 0;
14236         }
14237         return -ENODEV;
14238 }
14239
14240 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14241 {
14242         struct net_device *dev = tp->dev;
14243
14244         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14245         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14246         return 0;
14247 }
14248 #endif
14249
14250 static int __devinit tg3_get_device_address(struct tg3 *tp)
14251 {
14252         struct net_device *dev = tp->dev;
14253         u32 hi, lo, mac_offset;
14254         int addr_ok = 0;
14255
14256 #ifdef CONFIG_SPARC
14257         if (!tg3_get_macaddr_sparc(tp))
14258                 return 0;
14259 #endif
14260
14261         mac_offset = 0x7c;
14262         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14263             tg3_flag(tp, 5780_CLASS)) {
14264                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14265                         mac_offset = 0xcc;
14266                 if (tg3_nvram_lock(tp))
14267                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14268                 else
14269                         tg3_nvram_unlock(tp);
14270         } else if (tg3_flag(tp, 5717_PLUS)) {
14271                 if (PCI_FUNC(tp->pdev->devfn) & 1)
14272                         mac_offset = 0xcc;
14273                 if (PCI_FUNC(tp->pdev->devfn) > 1)
14274                         mac_offset += 0x18c;
14275         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14276                 mac_offset = 0x10;
14277
14278         /* First try to get it from MAC address mailbox. */
14279         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14280         if ((hi >> 16) == 0x484b) {
14281                 dev->dev_addr[0] = (hi >>  8) & 0xff;
14282                 dev->dev_addr[1] = (hi >>  0) & 0xff;
14283
14284                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14285                 dev->dev_addr[2] = (lo >> 24) & 0xff;
14286                 dev->dev_addr[3] = (lo >> 16) & 0xff;
14287                 dev->dev_addr[4] = (lo >>  8) & 0xff;
14288                 dev->dev_addr[5] = (lo >>  0) & 0xff;
14289
14290                 /* Some old bootcode may report a 0 MAC address in SRAM */
14291                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14292         }
14293         if (!addr_ok) {
14294                 /* Next, try NVRAM. */
14295                 if (!tg3_flag(tp, NO_NVRAM) &&
14296                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14297                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14298                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14299                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14300                 }
14301                 /* Finally just fetch it out of the MAC control regs. */
14302                 else {
14303                         hi = tr32(MAC_ADDR_0_HIGH);
14304                         lo = tr32(MAC_ADDR_0_LOW);
14305
14306                         dev->dev_addr[5] = lo & 0xff;
14307                         dev->dev_addr[4] = (lo >> 8) & 0xff;
14308                         dev->dev_addr[3] = (lo >> 16) & 0xff;
14309                         dev->dev_addr[2] = (lo >> 24) & 0xff;
14310                         dev->dev_addr[1] = hi & 0xff;
14311                         dev->dev_addr[0] = (hi >> 8) & 0xff;
14312                 }
14313         }
14314
14315         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14316 #ifdef CONFIG_SPARC
14317                 if (!tg3_get_default_macaddr_sparc(tp))
14318                         return 0;
14319 #endif
14320                 return -EINVAL;
14321         }
14322         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14323         return 0;
14324 }
14325
14326 #define BOUNDARY_SINGLE_CACHELINE       1
14327 #define BOUNDARY_MULTI_CACHELINE        2
14328
14329 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14330 {
14331         int cacheline_size;
14332         u8 byte;
14333         int goal;
14334
14335         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14336         if (byte == 0)
14337                 cacheline_size = 1024;
14338         else
14339                 cacheline_size = (int) byte * 4;
14340
14341         /* On 5703 and later chips, the boundary bits have no
14342          * effect.
14343          */
14344         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14345             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14346             !tg3_flag(tp, PCI_EXPRESS))
14347                 goto out;
14348
14349 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14350         goal = BOUNDARY_MULTI_CACHELINE;
14351 #else
14352 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14353         goal = BOUNDARY_SINGLE_CACHELINE;
14354 #else
14355         goal = 0;
14356 #endif
14357 #endif
14358
14359         if (tg3_flag(tp, 57765_PLUS)) {
14360                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14361                 goto out;
14362         }
14363
14364         if (!goal)
14365                 goto out;
14366
14367         /* PCI controllers on most RISC systems tend to disconnect
14368          * when a device tries to burst across a cache-line boundary.
14369          * Therefore, letting tg3 do so just wastes PCI bandwidth.
14370          *
14371          * Unfortunately, for PCI-E there are only limited
14372          * write-side controls for this, and thus for reads
14373          * we will still get the disconnects.  We'll also waste
14374          * these PCI cycles for both read and write for chips
14375          * other than 5700 and 5701 which do not implement the
14376          * boundary bits.
14377          */
14378         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14379                 switch (cacheline_size) {
14380                 case 16:
14381                 case 32:
14382                 case 64:
14383                 case 128:
14384                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14385                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14386                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14387                         } else {
14388                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14389                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14390                         }
14391                         break;
14392
14393                 case 256:
14394                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14395                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14396                         break;
14397
14398                 default:
14399                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14400                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14401                         break;
14402                 }
14403         } else if (tg3_flag(tp, PCI_EXPRESS)) {
14404                 switch (cacheline_size) {
14405                 case 16:
14406                 case 32:
14407                 case 64:
14408                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14409                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14410                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14411                                 break;
14412                         }
14413                         /* fallthrough */
14414                 case 128:
14415                 default:
14416                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14417                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14418                         break;
14419                 }
14420         } else {
14421                 switch (cacheline_size) {
14422                 case 16:
14423                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14424                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14425                                         DMA_RWCTRL_WRITE_BNDRY_16);
14426                                 break;
14427                         }
14428                         /* fallthrough */
14429                 case 32:
14430                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14431                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14432                                         DMA_RWCTRL_WRITE_BNDRY_32);
14433                                 break;
14434                         }
14435                         /* fallthrough */
14436                 case 64:
14437                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14438                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14439                                         DMA_RWCTRL_WRITE_BNDRY_64);
14440                                 break;
14441                         }
14442                         /* fallthrough */
14443                 case 128:
14444                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14445                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14446                                         DMA_RWCTRL_WRITE_BNDRY_128);
14447                                 break;
14448                         }
14449                         /* fallthrough */
14450                 case 256:
14451                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
14452                                 DMA_RWCTRL_WRITE_BNDRY_256);
14453                         break;
14454                 case 512:
14455                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
14456                                 DMA_RWCTRL_WRITE_BNDRY_512);
14457                         break;
14458                 case 1024:
14459                 default:
14460                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14461                                 DMA_RWCTRL_WRITE_BNDRY_1024);
14462                         break;
14463                 }
14464         }
14465
14466 out:
14467         return val;
14468 }
14469
14470 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14471 {
14472         struct tg3_internal_buffer_desc test_desc;
14473         u32 sram_dma_descs;
14474         int i, ret;
14475
14476         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14477
14478         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14479         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14480         tw32(RDMAC_STATUS, 0);
14481         tw32(WDMAC_STATUS, 0);
14482
14483         tw32(BUFMGR_MODE, 0);
14484         tw32(FTQ_RESET, 0);
14485
14486         test_desc.addr_hi = ((u64) buf_dma) >> 32;
14487         test_desc.addr_lo = buf_dma & 0xffffffff;
14488         test_desc.nic_mbuf = 0x00002100;
14489         test_desc.len = size;
14490
14491         /*
14492          * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
14493          * the *second* time the tg3 driver was getting loaded after an
14494          * initial scan.
14495          *
14496          * Broadcom tells me:
14497          *   ...the DMA engine is connected to the GRC block and a DMA
14498          *   reset may affect the GRC block in some unpredictable way...
14499          *   The behavior of resets to individual blocks has not been tested.
14500          *
14501          * Broadcom noted the GRC reset will also reset all sub-components.
14502          */
14503         if (to_device) {
14504                 test_desc.cqid_sqid = (13 << 8) | 2;
14505
14506                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14507                 udelay(40);
14508         } else {
14509                 test_desc.cqid_sqid = (16 << 8) | 7;
14510
14511                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14512                 udelay(40);
14513         }
14514         test_desc.flags = 0x00000005;
14515
14516         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14517                 u32 val;
14518
14519                 val = *(((u32 *)&test_desc) + i);
14520                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14521                                        sram_dma_descs + (i * sizeof(u32)));
14522                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14523         }
14524         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
14525
14526         if (to_device)
14527                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14528         else
14529                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14530
14531         ret = -ENODEV;
14532         for (i = 0; i < 40; i++) {
14533                 u32 val;
14534
14535                 if (to_device)
14536                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14537                 else
14538                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14539                 if ((val & 0xffff) == sram_dma_descs) {
14540                         ret = 0;
14541                         break;
14542                 }
14543
14544                 udelay(100);
14545         }
14546
14547         return ret;
14548 }
14549
14550 #define TEST_BUFFER_SIZE        0x2000
14551
14552 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14553         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14554         { },
14555 };
14556
14557 static int __devinit tg3_test_dma(struct tg3 *tp)
14558 {
14559         dma_addr_t buf_dma;
14560         u32 *buf, saved_dma_rwctrl;
14561         int ret = 0;
14562
14563         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14564                                  &buf_dma, GFP_KERNEL);
14565         if (!buf) {
14566                 ret = -ENOMEM;
14567                 goto out_nofree;
14568         }
14569
14570         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14571                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14572
14573         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14574
14575         if (tg3_flag(tp, 57765_PLUS))
14576                 goto out;
14577
14578         if (tg3_flag(tp, PCI_EXPRESS)) {
14579                 /* DMA read watermark not used on PCIE */
14580                 tp->dma_rwctrl |= 0x00180000;
14581         } else if (!tg3_flag(tp, PCIX_MODE)) {
14582                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14583                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14584                         tp->dma_rwctrl |= 0x003f0000;
14585                 else
14586                         tp->dma_rwctrl |= 0x003f000f;
14587         } else {
14588                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14589                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14590                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14591                         u32 read_water = 0x7;
14592
14593                         /* If the 5704 is behind the EPB bridge, we can
14594                          * do the less restrictive ONE_DMA workaround for
14595                          * better performance.
14596                          */
14597                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14598                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14599                                 tp->dma_rwctrl |= 0x8000;
14600                         else if (ccval == 0x6 || ccval == 0x7)
14601                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14602
14603                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14604                                 read_water = 4;
14605                         /* Set bit 23 to enable PCIX hw bug fix */
14606                         tp->dma_rwctrl |=
14607                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14608                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14609                                 (1 << 23);
14610                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14611                         /* 5780 always in PCIX mode */
14612                         tp->dma_rwctrl |= 0x00144000;
14613                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14614                         /* 5714 always in PCIX mode */
14615                         tp->dma_rwctrl |= 0x00148000;
14616                 } else {
14617                         tp->dma_rwctrl |= 0x001b000f;
14618                 }
14619         }
14620
14621         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14622             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14623                 tp->dma_rwctrl &= 0xfffffff0;
14624
14625         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14626             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14627                 /* Remove this if it causes problems for some boards. */
14628                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14629
14630                 /* On 5700/5701 chips, we need to set this bit.
14631                  * Otherwise the chip will issue cacheline transactions
14632                  * to streamable DMA memory with not all the byte
14633                  * enables turned on.  This is an error on several
14634                  * RISC PCI controllers, in particular sparc64.
14635                  *
14636                  * On 5703/5704 chips, this bit has been reassigned
14637                  * a different meaning.  In particular, it is used
14638                  * on those chips to enable a PCI-X workaround.
14639                  */
14640                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14641         }
14642
14643         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14644
14645 #if 0
14646         /* Unneeded, already done by tg3_get_invariants.  */
14647         tg3_switch_clocks(tp);
14648 #endif
14649
14650         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14651             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14652                 goto out;
14653
14654         /* It is best to perform DMA test with maximum write burst size
14655          * to expose the 5700/5701 write DMA bug.
14656          */
14657         saved_dma_rwctrl = tp->dma_rwctrl;
14658         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14659         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14660
14661         while (1) {
14662                 u32 *p = buf, i;
14663
14664                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14665                         p[i] = i;
14666
14667                 /* Send the buffer to the chip. */
14668                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14669                 if (ret) {
14670                         dev_err(&tp->pdev->dev,
14671                                 "%s: Buffer write failed. err = %d\n",
14672                                 __func__, ret);
14673                         break;
14674                 }
14675
14676 #if 0
14677                 /* validate data reached card RAM correctly. */
14678                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14679                         u32 val;
14680                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
14681                         if (le32_to_cpu(val) != p[i]) {
14682                                 dev_err(&tp->pdev->dev,
14683                                         "%s: Buffer corrupted on device! "
14684                                         "(%d != %d)\n", __func__, val, i);
14685                                 /* ret = -ENODEV here? */
14686                         }
14687                         p[i] = 0;
14688                 }
14689 #endif
14690                 /* Now read it back. */
14691                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14692                 if (ret) {
14693                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14694                                 "err = %d\n", __func__, ret);
14695                         break;
14696                 }
14697
14698                 /* Verify it. */
14699                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14700                         if (p[i] == i)
14701                                 continue;
14702
14703                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14704                             DMA_RWCTRL_WRITE_BNDRY_16) {
14705                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14706                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14707                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14708                                 break;
14709                         } else {
14710                                 dev_err(&tp->pdev->dev,
14711                                         "%s: Buffer corrupted on read back! "
14712                                         "(%d != %d)\n", __func__, p[i], i);
14713                                 ret = -ENODEV;
14714                                 goto out;
14715                         }
14716                 }
14717
14718                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14719                         /* Success. */
14720                         ret = 0;
14721                         break;
14722                 }
14723         }
14724         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14725             DMA_RWCTRL_WRITE_BNDRY_16) {
14726                 /* DMA test passed without adjusting DMA boundary,
14727                  * now look for chipsets that are known to expose the
14728                  * DMA bug without failing the test.
14729                  */
14730                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
14731                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14732                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14733                 } else {
14734                         /* Safe to use the calculated DMA boundary. */
14735                         tp->dma_rwctrl = saved_dma_rwctrl;
14736                 }
14737
14738                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14739         }
14740
14741 out:
14742         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
14743 out_nofree:
14744         return ret;
14745 }
14746
14747 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14748 {
14749         if (tg3_flag(tp, 57765_PLUS)) {
14750                 tp->bufmgr_config.mbuf_read_dma_low_water =
14751                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14752                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14753                         DEFAULT_MB_MACRX_LOW_WATER_57765;
14754                 tp->bufmgr_config.mbuf_high_water =
14755                         DEFAULT_MB_HIGH_WATER_57765;
14756
14757                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14758                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14759                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14760                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14761                 tp->bufmgr_config.mbuf_high_water_jumbo =
14762                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14763         } else if (tg3_flag(tp, 5705_PLUS)) {
14764                 tp->bufmgr_config.mbuf_read_dma_low_water =
14765                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14766                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14767                         DEFAULT_MB_MACRX_LOW_WATER_5705;
14768                 tp->bufmgr_config.mbuf_high_water =
14769                         DEFAULT_MB_HIGH_WATER_5705;
14770                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14771                         tp->bufmgr_config.mbuf_mac_rx_low_water =
14772                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
14773                         tp->bufmgr_config.mbuf_high_water =
14774                                 DEFAULT_MB_HIGH_WATER_5906;
14775                 }
14776
14777                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14778                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14779                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14780                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14781                 tp->bufmgr_config.mbuf_high_water_jumbo =
14782                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
14783         } else {
14784                 tp->bufmgr_config.mbuf_read_dma_low_water =
14785                         DEFAULT_MB_RDMA_LOW_WATER;
14786                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14787                         DEFAULT_MB_MACRX_LOW_WATER;
14788                 tp->bufmgr_config.mbuf_high_water =
14789                         DEFAULT_MB_HIGH_WATER;
14790
14791                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14792                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14793                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14794                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14795                 tp->bufmgr_config.mbuf_high_water_jumbo =
14796                         DEFAULT_MB_HIGH_WATER_JUMBO;
14797         }
14798
14799         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14800         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
14801 }
14802
14803 static char * __devinit tg3_phy_string(struct tg3 *tp)
14804 {
14805         switch (tp->phy_id & TG3_PHY_ID_MASK) {
14806         case TG3_PHY_ID_BCM5400:        return "5400";
14807         case TG3_PHY_ID_BCM5401:        return "5401";
14808         case TG3_PHY_ID_BCM5411:        return "5411";
14809         case TG3_PHY_ID_BCM5701:        return "5701";
14810         case TG3_PHY_ID_BCM5703:        return "5703";
14811         case TG3_PHY_ID_BCM5704:        return "5704";
14812         case TG3_PHY_ID_BCM5705:        return "5705";
14813         case TG3_PHY_ID_BCM5750:        return "5750";
14814         case TG3_PHY_ID_BCM5752:        return "5752";
14815         case TG3_PHY_ID_BCM5714:        return "5714";
14816         case TG3_PHY_ID_BCM5780:        return "5780";
14817         case TG3_PHY_ID_BCM5755:        return "5755";
14818         case TG3_PHY_ID_BCM5787:        return "5787";
14819         case TG3_PHY_ID_BCM5784:        return "5784";
14820         case TG3_PHY_ID_BCM5756:        return "5722/5756";
14821         case TG3_PHY_ID_BCM5906:        return "5906";
14822         case TG3_PHY_ID_BCM5761:        return "5761";
14823         case TG3_PHY_ID_BCM5718C:       return "5718C";
14824         case TG3_PHY_ID_BCM5718S:       return "5718S";
14825         case TG3_PHY_ID_BCM57765:       return "57765";
14826         case TG3_PHY_ID_BCM5719C:       return "5719C";
14827         case TG3_PHY_ID_BCM5720C:       return "5720C";
14828         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
14829         case 0:                 return "serdes";
14830         default:                return "unknown";
14831         }
14832 }
14833
14834 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
14835 {
14836         if (tg3_flag(tp, PCI_EXPRESS)) {
14837                 strcpy(str, "PCI Express");
14838                 return str;
14839         } else if (tg3_flag(tp, PCIX_MODE)) {
14840                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
14841
14842                 strcpy(str, "PCIX:");
14843
14844                 if ((clock_ctrl == 7) ||
14845                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
14846                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
14847                         strcat(str, "133MHz");
14848                 else if (clock_ctrl == 0)
14849                         strcat(str, "33MHz");
14850                 else if (clock_ctrl == 2)
14851                         strcat(str, "50MHz");
14852                 else if (clock_ctrl == 4)
14853                         strcat(str, "66MHz");
14854                 else if (clock_ctrl == 6)
14855                         strcat(str, "100MHz");
14856         } else {
14857                 strcpy(str, "PCI:");
14858                 if (tg3_flag(tp, PCI_HIGH_SPEED))
14859                         strcat(str, "66MHz");
14860                 else
14861                         strcat(str, "33MHz");
14862         }
14863         if (tg3_flag(tp, PCI_32BIT))
14864                 strcat(str, ":32-bit");
14865         else
14866                 strcat(str, ":64-bit");
14867         return str;
14868 }
14869
14870 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14871 {
14872         struct pci_dev *peer;
14873         unsigned int func, devnr = tp->pdev->devfn & ~7;
14874
14875         for (func = 0; func < 8; func++) {
14876                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14877                 if (peer && peer != tp->pdev)
14878                         break;
14879                 pci_dev_put(peer);
14880         }
14881         /* 5704 can be configured in single-port mode, set peer to
14882          * tp->pdev in that case.
14883          */
14884         if (!peer) {
14885                 peer = tp->pdev;
14886                 return peer;
14887         }
14888
14889         /*
14890          * We don't need to keep the refcount elevated; there's no way
14891          * to remove one half of this device without removing the other
14892          */
14893         pci_dev_put(peer);
14894
14895         return peer;
14896 }
14897
14898 static void __devinit tg3_init_coal(struct tg3 *tp)
14899 {
14900         struct ethtool_coalesce *ec = &tp->coal;
14901
14902         memset(ec, 0, sizeof(*ec));
14903         ec->cmd = ETHTOOL_GCOALESCE;
14904         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
14905         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
14906         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
14907         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
14908         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
14909         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
14910         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
14911         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
14912         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
14913
14914         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
14915                                  HOSTCC_MODE_CLRTICK_TXBD)) {
14916                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
14917                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
14918                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
14919                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
14920         }
14921
14922         if (tg3_flag(tp, 5705_PLUS)) {
14923                 ec->rx_coalesce_usecs_irq = 0;
14924                 ec->tx_coalesce_usecs_irq = 0;
14925                 ec->stats_block_coalesce_usecs = 0;
14926         }
14927 }
14928
14929 static const struct net_device_ops tg3_netdev_ops = {
14930         .ndo_open               = tg3_open,
14931         .ndo_stop               = tg3_close,
14932         .ndo_start_xmit         = tg3_start_xmit,
14933         .ndo_get_stats64        = tg3_get_stats64,
14934         .ndo_validate_addr      = eth_validate_addr,
14935         .ndo_set_multicast_list = tg3_set_rx_mode,
14936         .ndo_set_mac_address    = tg3_set_mac_addr,
14937         .ndo_do_ioctl           = tg3_ioctl,
14938         .ndo_tx_timeout         = tg3_tx_timeout,
14939         .ndo_change_mtu         = tg3_change_mtu,
14940         .ndo_fix_features       = tg3_fix_features,
14941         .ndo_set_features       = tg3_set_features,
14942 #ifdef CONFIG_NET_POLL_CONTROLLER
14943         .ndo_poll_controller    = tg3_poll_controller,
14944 #endif
14945 };
14946
14947 static int __devinit tg3_init_one(struct pci_dev *pdev,
14948                                   const struct pci_device_id *ent)
14949 {
14950         struct net_device *dev;
14951         struct tg3 *tp;
14952         int i, err, pm_cap;
14953         u32 sndmbx, rcvmbx, intmbx;
14954         char str[40];
14955         u64 dma_mask, persist_dma_mask;
14956         u32 features = 0;
14957
14958         printk_once(KERN_INFO "%s\n", version);
14959
14960         err = pci_enable_device(pdev);
14961         if (err) {
14962                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
14963                 return err;
14964         }
14965
14966         err = pci_request_regions(pdev, DRV_MODULE_NAME);
14967         if (err) {
14968                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
14969                 goto err_out_disable_pdev;
14970         }
14971
14972         pci_set_master(pdev);
14973
14974         /* Find power-management capability. */
14975         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
14976         if (pm_cap == 0) {
14977                 dev_err(&pdev->dev,
14978                         "Cannot find Power Management capability, aborting\n");
14979                 err = -EIO;
14980                 goto err_out_free_res;
14981         }
14982
14983         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
14984         if (!dev) {
14985                 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
14986                 err = -ENOMEM;
14987                 goto err_out_free_res;
14988         }
14989
14990         SET_NETDEV_DEV(dev, &pdev->dev);
14991
14992         tp = netdev_priv(dev);
14993         tp->pdev = pdev;
14994         tp->dev = dev;
14995         tp->pm_cap = pm_cap;
14996         tp->rx_mode = TG3_DEF_RX_MODE;
14997         tp->tx_mode = TG3_DEF_TX_MODE;
14998
14999         if (tg3_debug > 0)
15000                 tp->msg_enable = tg3_debug;
15001         else
15002                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15003
15004         /* The word/byte swap controls here control register access byte
15005          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
15006          * setting below.
15007          */
15008         tp->misc_host_ctrl =
15009                 MISC_HOST_CTRL_MASK_PCI_INT |
15010                 MISC_HOST_CTRL_WORD_SWAP |
15011                 MISC_HOST_CTRL_INDIR_ACCESS |
15012                 MISC_HOST_CTRL_PCISTATE_RW;
15013
15014         /* The NONFRM (non-frame) byte/word swap controls take effect
15015          * on descriptor entries, anything which isn't packet data.
15016          *
15017          * The StrongARM chips on the board (one for tx, one for rx)
15018          * are running in big-endian mode.
15019          */
15020         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15021                         GRC_MODE_WSWAP_NONFRM_DATA);
15022 #ifdef __BIG_ENDIAN
15023         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15024 #endif
15025         spin_lock_init(&tp->lock);
15026         spin_lock_init(&tp->indirect_lock);
15027         INIT_WORK(&tp->reset_task, tg3_reset_task);
15028
15029         tp->regs = pci_ioremap_bar(pdev, BAR_0);
15030         if (!tp->regs) {
15031                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15032                 err = -ENOMEM;
15033                 goto err_out_free_dev;
15034         }
15035
15036         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15037         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15038
15039         dev->ethtool_ops = &tg3_ethtool_ops;
15040         dev->watchdog_timeo = TG3_TX_TIMEOUT;
15041         dev->netdev_ops = &tg3_netdev_ops;
15042         dev->irq = pdev->irq;
15043
15044         err = tg3_get_invariants(tp);
15045         if (err) {
15046                 dev_err(&pdev->dev,
15047                         "Problem fetching invariants of chip, aborting\n");
15048                 goto err_out_iounmap;
15049         }
15050
15051         /* The EPB bridge inside 5714, 5715, and 5780 and any
15052          * device behind the EPB cannot support DMA addresses > 40-bit.
15053          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15054          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15055          * do DMA address check in tg3_start_xmit().
15056          */
15057         if (tg3_flag(tp, IS_5788))
15058                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15059         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15060                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15061 #ifdef CONFIG_HIGHMEM
15062                 dma_mask = DMA_BIT_MASK(64);
15063 #endif
15064         } else
15065                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15066
15067         /* Configure DMA attributes. */
15068         if (dma_mask > DMA_BIT_MASK(32)) {
15069                 err = pci_set_dma_mask(pdev, dma_mask);
15070                 if (!err) {
15071                         features |= NETIF_F_HIGHDMA;
15072                         err = pci_set_consistent_dma_mask(pdev,
15073                                                           persist_dma_mask);
15074                         if (err < 0) {
15075                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15076                                         "DMA for consistent allocations\n");
15077                                 goto err_out_iounmap;
15078                         }
15079                 }
15080         }
15081         if (err || dma_mask == DMA_BIT_MASK(32)) {
15082                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15083                 if (err) {
15084                         dev_err(&pdev->dev,
15085                                 "No usable DMA configuration, aborting\n");
15086                         goto err_out_iounmap;
15087                 }
15088         }
15089
15090         tg3_init_bufmgr_config(tp);
15091
15092         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15093
15094         /* 5700 B0 chips do not support checksumming correctly due
15095          * to hardware bugs.
15096          */
15097         if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15098                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15099
15100                 if (tg3_flag(tp, 5755_PLUS))
15101                         features |= NETIF_F_IPV6_CSUM;
15102         }
15103
15104         /* TSO is on by default on chips that support hardware TSO.
15105          * Firmware TSO on older chips gives lower performance, so it
15106          * is off by default, but can be enabled using ethtool.
15107          */
15108         if ((tg3_flag(tp, HW_TSO_1) ||
15109              tg3_flag(tp, HW_TSO_2) ||
15110              tg3_flag(tp, HW_TSO_3)) &&
15111             (features & NETIF_F_IP_CSUM))
15112                 features |= NETIF_F_TSO;
15113         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15114                 if (features & NETIF_F_IPV6_CSUM)
15115                         features |= NETIF_F_TSO6;
15116                 if (tg3_flag(tp, HW_TSO_3) ||
15117                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15118                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15119                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15120                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15121                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15122                         features |= NETIF_F_TSO_ECN;
15123         }
15124
15125         dev->features |= features;
15126         dev->vlan_features |= features;
15127
15128         /*
15129          * Add loopback capability only for a subset of devices that support
15130          * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
15131          * loopback for the remaining devices.
15132          */
15133         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15134             !tg3_flag(tp, CPMU_PRESENT))
15135                 /* Add the loopback capability */
15136                 features |= NETIF_F_LOOPBACK;
15137
15138         dev->hw_features |= features;
15139
15140         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15141             !tg3_flag(tp, TSO_CAPABLE) &&
15142             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15143                 tg3_flag_set(tp, MAX_RXPEND_64);
15144                 tp->rx_pending = 63;
15145         }
15146
15147         err = tg3_get_device_address(tp);
15148         if (err) {
15149                 dev_err(&pdev->dev,
15150                         "Could not obtain valid ethernet address, aborting\n");
15151                 goto err_out_iounmap;
15152         }
15153
15154         if (tg3_flag(tp, ENABLE_APE)) {
15155                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15156                 if (!tp->aperegs) {
15157                         dev_err(&pdev->dev,
15158                                 "Cannot map APE registers, aborting\n");
15159                         err = -ENOMEM;
15160                         goto err_out_iounmap;
15161                 }
15162
15163                 tg3_ape_lock_init(tp);
15164
15165                 if (tg3_flag(tp, ENABLE_ASF))
15166                         tg3_read_dash_ver(tp);
15167         }
15168
15169         /*
15170          * Reset chip in case UNDI or EFI driver did not shutdown
15171          * DMA self test will enable WDMAC and we'll see (spurious)
15172          * pending DMA on the PCI bus at that point.
15173          */
15174         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15175             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15176                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15177                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15178         }
15179
15180         err = tg3_test_dma(tp);
15181         if (err) {
15182                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15183                 goto err_out_apeunmap;
15184         }
15185
15186         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15187         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15188         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15189         for (i = 0; i < tp->irq_max; i++) {
15190                 struct tg3_napi *tnapi = &tp->napi[i];
15191
15192                 tnapi->tp = tp;
15193                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15194
15195                 tnapi->int_mbox = intmbx;
15196                 if (i < 4)
15197                         intmbx += 0x8;
15198                 else
15199                         intmbx += 0x4;
15200
15201                 tnapi->consmbox = rcvmbx;
15202                 tnapi->prodmbox = sndmbx;
15203
15204                 if (i)
15205                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15206                 else
15207                         tnapi->coal_now = HOSTCC_MODE_NOW;
15208
15209                 if (!tg3_flag(tp, SUPPORT_MSIX))
15210                         break;
15211
15212                 /*
15213                  * If we support MSIX, we'll be using RSS.  If we're using
15214                  * RSS, the first vector only handles link interrupts and the
15215                  * remaining vectors handle rx and tx interrupts.  Reuse the
15216                  * mailbox values for the next iteration.  The values we setup
15217                  * above are still useful for the single vectored mode.
15218                  */
15219                 if (!i)
15220                         continue;
15221
15222                 rcvmbx += 0x8;
15223
15224                 if (sndmbx & 0x4)
15225                         sndmbx -= 0x4;
15226                 else
15227                         sndmbx += 0xc;
15228         }
15229
15230         tg3_init_coal(tp);
15231
15232         pci_set_drvdata(pdev, dev);
15233
15234         err = register_netdev(dev);
15235         if (err) {
15236                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15237                 goto err_out_apeunmap;
15238         }
15239
15240         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15241                     tp->board_part_number,
15242                     tp->pci_chip_rev_id,
15243                     tg3_bus_string(tp, str),
15244                     dev->dev_addr);
15245
15246         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15247                 struct phy_device *phydev;
15248                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15249                 netdev_info(dev,
15250                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15251                             phydev->drv->name, dev_name(&phydev->dev));
15252         } else {
15253                 char *ethtype;
15254
15255                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15256                         ethtype = "10/100Base-TX";
15257                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15258                         ethtype = "1000Base-SX";
15259                 else
15260                         ethtype = "10/100/1000Base-T";
15261
15262                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15263                             "(WireSpeed[%d], EEE[%d])\n",
15264                             tg3_phy_string(tp), ethtype,
15265                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15266                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15267         }
15268
15269         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15270                     (dev->features & NETIF_F_RXCSUM) != 0,
15271                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
15272                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15273                     tg3_flag(tp, ENABLE_ASF) != 0,
15274                     tg3_flag(tp, TSO_CAPABLE) != 0);
15275         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15276                     tp->dma_rwctrl,
15277                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15278                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15279
15280         pci_save_state(pdev);
15281
15282         return 0;
15283
15284 err_out_apeunmap:
15285         if (tp->aperegs) {
15286                 iounmap(tp->aperegs);
15287                 tp->aperegs = NULL;
15288         }
15289
15290 err_out_iounmap:
15291         if (tp->regs) {
15292                 iounmap(tp->regs);
15293                 tp->regs = NULL;
15294         }
15295
15296 err_out_free_dev:
15297         free_netdev(dev);
15298
15299 err_out_free_res:
15300         pci_release_regions(pdev);
15301
15302 err_out_disable_pdev:
15303         pci_disable_device(pdev);
15304         pci_set_drvdata(pdev, NULL);
15305         return err;
15306 }
15307
15308 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15309 {
15310         struct net_device *dev = pci_get_drvdata(pdev);
15311
15312         if (dev) {
15313                 struct tg3 *tp = netdev_priv(dev);
15314
15315                 if (tp->fw)
15316                         release_firmware(tp->fw);
15317
15318                 cancel_work_sync(&tp->reset_task);
15319
15320                 if (!tg3_flag(tp, USE_PHYLIB)) {
15321                         tg3_phy_fini(tp);
15322                         tg3_mdio_fini(tp);
15323                 }
15324
15325                 unregister_netdev(dev);
15326                 if (tp->aperegs) {
15327                         iounmap(tp->aperegs);
15328                         tp->aperegs = NULL;
15329                 }
15330                 if (tp->regs) {
15331                         iounmap(tp->regs);
15332                         tp->regs = NULL;
15333                 }
15334                 free_netdev(dev);
15335                 pci_release_regions(pdev);
15336                 pci_disable_device(pdev);
15337                 pci_set_drvdata(pdev, NULL);
15338         }
15339 }
15340
15341 #ifdef CONFIG_PM_SLEEP
15342 static int tg3_suspend(struct device *device)
15343 {
15344         struct pci_dev *pdev = to_pci_dev(device);
15345         struct net_device *dev = pci_get_drvdata(pdev);
15346         struct tg3 *tp = netdev_priv(dev);
15347         int err;
15348
15349         if (!netif_running(dev))
15350                 return 0;
15351
15352         flush_work_sync(&tp->reset_task);
15353         tg3_phy_stop(tp);
15354         tg3_netif_stop(tp);
15355
15356         del_timer_sync(&tp->timer);
15357
15358         tg3_full_lock(tp, 1);
15359         tg3_disable_ints(tp);
15360         tg3_full_unlock(tp);
15361
15362         netif_device_detach(dev);
15363
15364         tg3_full_lock(tp, 0);
15365         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15366         tg3_flag_clear(tp, INIT_COMPLETE);
15367         tg3_full_unlock(tp);
15368
15369         err = tg3_power_down_prepare(tp);
15370         if (err) {
15371                 int err2;
15372
15373                 tg3_full_lock(tp, 0);
15374
15375                 tg3_flag_set(tp, INIT_COMPLETE);
15376                 err2 = tg3_restart_hw(tp, 1);
15377                 if (err2)
15378                         goto out;
15379
15380                 tp->timer.expires = jiffies + tp->timer_offset;
15381                 add_timer(&tp->timer);
15382
15383                 netif_device_attach(dev);
15384                 tg3_netif_start(tp);
15385
15386 out:
15387                 tg3_full_unlock(tp);
15388
15389                 if (!err2)
15390                         tg3_phy_start(tp);
15391         }
15392
15393         return err;
15394 }
15395
15396 static int tg3_resume(struct device *device)
15397 {
15398         struct pci_dev *pdev = to_pci_dev(device);
15399         struct net_device *dev = pci_get_drvdata(pdev);
15400         struct tg3 *tp = netdev_priv(dev);
15401         int err;
15402
15403         if (!netif_running(dev))
15404                 return 0;
15405
15406         netif_device_attach(dev);
15407
15408         tg3_full_lock(tp, 0);
15409
15410         tg3_flag_set(tp, INIT_COMPLETE);
15411         err = tg3_restart_hw(tp, 1);
15412         if (err)
15413                 goto out;
15414
15415         tp->timer.expires = jiffies + tp->timer_offset;
15416         add_timer(&tp->timer);
15417
15418         tg3_netif_start(tp);
15419
15420 out:
15421         tg3_full_unlock(tp);
15422
15423         if (!err)
15424                 tg3_phy_start(tp);
15425
15426         return err;
15427 }
15428
15429 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15430 #define TG3_PM_OPS (&tg3_pm_ops)
15431
15432 #else
15433
15434 #define TG3_PM_OPS NULL
15435
15436 #endif /* CONFIG_PM_SLEEP */
15437
15438 /**
15439  * tg3_io_error_detected - called when PCI error is detected
15440  * @pdev: Pointer to PCI device
15441  * @state: The current pci connection state
15442  *
15443  * This function is called after a PCI bus error affecting
15444  * this device has been detected.
15445  */
15446 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15447                                               pci_channel_state_t state)
15448 {
15449         struct net_device *netdev = pci_get_drvdata(pdev);
15450         struct tg3 *tp = netdev_priv(netdev);
15451         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15452
15453         netdev_info(netdev, "PCI I/O error detected\n");
15454
15455         rtnl_lock();
15456
15457         if (!netif_running(netdev))
15458                 goto done;
15459
15460         tg3_phy_stop(tp);
15461
15462         tg3_netif_stop(tp);
15463
15464         del_timer_sync(&tp->timer);
15465         tg3_flag_clear(tp, RESTART_TIMER);
15466
15467         /* Want to make sure that the reset task doesn't run */
15468         cancel_work_sync(&tp->reset_task);
15469         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15470         tg3_flag_clear(tp, RESTART_TIMER);
15471
15472         netif_device_detach(netdev);
15473
15474         /* Clean up software state, even if MMIO is blocked */
15475         tg3_full_lock(tp, 0);
15476         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15477         tg3_full_unlock(tp);
15478
15479 done:
15480         if (state == pci_channel_io_perm_failure)
15481                 err = PCI_ERS_RESULT_DISCONNECT;
15482         else
15483                 pci_disable_device(pdev);
15484
15485         rtnl_unlock();
15486
15487         return err;
15488 }
15489
15490 /**
15491  * tg3_io_slot_reset - called after the pci bus has been reset.
15492  * @pdev: Pointer to PCI device
15493  *
15494  * Restart the card from scratch, as if from a cold-boot.
15495  * At this point, the card has exprienced a hard reset,
15496  * followed by fixups by BIOS, and has its config space
15497  * set up identically to what it was at cold boot.
15498  */
15499 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15500 {
15501         struct net_device *netdev = pci_get_drvdata(pdev);
15502         struct tg3 *tp = netdev_priv(netdev);
15503         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15504         int err;
15505
15506         rtnl_lock();
15507
15508         if (pci_enable_device(pdev)) {
15509                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15510                 goto done;
15511         }
15512
15513         pci_set_master(pdev);
15514         pci_restore_state(pdev);
15515         pci_save_state(pdev);
15516
15517         if (!netif_running(netdev)) {
15518                 rc = PCI_ERS_RESULT_RECOVERED;
15519                 goto done;
15520         }
15521
15522         err = tg3_power_up(tp);
15523         if (err) {
15524                 netdev_err(netdev, "Failed to restore register access.\n");
15525                 goto done;
15526         }
15527
15528         rc = PCI_ERS_RESULT_RECOVERED;
15529
15530 done:
15531         rtnl_unlock();
15532
15533         return rc;
15534 }
15535
15536 /**
15537  * tg3_io_resume - called when traffic can start flowing again.
15538  * @pdev: Pointer to PCI device
15539  *
15540  * This callback is called when the error recovery driver tells
15541  * us that its OK to resume normal operation.
15542  */
15543 static void tg3_io_resume(struct pci_dev *pdev)
15544 {
15545         struct net_device *netdev = pci_get_drvdata(pdev);
15546         struct tg3 *tp = netdev_priv(netdev);
15547         int err;
15548
15549         rtnl_lock();
15550
15551         if (!netif_running(netdev))
15552                 goto done;
15553
15554         tg3_full_lock(tp, 0);
15555         tg3_flag_set(tp, INIT_COMPLETE);
15556         err = tg3_restart_hw(tp, 1);
15557         tg3_full_unlock(tp);
15558         if (err) {
15559                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
15560                 goto done;
15561         }
15562
15563         netif_device_attach(netdev);
15564
15565         tp->timer.expires = jiffies + tp->timer_offset;
15566         add_timer(&tp->timer);
15567
15568         tg3_netif_start(tp);
15569
15570         tg3_phy_start(tp);
15571
15572 done:
15573         rtnl_unlock();
15574 }
15575
15576 static struct pci_error_handlers tg3_err_handler = {
15577         .error_detected = tg3_io_error_detected,
15578         .slot_reset     = tg3_io_slot_reset,
15579         .resume         = tg3_io_resume
15580 };
15581
15582 static struct pci_driver tg3_driver = {
15583         .name           = DRV_MODULE_NAME,
15584         .id_table       = tg3_pci_tbl,
15585         .probe          = tg3_init_one,
15586         .remove         = __devexit_p(tg3_remove_one),
15587         .err_handler    = &tg3_err_handler,
15588         .driver.pm      = TG3_PM_OPS,
15589 };
15590
15591 static int __init tg3_init(void)
15592 {
15593         return pci_register_driver(&tg3_driver);
15594 }
15595
15596 static void __exit tg3_cleanup(void)
15597 {
15598         pci_unregister_driver(&tg3_driver);
15599 }
15600
15601 module_init(tg3_init);
15602 module_exit(tg3_cleanup);