/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
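
/* Example: tg3_flag(tp, JUMBO_CAPABLE) expands to
 * _tg3_flag(TG3_FLAG_JUMBO_CAPABLE, (tp)->tg3_flags), i.e. a test_bit()
 * on the device's flag bitmap, so feature tests stay cheap and atomic.
 */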

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     119
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "May 18, 2011"

#define TG3_DEF_MAC_MODE        0
#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100
#define TG3_RSS_INDIR_TBL_SIZE          128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
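/* NEXT_TX() is the '& (foo - 1)' trick described above: TG3_TX_RING_SIZE
 * is a power of two, so the AND computes (N + 1) % TG3_TX_RING_SIZE
 * without a hardware modulo.
 */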

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif
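
/* On targets where NET_IP_ALIGN is zero or unaligned loads are cheap,
 * TG3_RX_COPY_THRESH() collapses to the compile-time constant 256 and
 * the hot RX path never touches tp->rx_copy_thresh.
 */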

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        { "nvram test     (online) " },
        { "link test      (online) " },
        { "register test  (offline)" },
        { "memory test    (offline)" },
        { "loopback test  (offline)" },
        { "interrupt test (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}
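
/* Example: tg3_switch_clocks() below uses
 * tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40) so each clock-control
 * write is posted and then given the full 40 usec it needs to settle.
 */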

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

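/* TX mailbox write helper.  The value is written twice when
 * TXD_MBOX_HWBUG is set (the flag name suggests a chip bug that can
 * drop the first write), and read back when MBOX_WRITE_REORDER is set
 * so the posted write is flushed in order.
 */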
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver doesn't hold any stale locks. */
        for (i = 0; i < 8; i++) {
                if (i == TG3_APE_LOCK_GPIO)
                        continue;
                tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
        }

        /* Clear the correct bit of the GPIO lock too. */
        if (!tp->pci_fn)
                bit = APE_LOCK_GRANT_DRIVER;
        else
                bit = 1 << tp->pci_fn;

        tg3_ape_write32(tp, regbase + 4 * TG3_APE_LOCK_GPIO, bit);
}

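/* Acquire one of the APE hardware semaphores: set our request bit and
 * poll the grant register for up to 1 millisecond.  If the grant never
 * appears, writing the bit to the grant register revokes the request
 * and the function returns -EBUSY.
 */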
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return 0;
                /* else: fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                break;
        default:
                return -EINVAL;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
                bit = APE_LOCK_REQ_DRIVER;
        else
                bit = 1 << tp->pci_fn;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return;
                /* else: fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                break;
        default:
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
                bit = APE_LOCK_GRANT_DRIVER;
        else
                bit = 1 << tp->pci_fn;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }
        /* check for RX/TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

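/* Read a PHY register via the MAC's MII management interface: build an
 * MI_COM frame from the PHY address, register number and READ command,
 * start it, then poll up to PHY_BUSY_LOOPS times for MI_COM_BUSY to
 * clear.  Auto-polling is paused for the duration and restored after.
 */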
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

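/* Clause 45 register access tunneled through the Clause 22 MMD
 * registers: select the MMD device in MII_TG3_MMD_CTRL, latch the
 * register address, switch to no-increment data mode, then transfer
 * the data word through MII_TG3_MMD_ADDRESS.
 */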
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (tg3_readphy(tp, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (tg3_writephy(tp, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                tp->phy_addr = tp->pci_fn + 1;

                if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state.
         */
        if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
                tg3_bmcr_reset(tp);

        i = mdiobus_register(tp->mdio_bus);
        if (i) {
                dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
                mdiobus_free(tp->mdio_bus);
                return i;
        }

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        if (!phydev || !phydev->drv) {
                dev_warn(&tp->pdev->dev, "No PHY devices\n");
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
                return -ENODEV;
        }

        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM57780:
                phydev->interface = PHY_INTERFACE_MODE_GMII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                break;
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
                                     PHY_BRCM_RX_REFCLK_UNUSED |
                                     PHY_BRCM_DIS_TXCRXC_NOENRGY |
                                     PHY_BRCM_AUTO_PWRDWN_ENABLE;
                if (tg3_flag(tp, RGMII_INBAND_DISABLE))
                        phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
                /* fallthru */
        case PHY_ID_RTL8211C:
                phydev->interface = PHY_INTERFACE_MODE_RGMII;
                break;
        case PHY_ID_RTL8201E:
        case PHY_ID_BCMAC131:
                phydev->interface = PHY_INTERFACE_MODE_MII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                tp->phy_flags |= TG3_PHYFLG_IS_FET;
                break;
        }

        tg3_flag_set(tp, MDIOBUS_INITED);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);

        return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
        if (tg3_flag(tp, MDIOBUS_INITED)) {
                tg3_flag_clear(tp, MDIOBUS_INITED);
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
        }
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
        u32 val;

        val = tr32(GRC_RX_CPU_EVENT);
        val |= GRC_RX_CPU_DRIVER_EVENT;
        tw32_f(GRC_RX_CPU_EVENT, val);

        tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
        int i;
        unsigned int delay_cnt;
        long time_remain;

        /* If enough time has passed, no wait is necessary. */
        time_remain = (long)(tp->last_event_jiffies + 1 +
                      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
                      (long)jiffies;
        if (time_remain < 0)
                return;

        /* Check if we can shorten the wait time. */
        delay_cnt = jiffies_to_usecs(time_remain);
        if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
                delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
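        /* Poll in 8 usec steps; the +1 guarantees at least one pass. */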
        delay_cnt = (delay_cnt >> 3) + 1;

        for (i = 0; i < delay_cnt; i++) {
                if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
                        break;
                udelay(8);
        }
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
        u32 reg;
        u32 val;

        if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
                return;

        tg3_wait_for_event_ack(tp);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

        val = 0;
        if (!tg3_readphy(tp, MII_BMCR, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_BMSR, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

        val = 0;
        if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_LPA, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

        val = 0;
        if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
                if (!tg3_readphy(tp, MII_CTRL1000, &reg))
                        val = reg << 16;
                if (!tg3_readphy(tp, MII_STAT1000, &reg))
                        val |= (reg & 0xffff);
        }
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

        if (!tg3_readphy(tp, MII_PHYADDR, &reg))
                val = reg << 16;
        else
                val = 0;
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

        tg3_generate_fw_event(tp);
}

static void tg3_link_report(struct tg3 *tp)
{
        if (!netif_carrier_ok(tp->dev)) {
                netif_info(tp, link, tp->dev, "Link is down\n");
                tg3_ump_link_report(tp);
        } else if (netif_msg_link(tp)) {
                netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
                            (tp->link_config.active_speed == SPEED_1000 ?
                             1000 :
                             (tp->link_config.active_speed == SPEED_100 ?
                              100 : 10)),
                            (tp->link_config.active_duplex == DUPLEX_FULL ?
                             "full" : "half"));

                netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
                            (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
                            "on" : "off",
                            (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
                            "on" : "off");

                if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
                        netdev_info(tp->dev, "EEE is %s\n",
                                    tp->setlpicnt ? "enabled" : "disabled");

                tg3_ump_link_report(tp);
        }
}

static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
        u16 miireg;

        if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
                miireg = ADVERTISE_PAUSE_CAP;
        else if (flow_ctrl & FLOW_CTRL_TX)
                miireg = ADVERTISE_PAUSE_ASYM;
        else if (flow_ctrl & FLOW_CTRL_RX)
                miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
        else
                miireg = 0;

        return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
        u16 miireg;

        if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
                miireg = ADVERTISE_1000XPAUSE;
        else if (flow_ctrl & FLOW_CTRL_TX)
                miireg = ADVERTISE_1000XPSE_ASYM;
        else if (flow_ctrl & FLOW_CTRL_RX)
                miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
        else
                miireg = 0;

        return miireg;
}

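/* Resolve the 802.3x pause result for a 1000BASE-X link from the local
 * and remote advertisements: symmetric pause on both ends enables flow
 * control in both directions, while the asym-pause combinations below
 * yield RX-only or TX-only flow control.
 */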
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
        u8 cap = 0;

        if (lcladv & ADVERTISE_1000XPAUSE) {
                if (lcladv & ADVERTISE_1000XPSE_ASYM) {
                        if (rmtadv & LPA_1000XPAUSE)
                                cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
                        else if (rmtadv & LPA_1000XPAUSE_ASYM)
                                cap = FLOW_CTRL_RX;
                } else {
                        if (rmtadv & LPA_1000XPAUSE)
                                cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
                }
        } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
                if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
                        cap = FLOW_CTRL_TX;
        }

        return cap;
}

static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
        u8 autoneg;
        u8 flowctrl = 0;
        u32 old_rx_mode = tp->rx_mode;
        u32 old_tx_mode = tp->tx_mode;

        if (tg3_flag(tp, USE_PHYLIB))
                autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
        else
                autoneg = tp->link_config.autoneg;

        if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
                if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
                        flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
                else
                        flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
        } else
                flowctrl = tp->link_config.flowctrl;

        tp->link_config.active_flowctrl = flowctrl;

        if (flowctrl & FLOW_CTRL_RX)
                tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

        if (old_rx_mode != tp->rx_mode)
                tw32_f(MAC_RX_MODE, tp->rx_mode);

        if (flowctrl & FLOW_CTRL_TX)
                tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

        if (old_tx_mode != tp->tx_mode)
                tw32_f(MAC_TX_MODE, tp->tx_mode);
}

static void tg3_adjust_link(struct net_device *dev)
{
        u8 oldflowctrl, linkmesg = 0;
        u32 mac_mode, lcl_adv, rmt_adv;
        struct tg3 *tp = netdev_priv(dev);
        struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        spin_lock_bh(&tp->lock);

        mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
                                    MAC_MODE_HALF_DUPLEX);

        oldflowctrl = tp->link_config.active_flowctrl;

        if (phydev->link) {
                lcl_adv = 0;
                rmt_adv = 0;

                if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
                        mac_mode |= MAC_MODE_PORT_MODE_MII;
                else if (phydev->speed == SPEED_1000 ||
                         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
                        mac_mode |= MAC_MODE_PORT_MODE_GMII;
                else
                        mac_mode |= MAC_MODE_PORT_MODE_MII;

                if (phydev->duplex == DUPLEX_HALF)
                        mac_mode |= MAC_MODE_HALF_DUPLEX;
                else {
                        lcl_adv = tg3_advert_flowctrl_1000T(
                                  tp->link_config.flowctrl);

                        if (phydev->pause)
                                rmt_adv = LPA_PAUSE_CAP;
                        if (phydev->asym_pause)
                                rmt_adv |= LPA_PAUSE_ASYM;
                }

                tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
        } else
                mac_mode |= MAC_MODE_PORT_MODE_GMII;

        if (mac_mode != tp->mac_mode) {
                tp->mac_mode = mac_mode;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
                if (phydev->speed == SPEED_10)
                        tw32(MAC_MI_STAT,
                             MAC_MI_STAT_10MBPS_MODE |
                             MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1565                 else
1566                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1567         }
1568
1569         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1570                 tw32(MAC_TX_LENGTHS,
1571                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1572                       (6 << TX_LENGTHS_IPG_SHIFT) |
1573                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1574         else
1575                 tw32(MAC_TX_LENGTHS,
1576                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1577                       (6 << TX_LENGTHS_IPG_SHIFT) |
1578                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1579
1580         if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1581             (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1582             phydev->speed != tp->link_config.active_speed ||
1583             phydev->duplex != tp->link_config.active_duplex ||
1584             oldflowctrl != tp->link_config.active_flowctrl)
1585                 linkmesg = 1;
1586
1587         tp->link_config.active_speed = phydev->speed;
1588         tp->link_config.active_duplex = phydev->duplex;
1589
1590         spin_unlock_bh(&tp->lock);
1591
1592         if (linkmesg)
1593                 tg3_link_report(tp);
1594 }
1595
1596 static int tg3_phy_init(struct tg3 *tp)
1597 {
1598         struct phy_device *phydev;
1599
1600         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1601                 return 0;
1602
1603         /* Bring the PHY back to a known state. */
1604         tg3_bmcr_reset(tp);
1605
1606         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1607
1608         /* Attach the MAC to the PHY. */
1609         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1610                              phydev->dev_flags, phydev->interface);
1611         if (IS_ERR(phydev)) {
1612                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1613                 return PTR_ERR(phydev);
1614         }
1615
1616         /* Mask with MAC supported features. */
1617         switch (phydev->interface) {
1618         case PHY_INTERFACE_MODE_GMII:
1619         case PHY_INTERFACE_MODE_RGMII:
1620                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1621                         phydev->supported &= (PHY_GBIT_FEATURES |
1622                                               SUPPORTED_Pause |
1623                                               SUPPORTED_Asym_Pause);
1624                         break;
1625                 }
1626                 /* fallthru */
1627         case PHY_INTERFACE_MODE_MII:
1628                 phydev->supported &= (PHY_BASIC_FEATURES |
1629                                       SUPPORTED_Pause |
1630                                       SUPPORTED_Asym_Pause);
1631                 break;
1632         default:
1633                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1634                 return -EINVAL;
1635         }
1636
1637         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1638
1639         phydev->advertising = phydev->supported;
1640
1641         return 0;
1642 }
1643
1644 static void tg3_phy_start(struct tg3 *tp)
1645 {
1646         struct phy_device *phydev;
1647
1648         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1649                 return;
1650
1651         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1652
1653         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1654                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1655                 phydev->speed = tp->link_config.orig_speed;
1656                 phydev->duplex = tp->link_config.orig_duplex;
1657                 phydev->autoneg = tp->link_config.orig_autoneg;
1658                 phydev->advertising = tp->link_config.orig_advertising;
1659         }
1660
1661         phy_start(phydev);
1662
1663         phy_start_aneg(phydev);
1664 }
1665
1666 static void tg3_phy_stop(struct tg3 *tp)
1667 {
1668         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1669                 return;
1670
1671         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1672 }
1673
1674 static void tg3_phy_fini(struct tg3 *tp)
1675 {
1676         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1677                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1678                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1679         }
1680 }
1681
1682 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1683 {
1684         u32 phytest;
1685
1686         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1687                 u32 phy;
1688
1689                 tg3_writephy(tp, MII_TG3_FET_TEST,
1690                              phytest | MII_TG3_FET_SHADOW_EN);
1691                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1692                         if (enable)
1693                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1694                         else
1695                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1696                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1697                 }
1698                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1699         }
1700 }
1701
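/* Toggle the PHY's auto power-down (APD) feature.  FET-style PHYs
 * go through their own shadow bank (tg3_phy_fet_toggle_apd() above);
 * other PHYs are programmed through the misc shadow register, first
 * the SCR5 select and then the APD select with an 84 ms wake timer.
 */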
1702 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1703 {
1704         u32 reg;
1705
1706         if (!tg3_flag(tp, 5705_PLUS) ||
1707             (tg3_flag(tp, 5717_PLUS) &&
1708              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1709                 return;
1710
1711         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1712                 tg3_phy_fet_toggle_apd(tp, enable);
1713                 return;
1714         }
1715
1716         reg = MII_TG3_MISC_SHDW_WREN |
1717               MII_TG3_MISC_SHDW_SCR5_SEL |
1718               MII_TG3_MISC_SHDW_SCR5_LPED |
1719               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1720               MII_TG3_MISC_SHDW_SCR5_SDTL |
1721               MII_TG3_MISC_SHDW_SCR5_C125OE;
1722         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1723                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1724
1725         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1726
1727
1728         reg = MII_TG3_MISC_SHDW_WREN |
1729               MII_TG3_MISC_SHDW_APD_SEL |
1730               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1731         if (enable)
1732                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1733
1734         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1735 }
1736
1737 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1738 {
1739         u32 phy;
1740
1741         if (!tg3_flag(tp, 5705_PLUS) ||
1742             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
1743                 return;
1744
1745         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1746                 u32 ephy;
1747
1748                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
1749                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
1750
1751                         tg3_writephy(tp, MII_TG3_FET_TEST,
1752                                      ephy | MII_TG3_FET_SHADOW_EN);
1753                         if (!tg3_readphy(tp, reg, &phy)) {
1754                                 if (enable)
1755                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1756                                 else
1757                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1758                                 tg3_writephy(tp, reg, phy);
1759                         }
1760                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
1761                 }
1762         } else {
1763                 int ret;
1764
1765                 ret = tg3_phy_auxctl_read(tp,
1766                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
1767                 if (!ret) {
1768                         if (enable)
1769                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1770                         else
1771                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1772                         tg3_phy_auxctl_write(tp,
1773                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
1774                 }
1775         }
1776 }
1777
1778 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1779 {
1780         int ret;
1781         u32 val;
1782
1783         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1784                 return;
1785
1786         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
1787         if (!ret)
1788                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
1789                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
1790 }
1791
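/* Push the factory calibration words from the chip's one-time
 * programmable (OTP) memory into the PHY DSP: the AGC target, the
 * high- and low-pass filter settings, the VDAC and 10BT amplitudes,
 * and the resistor offsets.
 */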
1792 static void tg3_phy_apply_otp(struct tg3 *tp)
1793 {
1794         u32 otp, phy;
1795
1796         if (!tp->phy_otp)
1797                 return;
1798
1799         otp = tp->phy_otp;
1800
1801         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
1802                 return;
1803
1804         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1805         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1806         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1807
1808         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1809               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1810         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1811
1812         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1813         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1814         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1815
1816         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1817         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1818
1819         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1820         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1821
1822         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1823               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1824         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1825
1826         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1827 }
1828
1829 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
1830 {
1831         u32 val;
1832
1833         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
1834                 return;
1835
1836         tp->setlpicnt = 0;
1837
1838         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1839             current_link_up == 1 &&
1840             tp->link_config.active_duplex == DUPLEX_FULL &&
1841             (tp->link_config.active_speed == SPEED_100 ||
1842              tp->link_config.active_speed == SPEED_1000)) {
1843                 u32 eeectl;
1844
1845                 if (tp->link_config.active_speed == SPEED_1000)
1846                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
1847                 else
1848                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
1849
1850                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
1851
1852                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
1853                                   TG3_CL45_D7_EEERES_STAT, &val);
1854
1855                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
1856                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
1857                         tp->setlpicnt = 2;
1858         }
1859
1860         if (!tp->setlpicnt) {
1861                 if (current_link_up == 1 &&
1862                     !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1863                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
1864                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1865                 }
1866
1867                 val = tr32(TG3_CPMU_EEE_MODE);
1868                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
1869         }
1870 }
1871
1872 static void tg3_phy_eee_enable(struct tg3 *tp)
1873 {
1874         u32 val;
1875
1876         if (tp->link_config.active_speed == SPEED_1000 &&
1877             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1878              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
1879              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
1880             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1881                 val = MII_TG3_DSP_TAP26_ALNOKO |
1882                       MII_TG3_DSP_TAP26_RMRXSTO;
1883                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
1884                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1885         }
1886
1887         val = tr32(TG3_CPMU_EEE_MODE);
1888         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
1889 }
1890
1891 static int tg3_wait_macro_done(struct tg3 *tp)
1892 {
1893         int limit = 100;
1894
1895         while (limit--) {
1896                 u32 tmp32;
1897
1898                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1899                         if ((tmp32 & 0x1000) == 0)
1900                                 break;
1901                 }
1902         }
1903         if (limit < 0)
1904                 return -EBUSY;
1905
1906         return 0;
1907 }
1908
1909 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1910 {
1911         static const u32 test_pat[4][6] = {
1912         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1913         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1914         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1915         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1916         };
1917         int chan;
1918
1919         for (chan = 0; chan < 4; chan++) {
1920                 int i;
1921
1922                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1923                              (chan * 0x2000) | 0x0200);
1924                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1925
1926                 for (i = 0; i < 6; i++)
1927                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1928                                      test_pat[chan][i]);
1929
1930                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1931                 if (tg3_wait_macro_done(tp)) {
1932                         *resetp = 1;
1933                         return -EBUSY;
1934                 }
1935
1936                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1937                              (chan * 0x2000) | 0x0200);
1938                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
1939                 if (tg3_wait_macro_done(tp)) {
1940                         *resetp = 1;
1941                         return -EBUSY;
1942                 }
1943
1944                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
1945                 if (tg3_wait_macro_done(tp)) {
1946                         *resetp = 1;
1947                         return -EBUSY;
1948                 }
1949
1950                 for (i = 0; i < 6; i += 2) {
1951                         u32 low, high;
1952
1953                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1954                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1955                             tg3_wait_macro_done(tp)) {
1956                                 *resetp = 1;
1957                                 return -EBUSY;
1958                         }
1959                         low &= 0x7fff;
1960                         high &= 0x000f;
1961                         if (low != test_pat[chan][i] ||
1962                             high != test_pat[chan][i+1]) {
1963                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1964                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1965                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1966
1967                                 return -EBUSY;
1968                         }
1969                 }
1970         }
1971
1972         return 0;
1973 }
1974
1975 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1976 {
1977         int chan;
1978
1979         for (chan = 0; chan < 4; chan++) {
1980                 int i;
1981
1982                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1983                              (chan * 0x2000) | 0x0200);
1984                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1985                 for (i = 0; i < 6; i++)
1986                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1987                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1988                 if (tg3_wait_macro_done(tp))
1989                         return -EBUSY;
1990         }
1991
1992         return 0;
1993 }
1994
1995 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1996 {
1997         u32 reg32, phy9_orig;
1998         int retries, do_phy_reset, err;
1999
2000         retries = 10;
2001         do_phy_reset = 1;
2002         do {
2003                 if (do_phy_reset) {
2004                         err = tg3_bmcr_reset(tp);
2005                         if (err)
2006                                 return err;
2007                         do_phy_reset = 0;
2008                 }
2009
2010                 /* Disable transmitter and interrupt.  */
2011                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2012                         continue;
2013
2014                 reg32 |= 0x3000;
2015                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2016
2017                 /* Set full-duplex, 1000 Mbps.  */
2018                 tg3_writephy(tp, MII_BMCR,
2019                              BMCR_FULLDPLX | BMCR_SPEED1000);
2020
2021                 /* Set to master mode.  */
2022                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2023                         continue;
2024
2025                 tg3_writephy(tp, MII_CTRL1000,
2026                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2027
2028                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2029                 if (err)
2030                         return err;
2031
2032                 /* Block the PHY control access.  */
2033                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2034
2035                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2036                 if (!err)
2037                         break;
2038         } while (--retries);
2039
2040         err = tg3_phy_reset_chanpat(tp);
2041         if (err)
2042                 return err;
2043
2044         tg3_phydsp_write(tp, 0x8005, 0x0000);
2045
2046         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2047         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2048
2049         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2050
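        /* FIXME: if every MII_CTRL1000 read in the retry loop above
         * failed, phy9_orig is written back uninitialized here.
         */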
2051         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2052
2053         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2054                 reg32 &= ~0x3000;
2055                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2056         } else if (!err)
2057                 err = -EBUSY;
2058
2059         return err;
2060 }
2061
2062 /* Reset the tigon3 PHY unconditionally and reapply all chip- and
2063  * revision-specific workarounds.
2064  */
2065 static int tg3_phy_reset(struct tg3 *tp)
2066 {
2067         u32 val, cpmuctrl;
2068         int err;
2069
2070         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2071                 val = tr32(GRC_MISC_CFG);
2072                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2073                 udelay(40);
2074         }
2075         err  = tg3_readphy(tp, MII_BMSR, &val);
2076         err |= tg3_readphy(tp, MII_BMSR, &val);
2077         if (err != 0)
2078                 return -EBUSY;
2079
2080         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2081                 netif_carrier_off(tp->dev);
2082                 tg3_link_report(tp);
2083         }
2084
2085         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2086             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2087             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2088                 err = tg3_phy_reset_5703_4_5(tp);
2089                 if (err)
2090                         return err;
2091                 goto out;
2092         }
2093
2094         cpmuctrl = 0;
2095         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2096             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2097                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2098                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2099                         tw32(TG3_CPMU_CTRL,
2100                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2101         }
2102
2103         err = tg3_bmcr_reset(tp);
2104         if (err)
2105                 return err;
2106
2107         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2108                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2109                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2110
2111                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2112         }
2113
2114         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2115             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2116                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2117                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2118                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2119                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2120                         udelay(40);
2121                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2122                 }
2123         }
2124
2125         if (tg3_flag(tp, 5717_PLUS) &&
2126             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2127                 return 0;
2128
2129         tg3_phy_apply_otp(tp);
2130
2131         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2132                 tg3_phy_toggle_apd(tp, true);
2133         else
2134                 tg3_phy_toggle_apd(tp, false);
2135
2136 out:
2137         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2138             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2139                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2140                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2141                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2142         }
2143
2144         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2145                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2146                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2147         }
2148
2149         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2150                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2151                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2152                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2153                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2154                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2155                 }
2156         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2157                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2158                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2159                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2160                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2161                                 tg3_writephy(tp, MII_TG3_TEST1,
2162                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2163                         } else
2164                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2165
2166                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2167                 }
2168         }
2169
2170         /* Set the Extended packet length bit (bit 14) on all chips
2171          * that support jumbo frames. */
2172         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2173                 /* Cannot do read-modify-write on 5401 */
2174                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2175         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2176                 /* Set bit 14 with read-modify-write to preserve other bits */
2177                 err = tg3_phy_auxctl_read(tp,
2178                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2179                 if (!err)
2180                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2181                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2182         }
2183
2184         /* Set PHY register 0x10 bit 0 to high FIFO elasticity to
2185          * support transmission of jumbo frames.
2186          */
2187         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2188                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2189                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2190                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2191         }
2192
2193         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2194                 /* adjust output voltage */
2195                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2196         }
2197
2198         tg3_phy_toggle_automdix(tp, 1);
2199         tg3_phy_set_wirespeed(tp);
2200         return 0;
2201 }
2202
2203 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2204 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2205 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2206                                           TG3_GPIO_MSG_NEED_VAUX)
2207 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2208         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2209          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2210          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2211          (TG3_GPIO_MSG_DRVR_PRES << 12))
2212
2213 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2214         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2215          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2216          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2217          (TG3_GPIO_MSG_NEED_VAUX << 12))
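
/* The GPIO status word carries one 4-bit field per PCI function:
 * function 0 in bits 0-3, function 1 in bits 4-7, and so on.  That
 * is why the ALL_* masks above replicate each flag at shifts of
 * 0, 4, 8 and 12.
 */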
2218
2219 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2220 {
2221         u32 status, shift;
2222
2223         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2224             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2225                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2226         else
2227                 status = tr32(TG3_CPMU_DRV_STATUS);
2228
2229         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2230         status &= ~(TG3_GPIO_MSG_MASK << shift);
2231         status |= (newstat << shift);
2232
2233         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2234             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2235                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2236         else
2237                 tw32(TG3_CPMU_DRV_STATUS, status);
2238
2239         return status >> TG3_APE_GPIO_MSG_SHIFT;
2240 }
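
/* Illustrative example: a device at PCI function 2 posting
 * TG3_GPIO_MSG_NEED_VAUX computes shift = TG3_APE_GPIO_MSG_SHIFT + 8
 * and swaps the value 0x2 into that nibble, leaving the other three
 * functions' nibbles intact; the caller gets back the whole
 * per-function status shifted down by TG3_APE_GPIO_MSG_SHIFT.
 */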
2241
2242 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2243 {
2244         if (!tg3_flag(tp, IS_NIC))
2245                 return 0;
2246
2247         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2248             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2249             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2250                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2251                         return -EIO;
2252
2253                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2254
2255                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2256                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2257
2258                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2259         } else {
2260                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2261                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2262         }
2263
2264         return 0;
2265 }
2266
2267 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2268 {
2269         u32 grc_local_ctrl;
2270
2271         if (!tg3_flag(tp, IS_NIC) ||
2272             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2273             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2274                 return;
2275
2276         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2277
2278         tw32_wait_f(GRC_LOCAL_CTRL,
2279                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2280                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2281
2282         tw32_wait_f(GRC_LOCAL_CTRL,
2283                     grc_local_ctrl,
2284                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2285
2286         tw32_wait_f(GRC_LOCAL_CTRL,
2287                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2288                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2289 }
2290
2291 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2292 {
2293         if (!tg3_flag(tp, IS_NIC))
2294                 return;
2295
2296         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2297             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2298                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2299                             (GRC_LCLCTRL_GPIO_OE0 |
2300                              GRC_LCLCTRL_GPIO_OE1 |
2301                              GRC_LCLCTRL_GPIO_OE2 |
2302                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2303                              GRC_LCLCTRL_GPIO_OUTPUT1),
2304                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2305         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2306                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2307                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2308                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2309                                      GRC_LCLCTRL_GPIO_OE1 |
2310                                      GRC_LCLCTRL_GPIO_OE2 |
2311                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2312                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2313                                      tp->grc_local_ctrl;
2314                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2315                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2316
2317                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2318                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2319                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2320
2321                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2322                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2323                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2324         } else {
2325                 u32 no_gpio2;
2326                 u32 grc_local_ctrl = 0;
2327
2328                 /* Workaround to keep the part from drawing too much current. */
2329                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2330                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2331                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2332                                     grc_local_ctrl,
2333                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2334                 }
2335
2336                 /* On 5753 and variants, GPIO2 cannot be used. */
2337                 no_gpio2 = tp->nic_sram_data_cfg &
2338                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2339
2340                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2341                                   GRC_LCLCTRL_GPIO_OE1 |
2342                                   GRC_LCLCTRL_GPIO_OE2 |
2343                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2344                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2345                 if (no_gpio2) {
2346                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2347                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2348                 }
2349                 tw32_wait_f(GRC_LOCAL_CTRL,
2350                             tp->grc_local_ctrl | grc_local_ctrl,
2351                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2352
2353                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2354
2355                 tw32_wait_f(GRC_LOCAL_CTRL,
2356                             tp->grc_local_ctrl | grc_local_ctrl,
2357                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2358
2359                 if (!no_gpio2) {
2360                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2361                         tw32_wait_f(GRC_LOCAL_CTRL,
2362                                     tp->grc_local_ctrl | grc_local_ctrl,
2363                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2364                 }
2365         }
2366 }
2367
2368 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2369 {
2370         u32 msg = 0;
2371
2372         /* Serialize power state transitions */
2373         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2374                 return;
2375
2376         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2377                 msg = TG3_GPIO_MSG_NEED_VAUX;
2378
2379         msg = tg3_set_function_status(tp, msg);
2380
2381         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2382                 goto done;
2383
2384         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2385                 tg3_pwrsrc_switch_to_vaux(tp);
2386         else
2387                 tg3_pwrsrc_die_with_vmain(tp);
2388
2389 done:
2390         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2391 }
2392
2393 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2394 {
2395         bool need_vaux = false;
2396
2397         /* The 57765 family repurposes these GPIOs; leave them alone. */
2398         if (!tg3_flag(tp, IS_NIC) ||
2399             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2400                 return;
2401
2402         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2403             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2404             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2405                 tg3_frob_aux_power_5717(tp, include_wol ?
2406                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2407                 return;
2408         }
2409
2410         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2411                 struct net_device *dev_peer;
2412
2413                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2414
2415                 /* remove_one() may have been run on the peer. */
2416                 if (dev_peer) {
2417                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2418
2419                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2420                                 return;
2421
2422                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2423                             tg3_flag(tp_peer, ENABLE_ASF))
2424                                 need_vaux = true;
2425                 }
2426         }
2427
2428         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2429             tg3_flag(tp, ENABLE_ASF))
2430                 need_vaux = true;
2431
2432         if (need_vaux)
2433                 tg3_pwrsrc_switch_to_vaux(tp);
2434         else
2435                 tg3_pwrsrc_die_with_vmain(tp);
2436 }
2437
2438 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2439 {
2440         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2441                 return 1;
2442         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2443                 if (speed != SPEED_10)
2444                         return 1;
2445         } else if (speed == SPEED_10)
2446                 return 1;
2447
2448         return 0;
2449 }
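
/* Summarized: MAC_MODE_LINK_POLARITY is wanted unconditionally in
 * LED_CTRL_MODE_PHY_2, above 10 Mbps on a BCM5411 PHY, and only at
 * 10 Mbps on everything else.
 */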
2450
2451 static int tg3_setup_phy(struct tg3 *, int);
2452
2453 #define RESET_KIND_SHUTDOWN     0
2454 #define RESET_KIND_INIT         1
2455 #define RESET_KIND_SUSPEND      2
2456
2457 static void tg3_write_sig_post_reset(struct tg3 *, int);
2458 static int tg3_halt_cpu(struct tg3 *, u32);
2459
2460 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2461 {
2462         u32 val;
2463
2464         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2465                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2466                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2467                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2468
2469                         sg_dig_ctrl |=
2470                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2471                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2472                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2473                 }
2474                 return;
2475         }
2476
2477         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2478                 tg3_bmcr_reset(tp);
2479                 val = tr32(GRC_MISC_CFG);
2480                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2481                 udelay(40);
2482                 return;
2483         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2484                 u32 phytest;
2485                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2486                         u32 phy;
2487
2488                         tg3_writephy(tp, MII_ADVERTISE, 0);
2489                         tg3_writephy(tp, MII_BMCR,
2490                                      BMCR_ANENABLE | BMCR_ANRESTART);
2491
2492                         tg3_writephy(tp, MII_TG3_FET_TEST,
2493                                      phytest | MII_TG3_FET_SHADOW_EN);
2494                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2495                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2496                                 tg3_writephy(tp,
2497                                              MII_TG3_FET_SHDW_AUXMODE4,
2498                                              phy);
2499                         }
2500                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2501                 }
2502                 return;
2503         } else if (do_low_power) {
2504                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2505                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2506
2507                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2508                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2509                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2510                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2511         }
2512
2513         /* The PHY should not be powered down on some chips because
2514          * of bugs.
2515          */
2516         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2517             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2518             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2519              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2520                 return;
2521
2522         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2523             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2524                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2525                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2526                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2527                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2528         }
2529
2530         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2531 }
2532
2533 /* tp->lock is held. */
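/* Polls NVRAM_SWARB for SWARB_GNT1 up to 8000 times at 20 usec
 * intervals (roughly 160 ms) before failing with -ENODEV; the lock
 * nests via nvram_lock_cnt.
 */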
2534 static int tg3_nvram_lock(struct tg3 *tp)
2535 {
2536         if (tg3_flag(tp, NVRAM)) {
2537                 int i;
2538
2539                 if (tp->nvram_lock_cnt == 0) {
2540                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2541                         for (i = 0; i < 8000; i++) {
2542                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2543                                         break;
2544                                 udelay(20);
2545                         }
2546                         if (i == 8000) {
2547                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2548                                 return -ENODEV;
2549                         }
2550                 }
2551                 tp->nvram_lock_cnt++;
2552         }
2553         return 0;
2554 }
2555
2556 /* tp->lock is held. */
2557 static void tg3_nvram_unlock(struct tg3 *tp)
2558 {
2559         if (tg3_flag(tp, NVRAM)) {
2560                 if (tp->nvram_lock_cnt > 0)
2561                         tp->nvram_lock_cnt--;
2562                 if (tp->nvram_lock_cnt == 0)
2563                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2564         }
2565 }
2566
2567 /* tp->lock is held. */
2568 static void tg3_enable_nvram_access(struct tg3 *tp)
2569 {
2570         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2571                 u32 nvaccess = tr32(NVRAM_ACCESS);
2572
2573                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2574         }
2575 }
2576
2577 /* tp->lock is held. */
2578 static void tg3_disable_nvram_access(struct tg3 *tp)
2579 {
2580         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2581                 u32 nvaccess = tr32(NVRAM_ACCESS);
2582
2583                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2584         }
2585 }
2586
2587 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2588                                         u32 offset, u32 *val)
2589 {
2590         u32 tmp;
2591         int i;
2592
2593         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2594                 return -EINVAL;
2595
2596         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2597                                         EEPROM_ADDR_DEVID_MASK |
2598                                         EEPROM_ADDR_READ);
2599         tw32(GRC_EEPROM_ADDR,
2600              tmp |
2601              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2602              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2603               EEPROM_ADDR_ADDR_MASK) |
2604              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2605
2606         for (i = 0; i < 1000; i++) {
2607                 tmp = tr32(GRC_EEPROM_ADDR);
2608
2609                 if (tmp & EEPROM_ADDR_COMPLETE)
2610                         break;
2611                 msleep(1);
2612         }
2613         if (!(tmp & EEPROM_ADDR_COMPLETE))
2614                 return -EBUSY;
2615
2616         tmp = tr32(GRC_EEPROM_DATA);
2617
2618         /*
2619          * The data will always be opposite the native endian
2620          * format.  Perform a blind byteswap to compensate.
2621          */
2622         *val = swab32(tmp);
2623
2624         return 0;
2625 }
2626
2627 #define NVRAM_CMD_TIMEOUT 10000
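/* 10000 polls spaced 10 usec apart bound tg3_nvram_exec_cmd() below
 * at roughly 100 ms before it gives up with -EBUSY.
 */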
2628
2629 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2630 {
2631         int i;
2632
2633         tw32(NVRAM_CMD, nvram_cmd);
2634         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2635                 udelay(10);
2636                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2637                         udelay(10);
2638                         break;
2639                 }
2640         }
2641
2642         if (i == NVRAM_CMD_TIMEOUT)
2643                 return -EBUSY;
2644
2645         return 0;
2646 }
2647
2648 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2649 {
2650         if (tg3_flag(tp, NVRAM) &&
2651             tg3_flag(tp, NVRAM_BUFFERED) &&
2652             tg3_flag(tp, FLASH) &&
2653             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2654             (tp->nvram_jedecnum == JEDEC_ATMEL))
2655
2656                 addr = ((addr / tp->nvram_pagesize) <<
2657                         ATMEL_AT45DB0X1B_PAGE_POS) +
2658                        (addr % tp->nvram_pagesize);
2659
2660         return addr;
2661 }
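
/* Worked example, assuming the 264-byte-page Atmel parts this
 * translation exists for (with ATMEL_AT45DB0X1B_PAGE_POS == 9):
 * linear offset 530 is page 2, byte 2, so the physical address
 * becomes (530 / 264) << 9 | (530 % 264) == 0x402.
 */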
2662
2663 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2664 {
2665         if (tg3_flag(tp, NVRAM) &&
2666             tg3_flag(tp, NVRAM_BUFFERED) &&
2667             tg3_flag(tp, FLASH) &&
2668             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2669             (tp->nvram_jedecnum == JEDEC_ATMEL))
2670
2671                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2672                         tp->nvram_pagesize) +
2673                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2674
2675         return addr;
2676 }
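
/* And the inverse, under the same assumptions: 0x402 >> 9 selects
 * page 2 and 0x402 & 0x1ff selects byte 2, recovering the linear
 * offset 2 * 264 + 2 == 530.
 */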
2677
2678 /* NOTE: Data read in from NVRAM is byteswapped according to
2679  * the byteswapping settings for all other register accesses.
2680  * tg3 devices are BE devices, so on a BE machine, the data
2681  * returned will be exactly as it is seen in NVRAM.  On a LE
2682  * machine, the 32-bit value will be byteswapped.
2683  */
2684 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2685 {
2686         int ret;
2687
2688         if (!tg3_flag(tp, NVRAM))
2689                 return tg3_nvram_read_using_eeprom(tp, offset, val);
2690
2691         offset = tg3_nvram_phys_addr(tp, offset);
2692
2693         if (offset > NVRAM_ADDR_MSK)
2694                 return -EINVAL;
2695
2696         ret = tg3_nvram_lock(tp);
2697         if (ret)
2698                 return ret;
2699
2700         tg3_enable_nvram_access(tp);
2701
2702         tw32(NVRAM_ADDR, offset);
2703         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2704                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2705
2706         if (ret == 0)
2707                 *val = tr32(NVRAM_RDDATA);
2708
2709         tg3_disable_nvram_access(tp);
2710
2711         tg3_nvram_unlock(tp);
2712
2713         return ret;
2714 }
2715
2716 /* Ensures NVRAM data is in bytestream format. */
2717 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2718 {
2719         u32 v;
2720         int res = tg3_nvram_read(tp, offset, &v);
2721         if (!res)
2722                 *val = cpu_to_be32(v);
2723         return res;
2724 }
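
/* Usage sketch (illustrative only; offset, dest and i stand in for a
 * caller's own bookkeeping): copying a byte stream out of NVRAM in a
 * host-endian-safe way:
 *
 *	__be32 word;
 *
 *	if (!tg3_nvram_read_be32(tp, offset, &word))
 *		memcpy(dest + i, &word, sizeof(word));
 */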
2725
2726 /* tp->lock is held. */
2727 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2728 {
2729         u32 addr_high, addr_low;
2730         int i;
2731
2732         addr_high = ((tp->dev->dev_addr[0] << 8) |
2733                      tp->dev->dev_addr[1]);
2734         addr_low = ((tp->dev->dev_addr[2] << 24) |
2735                     (tp->dev->dev_addr[3] << 16) |
2736                     (tp->dev->dev_addr[4] <<  8) |
2737                     (tp->dev->dev_addr[5] <<  0));
2738         for (i = 0; i < 4; i++) {
2739                 if (i == 1 && skip_mac_1)
2740                         continue;
2741                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2742                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2743         }
2744
2745         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2746             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2747                 for (i = 0; i < 12; i++) {
2748                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2749                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2750                 }
2751         }
2752
2753         addr_high = (tp->dev->dev_addr[0] +
2754                      tp->dev->dev_addr[1] +
2755                      tp->dev->dev_addr[2] +
2756                      tp->dev->dev_addr[3] +
2757                      tp->dev->dev_addr[4] +
2758                      tp->dev->dev_addr[5]) &
2759                 TX_BACKOFF_SEED_MASK;
2760         tw32(MAC_TX_BACKOFF_SEED, addr_high);
2761 }
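
/* Worked example: for dev_addr 00:10:18:aa:bb:cc the loop above
 * loads addr_high = 0x00000010 and addr_low = 0x18aabbcc into all
 * four MAC_ADDR slots, and the backoff seed becomes the byte sum
 * 0x259 masked with TX_BACKOFF_SEED_MASK.
 */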
2762
2763 static void tg3_enable_register_access(struct tg3 *tp)
2764 {
2765         /*
2766          * Make sure register accesses (indirect or otherwise) will function
2767          * correctly.
2768          */
2769         pci_write_config_dword(tp->pdev,
2770                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2771 }
2772
2773 static int tg3_power_up(struct tg3 *tp)
2774 {
2775         int err;
2776
2777         tg3_enable_register_access(tp);
2778
2779         err = pci_set_power_state(tp->pdev, PCI_D0);
2780         if (!err) {
2781                 /* Switch out of Vaux if it is a NIC */
2782                 tg3_pwrsrc_switch_to_vmain(tp);
2783         } else {
2784                 netdev_err(tp->dev, "Transition to D0 failed\n");
2785         }
2786
2787         return err;
2788 }
2789
2790 static int tg3_power_down_prepare(struct tg3 *tp)
2791 {
2792         u32 misc_host_ctrl;
2793         bool device_should_wake, do_low_power;
2794
2795         tg3_enable_register_access(tp);
2796
2797         /* Restore the CLKREQ setting. */
2798         if (tg3_flag(tp, CLKREQ_BUG)) {
2799                 u16 lnkctl;
2800
2801                 pci_read_config_word(tp->pdev,
2802                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
2803                                      &lnkctl);
2804                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2805                 pci_write_config_word(tp->pdev,
2806                                       pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
2807                                       lnkctl);
2808         }
2809
2810         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2811         tw32(TG3PCI_MISC_HOST_CTRL,
2812              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2813
2814         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2815                              tg3_flag(tp, WOL_ENABLE);
2816
2817         if (tg3_flag(tp, USE_PHYLIB)) {
2818                 do_low_power = false;
2819                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2820                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2821                         struct phy_device *phydev;
2822                         u32 phyid, advertising;
2823
2824                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2825
2826                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2827
2828                         tp->link_config.orig_speed = phydev->speed;
2829                         tp->link_config.orig_duplex = phydev->duplex;
2830                         tp->link_config.orig_autoneg = phydev->autoneg;
2831                         tp->link_config.orig_advertising = phydev->advertising;
2832
2833                         advertising = ADVERTISED_TP |
2834                                       ADVERTISED_Pause |
2835                                       ADVERTISED_Autoneg |
2836                                       ADVERTISED_10baseT_Half;
2837
2838                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
2839                                 if (tg3_flag(tp, WOL_SPEED_100MB))
2840                                         advertising |=
2841                                                 ADVERTISED_100baseT_Half |
2842                                                 ADVERTISED_100baseT_Full |
2843                                                 ADVERTISED_10baseT_Full;
2844                                 else
2845                                         advertising |= ADVERTISED_10baseT_Full;
2846                         }
2847
2848                         phydev->advertising = advertising;
2849
2850                         phy_start_aneg(phydev);
2851
2852                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2853                         if (phyid != PHY_ID_BCMAC131) {
2854                                 phyid &= PHY_BCM_OUI_MASK;
2855                                 if (phyid == PHY_BCM_OUI_1 ||
2856                                     phyid == PHY_BCM_OUI_2 ||
2857                                     phyid == PHY_BCM_OUI_3)
2858                                         do_low_power = true;
2859                         }
2860                 }
2861         } else {
2862                 do_low_power = true;
2863
2864                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2865                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2866                         tp->link_config.orig_speed = tp->link_config.speed;
2867                         tp->link_config.orig_duplex = tp->link_config.duplex;
2868                         tp->link_config.orig_autoneg = tp->link_config.autoneg;
2869                 }
2870
2871                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2872                         tp->link_config.speed = SPEED_10;
2873                         tp->link_config.duplex = DUPLEX_HALF;
2874                         tp->link_config.autoneg = AUTONEG_ENABLE;
2875                         tg3_setup_phy(tp, 0);
2876                 }
2877         }
2878
2879         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2880                 u32 val;
2881
2882                 val = tr32(GRC_VCPU_EXT_CTRL);
2883                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2884         } else if (!tg3_flag(tp, ENABLE_ASF)) {
2885                 int i;
2886                 u32 val;
2887
2888                 for (i = 0; i < 200; i++) {
2889                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2890                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2891                                 break;
2892                         msleep(1);
2893                 }
2894         }
2895         if (tg3_flag(tp, WOL_CAP))
2896                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2897                                                      WOL_DRV_STATE_SHUTDOWN |
2898                                                      WOL_DRV_WOL |
2899                                                      WOL_SET_MAGIC_PKT);
2900
2901         if (device_should_wake) {
2902                 u32 mac_mode;
2903
2904                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2905                         if (do_low_power &&
2906                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2907                                 tg3_phy_auxctl_write(tp,
2908                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2909                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
2910                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2911                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
2912                                 udelay(40);
2913                         }
2914
2915                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2916                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
2917                         else
2918                                 mac_mode = MAC_MODE_PORT_MODE_MII;
2919
2920                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2921                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2922                             ASIC_REV_5700) {
2923                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
2924                                              SPEED_100 : SPEED_10;
2925                                 if (tg3_5700_link_polarity(tp, speed))
2926                                         mac_mode |= MAC_MODE_LINK_POLARITY;
2927                                 else
2928                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
2929                         }
2930                 } else {
2931                         mac_mode = MAC_MODE_PORT_MODE_TBI;
2932                 }
2933
2934                 if (!tg3_flag(tp, 5750_PLUS))
2935                         tw32(MAC_LED_CTRL, tp->led_ctrl);
2936
2937                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2938                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
2939                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
2940                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2941
2942                 if (tg3_flag(tp, ENABLE_APE))
2943                         mac_mode |= MAC_MODE_APE_TX_EN |
2944                                     MAC_MODE_APE_RX_EN |
2945                                     MAC_MODE_TDE_ENABLE;
2946
2947                 tw32_f(MAC_MODE, mac_mode);
2948                 udelay(100);
2949
2950                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2951                 udelay(10);
2952         }
2953
2954         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
2955             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2956              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2957                 u32 base_val;
2958
2959                 base_val = tp->pci_clock_ctrl;
2960                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2961                              CLOCK_CTRL_TXCLK_DISABLE);
2962
2963                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2964                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
2965         } else if (tg3_flag(tp, 5780_CLASS) ||
2966                    tg3_flag(tp, CPMU_PRESENT) ||
2967                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2968                 /* do nothing */
2969         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
2970                 u32 newbits1, newbits2;
2971
2972                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2973                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2974                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2975                                     CLOCK_CTRL_TXCLK_DISABLE |
2976                                     CLOCK_CTRL_ALTCLK);
2977                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2978                 } else if (tg3_flag(tp, 5705_PLUS)) {
2979                         newbits1 = CLOCK_CTRL_625_CORE;
2980                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2981                 } else {
2982                         newbits1 = CLOCK_CTRL_ALTCLK;
2983                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2984                 }
2985
2986                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2987                             40);
2988
2989                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2990                             40);
2991
2992                 if (!tg3_flag(tp, 5705_PLUS)) {
2993                         u32 newbits3;
2994
2995                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2996                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2997                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2998                                             CLOCK_CTRL_TXCLK_DISABLE |
2999                                             CLOCK_CTRL_44MHZ_CORE);
3000                         } else {
3001                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3002                         }
3003
3004                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
3005                                     tp->pci_clock_ctrl | newbits3, 40);
3006                 }
3007         }
3008
3009         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3010                 tg3_power_down_phy(tp, do_low_power);
3011
3012         tg3_frob_aux_power(tp, true);
3013
3014         /* Workaround for unstable PLL clock */
3015         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3016             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3017                 u32 val = tr32(0x7d00);
3018
3019                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3020                 tw32(0x7d00, val);
3021                 if (!tg3_flag(tp, ENABLE_ASF)) {
3022                         int err;
3023
3024                         err = tg3_nvram_lock(tp);
3025                         tg3_halt_cpu(tp, RX_CPU_BASE);
3026                         if (!err)
3027                                 tg3_nvram_unlock(tp);
3028                 }
3029         }
3030
3031         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3032
3033         return 0;
3034 }
3035
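/* Final power-down: run the shutdown preparation above, then arm PCI
 * wakeup according to the WOL_ENABLE flag and enter D3hot.
 */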
3036 static void tg3_power_down(struct tg3 *tp)
3037 {
3038         tg3_power_down_prepare(tp);
3039
3040         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3041         pci_set_power_state(tp->pdev, PCI_D3hot);
3042 }
3043
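/* Decode the speed/duplex field of the PHY aux status register into
 * generic SPEED_xxx / DUPLEX_xxx values.  FET-style PHYs use a
 * different encoding, handled in the default case below.
 */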
3044 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3045 {
3046         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3047         case MII_TG3_AUX_STAT_10HALF:
3048                 *speed = SPEED_10;
3049                 *duplex = DUPLEX_HALF;
3050                 break;
3051
3052         case MII_TG3_AUX_STAT_10FULL:
3053                 *speed = SPEED_10;
3054                 *duplex = DUPLEX_FULL;
3055                 break;
3056
3057         case MII_TG3_AUX_STAT_100HALF:
3058                 *speed = SPEED_100;
3059                 *duplex = DUPLEX_HALF;
3060                 break;
3061
3062         case MII_TG3_AUX_STAT_100FULL:
3063                 *speed = SPEED_100;
3064                 *duplex = DUPLEX_FULL;
3065                 break;
3066
3067         case MII_TG3_AUX_STAT_1000HALF:
3068                 *speed = SPEED_1000;
3069                 *duplex = DUPLEX_HALF;
3070                 break;
3071
3072         case MII_TG3_AUX_STAT_1000FULL:
3073                 *speed = SPEED_1000;
3074                 *duplex = DUPLEX_FULL;
3075                 break;
3076
3077         default:
3078                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3079                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3080                                  SPEED_10;
3081                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3082                                   DUPLEX_HALF;
3083                         break;
3084                 }
3085                 *speed = SPEED_INVALID;
3086                 *duplex = DUPLEX_INVALID;
3087                 break;
3088         }
3089 }
3090
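/* Program the autoneg advertisement registers (MII_ADVERTISE, plus
 * MII_CTRL1000 on gigabit-capable PHYs) from generic ADVERTISED_xxx
 * bits, then set up EEE advertisement on EEE-capable parts.
 */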
3091 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3092 {
3093         int err = 0;
3094         u32 val, new_adv;
3095
3096         new_adv = ADVERTISE_CSMA;
3097         if (advertise & ADVERTISED_10baseT_Half)
3098                 new_adv |= ADVERTISE_10HALF;
3099         if (advertise & ADVERTISED_10baseT_Full)
3100                 new_adv |= ADVERTISE_10FULL;
3101         if (advertise & ADVERTISED_100baseT_Half)
3102                 new_adv |= ADVERTISE_100HALF;
3103         if (advertise & ADVERTISED_100baseT_Full)
3104                 new_adv |= ADVERTISE_100FULL;
3105
3106         new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
3107
3108         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3109         if (err)
3110                 goto done;
3111
3112         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3113                 goto done;
3114
3115         new_adv = 0;
3116         if (advertise & ADVERTISED_1000baseT_Half)
3117                 new_adv |= ADVERTISE_1000HALF;
3118         if (advertise & ADVERTISED_1000baseT_Full)
3119                 new_adv |= ADVERTISE_1000FULL;
3120
3121         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3122             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3123                 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3124
3125         err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3126         if (err)
3127                 goto done;
3128
3129         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3130                 goto done;
3131
3132         tw32(TG3_CPMU_EEE_MODE,
3133              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3134
3135         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3136         if (!err) {
3137                 u32 err2;
3138
3139                 val = 0;
3140                 /* Advertise 100BASE-TX EEE ability */
3141                 if (advertise & ADVERTISED_100baseT_Full)
3142                         val |= MDIO_AN_EEE_ADV_100TX;
3143                 /* Advertise 1000BASE-T EEE ability */
3144                 if (advertise & ADVERTISED_1000baseT_Full)
3145                         val |= MDIO_AN_EEE_ADV_1000T;
3146                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3147                 if (err)
3148                         val = 0;
3149
3150                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3151                 case ASIC_REV_5717:
3152                 case ASIC_REV_57765:
3153                 case ASIC_REV_5719:
3154                         /* If any EEE modes were advertised above... */
3155                         if (val)
3156                                 val = MII_TG3_DSP_TAP26_ALNOKO |
3157                                       MII_TG3_DSP_TAP26_RMRXSTO |
3158                                       MII_TG3_DSP_TAP26_OPCSINPT;
3159                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3160                         /* Fall through */
3161                 case ASIC_REV_5720:
3162                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3163                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3164                                                  MII_TG3_DSP_CH34TP2_HIBW01);
3165                 }
3166
3167                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3168                 if (!err)
3169                         err = err2;
3170         }
3171
3172 done:
3173         return err;
3174 }
3175
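/* Begin link bring-up on a copper PHY.  What gets advertised depends
 * on whether we are in low-power (WOL) mode, autonegotiating, or
 * forcing a specific speed/duplex through BMCR.
 */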
3176 static void tg3_phy_copper_begin(struct tg3 *tp)
3177 {
3178         u32 new_adv;
3179         int i;
3180
3181         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3182                 new_adv = ADVERTISED_10baseT_Half |
3183                           ADVERTISED_10baseT_Full;
3184                 if (tg3_flag(tp, WOL_SPEED_100MB))
3185                         new_adv |= ADVERTISED_100baseT_Half |
3186                                    ADVERTISED_100baseT_Full;
3187
3188                 tg3_phy_autoneg_cfg(tp, new_adv,
3189                                     FLOW_CTRL_TX | FLOW_CTRL_RX);
3190         } else if (tp->link_config.speed == SPEED_INVALID) {
3191                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3192                         tp->link_config.advertising &=
3193                                 ~(ADVERTISED_1000baseT_Half |
3194                                   ADVERTISED_1000baseT_Full);
3195
3196                 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3197                                     tp->link_config.flowctrl);
3198         } else {
3199                 /* Asking for a specific link mode. */
3200                 if (tp->link_config.speed == SPEED_1000) {
3201                         if (tp->link_config.duplex == DUPLEX_FULL)
3202                                 new_adv = ADVERTISED_1000baseT_Full;
3203                         else
3204                                 new_adv = ADVERTISED_1000baseT_Half;
3205                 } else if (tp->link_config.speed == SPEED_100) {
3206                         if (tp->link_config.duplex == DUPLEX_FULL)
3207                                 new_adv = ADVERTISED_100baseT_Full;
3208                         else
3209                                 new_adv = ADVERTISED_100baseT_Half;
3210                 } else {
3211                         if (tp->link_config.duplex == DUPLEX_FULL)
3212                                 new_adv = ADVERTISED_10baseT_Full;
3213                         else
3214                                 new_adv = ADVERTISED_10baseT_Half;
3215                 }
3216
3217                 tg3_phy_autoneg_cfg(tp, new_adv,
3218                                     tp->link_config.flowctrl);
3219         }
3220
3221         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3222             tp->link_config.speed != SPEED_INVALID) {
3223                 u32 bmcr, orig_bmcr;
3224
3225                 tp->link_config.active_speed = tp->link_config.speed;
3226                 tp->link_config.active_duplex = tp->link_config.duplex;
3227
3228                 bmcr = 0;
3229                 switch (tp->link_config.speed) {
3230                 default:
3231                 case SPEED_10:
3232                         break;
3233
3234                 case SPEED_100:
3235                         bmcr |= BMCR_SPEED100;
3236                         break;
3237
3238                 case SPEED_1000:
3239                         bmcr |= BMCR_SPEED1000;
3240                         break;
3241                 }
3242
3243                 if (tp->link_config.duplex == DUPLEX_FULL)
3244                         bmcr |= BMCR_FULLDPLX;
3245
3246                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3247                     (bmcr != orig_bmcr)) {
3248                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3249                         for (i = 0; i < 1500; i++) {
3250                                 u32 tmp;
3251
3252                                 udelay(10);
3253                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3254                                     tg3_readphy(tp, MII_BMSR, &tmp))
3255                                         continue;
3256                                 if (!(tmp & BMSR_LSTATUS)) {
3257                                         udelay(40);
3258                                         break;
3259                                 }
3260                         }
3261                         tg3_writephy(tp, MII_BMCR, bmcr);
3262                         udelay(40);
3263                 }
3264         } else {
3265                 tg3_writephy(tp, MII_BMCR,
3266                              BMCR_ANENABLE | BMCR_ANRESTART);
3267         }
3268 }
3269
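/* Load DSP workaround values for the BCM5401 PHY.  The address/value
 * pairs appear to be vendor-provided magic; any write failure is
 * accumulated into the return value.
 */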
3270 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3271 {
3272         int err;
3273
3274         /* Turn off tap power management and set the
3275          * extended packet length bit. */
3276         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3277
3278         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3279         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3280         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3281         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3282         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3283
3284         udelay(40);
3285
3286         return err;
3287 }
3288
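/* Return 1 only if every mode requested in @mask is currently being
 * advertised by the PHY, 0 otherwise or on a register read failure.
 */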
3289 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3290 {
3291         u32 adv_reg, all_mask = 0;
3292
3293         if (mask & ADVERTISED_10baseT_Half)
3294                 all_mask |= ADVERTISE_10HALF;
3295         if (mask & ADVERTISED_10baseT_Full)
3296                 all_mask |= ADVERTISE_10FULL;
3297         if (mask & ADVERTISED_100baseT_Half)
3298                 all_mask |= ADVERTISE_100HALF;
3299         if (mask & ADVERTISED_100baseT_Full)
3300                 all_mask |= ADVERTISE_100FULL;
3301
3302         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3303                 return 0;
3304
3305         if ((adv_reg & all_mask) != all_mask)
3306                 return 0;
3307         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3308                 u32 tg3_ctrl;
3309
3310                 all_mask = 0;
3311                 if (mask & ADVERTISED_1000baseT_Half)
3312                         all_mask |= ADVERTISE_1000HALF;
3313                 if (mask & ADVERTISED_1000baseT_Full)
3314                         all_mask |= ADVERTISE_1000FULL;
3315
3316                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3317                         return 0;
3318
3319                 if ((tg3_ctrl & all_mask) != all_mask)
3320                         return 0;
3321         }
3322         return 1;
3323 }
3324
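/* Check that the advertised flow control matches what was requested.
 * A mismatch fails the check only on a full-duplex link; otherwise
 * the advertisement is quietly fixed up for the next renegotiation.
 */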
3325 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3326 {
3327         u32 curadv, reqadv;
3328
3329         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3330                 return 1;
3331
3332         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3333         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3334
3335         if (tp->link_config.active_duplex == DUPLEX_FULL) {
3336                 if (curadv != reqadv)
3337                         return 0;
3338
3339                 if (tg3_flag(tp, PAUSE_AUTONEG))
3340                         tg3_readphy(tp, MII_LPA, rmtadv);
3341         } else {
3342                 /* Reprogram the advertisement register, even if it
3343                  * does not affect the current link.  If the link
3344                  * gets renegotiated in the future, we can save an
3345                  * additional renegotiation cycle by advertising
3346                  * it correctly in the first place.
3347                  */
3348                 if (curadv != reqadv) {
3349                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3350                                      ADVERTISE_PAUSE_ASYM);
3351                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3352                 }
3353         }
3354
3355         return 1;
3356 }
3357
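/* Main link setup path for copper PHYs: clear stale MAC status,
 * apply chip-specific PHY workarounds, poll BMSR for link, derive
 * speed/duplex/flow control, and program MAC_MODE to match.
 */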
3358 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3359 {
3360         int current_link_up;
3361         u32 bmsr, val;
3362         u32 lcl_adv, rmt_adv;
3363         u16 current_speed;
3364         u8 current_duplex;
3365         int i, err;
3366
3367         tw32(MAC_EVENT, 0);
3368
3369         tw32_f(MAC_STATUS,
3370              (MAC_STATUS_SYNC_CHANGED |
3371               MAC_STATUS_CFG_CHANGED |
3372               MAC_STATUS_MI_COMPLETION |
3373               MAC_STATUS_LNKSTATE_CHANGED));
3374         udelay(40);
3375
3376         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3377                 tw32_f(MAC_MI_MODE,
3378                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3379                 udelay(80);
3380         }
3381
3382         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3383
3384         /* Some third-party PHYs need to be reset on link going
3385          * down.
3386          */
3387         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3388              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3389              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3390             netif_carrier_ok(tp->dev)) {
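                /* BMSR latches link-down events: read it twice so the
                 * second read reflects the current link state.
                 */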
3391                 tg3_readphy(tp, MII_BMSR, &bmsr);
3392                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3393                     !(bmsr & BMSR_LSTATUS))
3394                         force_reset = 1;
3395         }
3396         if (force_reset)
3397                 tg3_phy_reset(tp);
3398
3399         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3400                 tg3_readphy(tp, MII_BMSR, &bmsr);
3401                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3402                     !tg3_flag(tp, INIT_COMPLETE))
3403                         bmsr = 0;
3404
3405                 if (!(bmsr & BMSR_LSTATUS)) {
3406                         err = tg3_init_5401phy_dsp(tp);
3407                         if (err)
3408                                 return err;
3409
3410                         tg3_readphy(tp, MII_BMSR, &bmsr);
3411                         for (i = 0; i < 1000; i++) {
3412                                 udelay(10);
3413                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3414                                     (bmsr & BMSR_LSTATUS)) {
3415                                         udelay(40);
3416                                         break;
3417                                 }
3418                         }
3419
3420                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3421                             TG3_PHY_REV_BCM5401_B0 &&
3422                             !(bmsr & BMSR_LSTATUS) &&
3423                             tp->link_config.active_speed == SPEED_1000) {
3424                                 err = tg3_phy_reset(tp);
3425                                 if (!err)
3426                                         err = tg3_init_5401phy_dsp(tp);
3427                                 if (err)
3428                                         return err;
3429                         }
3430                 }
3431         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3432                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3433                 /* 5701 {A0,B0} CRC bug workaround */
3434                 tg3_writephy(tp, 0x15, 0x0a75);
3435                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3436                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3437                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3438         }
3439
3440         /* Clear pending interrupts... */
3441         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3442         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3443
3444         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3445                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3446         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3447                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3448
3449         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3450             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3451                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3452                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3453                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3454                 else
3455                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3456         }
3457
3458         current_link_up = 0;
3459         current_speed = SPEED_INVALID;
3460         current_duplex = DUPLEX_INVALID;
3461
3462         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3463                 err = tg3_phy_auxctl_read(tp,
3464                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3465                                           &val);
3466                 if (!err && !(val & (1 << 10))) {
3467                         tg3_phy_auxctl_write(tp,
3468                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3469                                              val | (1 << 10));
3470                         goto relink;
3471                 }
3472         }
3473
3474         bmsr = 0;
3475         for (i = 0; i < 100; i++) {
3476                 tg3_readphy(tp, MII_BMSR, &bmsr);
3477                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3478                     (bmsr & BMSR_LSTATUS))
3479                         break;
3480                 udelay(40);
3481         }
3482
3483         if (bmsr & BMSR_LSTATUS) {
3484                 u32 aux_stat, bmcr;
3485
3486                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3487                 for (i = 0; i < 2000; i++) {
3488                         udelay(10);
3489                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3490                             aux_stat)
3491                                 break;
3492                 }
3493
3494                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3495                                              &current_speed,
3496                                              &current_duplex);
3497
3498                 bmcr = 0;
3499                 for (i = 0; i < 200; i++) {
3500                         tg3_readphy(tp, MII_BMCR, &bmcr);
3501                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
3502                                 continue;
3503                         if (bmcr && bmcr != 0x7fff)
3504                                 break;
3505                         udelay(10);
3506                 }
3507
3508                 lcl_adv = 0;
3509                 rmt_adv = 0;
3510
3511                 tp->link_config.active_speed = current_speed;
3512                 tp->link_config.active_duplex = current_duplex;
3513
3514                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3515                         if ((bmcr & BMCR_ANENABLE) &&
3516                             tg3_copper_is_advertising_all(tp,
3517                                                 tp->link_config.advertising)) {
3518                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3519                                                                   &rmt_adv))
3520                                         current_link_up = 1;
3521                         }
3522                 } else {
3523                         if (!(bmcr & BMCR_ANENABLE) &&
3524                             tp->link_config.speed == current_speed &&
3525                             tp->link_config.duplex == current_duplex &&
3526                             tp->link_config.flowctrl ==
3527                             tp->link_config.active_flowctrl) {
3528                                 current_link_up = 1;
3529                         }
3530                 }
3531
3532                 if (current_link_up == 1 &&
3533                     tp->link_config.active_duplex == DUPLEX_FULL)
3534                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3535         }
3536
3537 relink:
3538         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3539                 tg3_phy_copper_begin(tp);
3540
3541                 tg3_readphy(tp, MII_BMSR, &bmsr);
3542                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
3543                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
3544                         current_link_up = 1;
3545         }
3546
3547         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3548         if (current_link_up == 1) {
3549                 if (tp->link_config.active_speed == SPEED_100 ||
3550                     tp->link_config.active_speed == SPEED_10)
3551                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3552                 else
3553                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3554         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3555                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3556         else
3557                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3558
3559         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3560         if (tp->link_config.active_duplex == DUPLEX_HALF)
3561                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3562
3563         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3564                 if (current_link_up == 1 &&
3565                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3566                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3567                 else
3568                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3569         }
3570
3571         /* ??? Without this setting Netgear GA302T PHY does not
3572          * ??? send/receive packets...
3573          */
3574         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3575             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3576                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3577                 tw32_f(MAC_MI_MODE, tp->mi_mode);
3578                 udelay(80);
3579         }
3580
3581         tw32_f(MAC_MODE, tp->mac_mode);
3582         udelay(40);
3583
3584         tg3_phy_eee_adjust(tp, current_link_up);
3585
3586         if (tg3_flag(tp, USE_LINKCHG_REG)) {
3587                 /* Polled via timer. */
3588                 tw32_f(MAC_EVENT, 0);
3589         } else {
3590                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3591         }
3592         udelay(40);
3593
3594         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3595             current_link_up == 1 &&
3596             tp->link_config.active_speed == SPEED_1000 &&
3597             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
3598                 udelay(120);
3599                 tw32_f(MAC_STATUS,
3600                      (MAC_STATUS_SYNC_CHANGED |
3601                       MAC_STATUS_CFG_CHANGED));
3602                 udelay(40);
3603                 tg3_write_mem(tp,
3604                               NIC_SRAM_FIRMWARE_MBOX,
3605                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3606         }
3607
3608         /* Prevent send BD corruption. */
3609         if (tg3_flag(tp, CLKREQ_BUG)) {
3610                 u16 oldlnkctl, newlnkctl;
3611
3612                 pci_read_config_word(tp->pdev,
3613                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3614                                      &oldlnkctl);
3615                 if (tp->link_config.active_speed == SPEED_100 ||
3616                     tp->link_config.active_speed == SPEED_10)
3617                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3618                 else
3619                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3620                 if (newlnkctl != oldlnkctl)
3621                         pci_write_config_word(tp->pdev,
3622                                               pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3623                                               newlnkctl);
3624         }
3625
3626         if (current_link_up != netif_carrier_ok(tp->dev)) {
3627                 if (current_link_up)
3628                         netif_carrier_on(tp->dev);
3629                 else
3630                         netif_carrier_off(tp->dev);
3631                 tg3_link_report(tp);
3632         }
3633
3634         return 0;
3635 }
3636
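/* State for the software implementation of the IEEE 802.3 clause 37
 * (1000BASE-X) autonegotiation protocol, used when the hardware
 * autoneg block is not doing the job.
 */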
3637 struct tg3_fiber_aneginfo {
3638         int state;
3639 #define ANEG_STATE_UNKNOWN              0
3640 #define ANEG_STATE_AN_ENABLE            1
3641 #define ANEG_STATE_RESTART_INIT         2
3642 #define ANEG_STATE_RESTART              3
3643 #define ANEG_STATE_DISABLE_LINK_OK      4
3644 #define ANEG_STATE_ABILITY_DETECT_INIT  5
3645 #define ANEG_STATE_ABILITY_DETECT       6
3646 #define ANEG_STATE_ACK_DETECT_INIT      7
3647 #define ANEG_STATE_ACK_DETECT           8
3648 #define ANEG_STATE_COMPLETE_ACK_INIT    9
3649 #define ANEG_STATE_COMPLETE_ACK         10
3650 #define ANEG_STATE_IDLE_DETECT_INIT     11
3651 #define ANEG_STATE_IDLE_DETECT          12
3652 #define ANEG_STATE_LINK_OK              13
3653 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
3654 #define ANEG_STATE_NEXT_PAGE_WAIT       15
3655
3656         u32 flags;
3657 #define MR_AN_ENABLE            0x00000001
3658 #define MR_RESTART_AN           0x00000002
3659 #define MR_AN_COMPLETE          0x00000004
3660 #define MR_PAGE_RX              0x00000008
3661 #define MR_NP_LOADED            0x00000010
3662 #define MR_TOGGLE_TX            0x00000020
3663 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
3664 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
3665 #define MR_LP_ADV_SYM_PAUSE     0x00000100
3666 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
3667 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3668 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3669 #define MR_LP_ADV_NEXT_PAGE     0x00001000
3670 #define MR_TOGGLE_RX            0x00002000
3671 #define MR_NP_RX                0x00004000
3672
3673 #define MR_LINK_OK              0x80000000
3674
3675         unsigned long link_time, cur_time;
3676
3677         u32 ability_match_cfg;
3678         int ability_match_count;
3679
3680         char ability_match, idle_match, ack_match;
3681
3682         u32 txconfig, rxconfig;
3683 #define ANEG_CFG_NP             0x00000080
3684 #define ANEG_CFG_ACK            0x00000040
3685 #define ANEG_CFG_RF2            0x00000020
3686 #define ANEG_CFG_RF1            0x00000010
3687 #define ANEG_CFG_PS2            0x00000001
3688 #define ANEG_CFG_PS1            0x00008000
3689 #define ANEG_CFG_HD             0x00004000
3690 #define ANEG_CFG_FD             0x00002000
3691 #define ANEG_CFG_INVAL          0x00001f06
3692
3693 };
3694 #define ANEG_OK         0
3695 #define ANEG_DONE       1
3696 #define ANEG_TIMER_ENAB 2
3697 #define ANEG_FAILED     -1
3698
3699 #define ANEG_STATE_SETTLE_TIME  10000   /* ticks, ~1 us each (see fiber_autoneg) */
3700
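/* Advance the fiber autoneg state machine by one tick.  Returns
 * ANEG_OK to keep going, ANEG_TIMER_ENAB while waiting out a settle
 * time, and ANEG_DONE or ANEG_FAILED as terminal states.
 */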
3701 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3702                                    struct tg3_fiber_aneginfo *ap)
3703 {
3704         u16 flowctrl;
3705         unsigned long delta;
3706         u32 rx_cfg_reg;
3707         int ret;
3708
3709         if (ap->state == ANEG_STATE_UNKNOWN) {
3710                 ap->rxconfig = 0;
3711                 ap->link_time = 0;
3712                 ap->cur_time = 0;
3713                 ap->ability_match_cfg = 0;
3714                 ap->ability_match_count = 0;
3715                 ap->ability_match = 0;
3716                 ap->idle_match = 0;
3717                 ap->ack_match = 0;
3718         }
3719         ap->cur_time++;
3720
3721         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3722                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3723
3724                 if (rx_cfg_reg != ap->ability_match_cfg) {
3725                         ap->ability_match_cfg = rx_cfg_reg;
3726                         ap->ability_match = 0;
3727                         ap->ability_match_count = 0;
3728                 } else {
3729                         if (++ap->ability_match_count > 1) {
3730                                 ap->ability_match = 1;
3731                                 ap->ability_match_cfg = rx_cfg_reg;
3732                         }
3733                 }
3734                 if (rx_cfg_reg & ANEG_CFG_ACK)
3735                         ap->ack_match = 1;
3736                 else
3737                         ap->ack_match = 0;
3738
3739                 ap->idle_match = 0;
3740         } else {
3741                 ap->idle_match = 1;
3742                 ap->ability_match_cfg = 0;
3743                 ap->ability_match_count = 0;
3744                 ap->ability_match = 0;
3745                 ap->ack_match = 0;
3746
3747                 rx_cfg_reg = 0;
3748         }
3749
3750         ap->rxconfig = rx_cfg_reg;
3751         ret = ANEG_OK;
3752
3753         switch (ap->state) {
3754         case ANEG_STATE_UNKNOWN:
3755                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3756                         ap->state = ANEG_STATE_AN_ENABLE;
3757
3758                 /* fallthru */
3759         case ANEG_STATE_AN_ENABLE:
3760                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3761                 if (ap->flags & MR_AN_ENABLE) {
3762                         ap->link_time = 0;
3763                         ap->cur_time = 0;
3764                         ap->ability_match_cfg = 0;
3765                         ap->ability_match_count = 0;
3766                         ap->ability_match = 0;
3767                         ap->idle_match = 0;
3768                         ap->ack_match = 0;
3769
3770                         ap->state = ANEG_STATE_RESTART_INIT;
3771                 } else {
3772                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
3773                 }
3774                 break;
3775
3776         case ANEG_STATE_RESTART_INIT:
3777                 ap->link_time = ap->cur_time;
3778                 ap->flags &= ~(MR_NP_LOADED);
3779                 ap->txconfig = 0;
3780                 tw32(MAC_TX_AUTO_NEG, 0);
3781                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3782                 tw32_f(MAC_MODE, tp->mac_mode);
3783                 udelay(40);
3784
3785                 ret = ANEG_TIMER_ENAB;
3786                 ap->state = ANEG_STATE_RESTART;
3787
3788                 /* fallthru */
3789         case ANEG_STATE_RESTART:
3790                 delta = ap->cur_time - ap->link_time;
3791                 if (delta > ANEG_STATE_SETTLE_TIME)
3792                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3793                 else
3794                         ret = ANEG_TIMER_ENAB;
3795                 break;
3796
3797         case ANEG_STATE_DISABLE_LINK_OK:
3798                 ret = ANEG_DONE;
3799                 break;
3800
3801         case ANEG_STATE_ABILITY_DETECT_INIT:
3802                 ap->flags &= ~(MR_TOGGLE_TX);
3803                 ap->txconfig = ANEG_CFG_FD;
3804                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3805                 if (flowctrl & ADVERTISE_1000XPAUSE)
3806                         ap->txconfig |= ANEG_CFG_PS1;
3807                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3808                         ap->txconfig |= ANEG_CFG_PS2;
3809                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3810                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3811                 tw32_f(MAC_MODE, tp->mac_mode);
3812                 udelay(40);
3813
3814                 ap->state = ANEG_STATE_ABILITY_DETECT;
3815                 break;
3816
3817         case ANEG_STATE_ABILITY_DETECT:
3818                 if (ap->ability_match != 0 && ap->rxconfig != 0)
3819                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
3820                 break;
3821
3822         case ANEG_STATE_ACK_DETECT_INIT:
3823                 ap->txconfig |= ANEG_CFG_ACK;
3824                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3825                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3826                 tw32_f(MAC_MODE, tp->mac_mode);
3827                 udelay(40);
3828
3829                 ap->state = ANEG_STATE_ACK_DETECT;
3830
3831                 /* fallthru */
3832         case ANEG_STATE_ACK_DETECT:
3833                 if (ap->ack_match != 0) {
3834                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3835                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3836                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3837                         } else {
3838                                 ap->state = ANEG_STATE_AN_ENABLE;
3839                         }
3840                 } else if (ap->ability_match != 0 &&
3841                            ap->rxconfig == 0) {
3842                         ap->state = ANEG_STATE_AN_ENABLE;
3843                 }
3844                 break;
3845
3846         case ANEG_STATE_COMPLETE_ACK_INIT:
3847                 if (ap->rxconfig & ANEG_CFG_INVAL) {
3848                         ret = ANEG_FAILED;
3849                         break;
3850                 }
3851                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3852                                MR_LP_ADV_HALF_DUPLEX |
3853                                MR_LP_ADV_SYM_PAUSE |
3854                                MR_LP_ADV_ASYM_PAUSE |
3855                                MR_LP_ADV_REMOTE_FAULT1 |
3856                                MR_LP_ADV_REMOTE_FAULT2 |
3857                                MR_LP_ADV_NEXT_PAGE |
3858                                MR_TOGGLE_RX |
3859                                MR_NP_RX);
3860                 if (ap->rxconfig & ANEG_CFG_FD)
3861                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3862                 if (ap->rxconfig & ANEG_CFG_HD)
3863                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3864                 if (ap->rxconfig & ANEG_CFG_PS1)
3865                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
3866                 if (ap->rxconfig & ANEG_CFG_PS2)
3867                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3868                 if (ap->rxconfig & ANEG_CFG_RF1)
3869                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3870                 if (ap->rxconfig & ANEG_CFG_RF2)
3871                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3872                 if (ap->rxconfig & ANEG_CFG_NP)
3873                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
3874
3875                 ap->link_time = ap->cur_time;
3876
3877                 ap->flags ^= (MR_TOGGLE_TX);
3878                 if (ap->rxconfig & 0x0008)
3879                         ap->flags |= MR_TOGGLE_RX;
3880                 if (ap->rxconfig & ANEG_CFG_NP)
3881                         ap->flags |= MR_NP_RX;
3882                 ap->flags |= MR_PAGE_RX;
3883
3884                 ap->state = ANEG_STATE_COMPLETE_ACK;
3885                 ret = ANEG_TIMER_ENAB;
3886                 break;
3887
3888         case ANEG_STATE_COMPLETE_ACK:
3889                 if (ap->ability_match != 0 &&
3890                     ap->rxconfig == 0) {
3891                         ap->state = ANEG_STATE_AN_ENABLE;
3892                         break;
3893                 }
3894                 delta = ap->cur_time - ap->link_time;
3895                 if (delta > ANEG_STATE_SETTLE_TIME) {
3896                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3897                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3898                         } else {
3899                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3900                                     !(ap->flags & MR_NP_RX)) {
3901                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3902                                 } else {
3903                                         ret = ANEG_FAILED;
3904                                 }
3905                         }
3906                 }
3907                 break;
3908
3909         case ANEG_STATE_IDLE_DETECT_INIT:
3910                 ap->link_time = ap->cur_time;
3911                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3912                 tw32_f(MAC_MODE, tp->mac_mode);
3913                 udelay(40);
3914
3915                 ap->state = ANEG_STATE_IDLE_DETECT;
3916                 ret = ANEG_TIMER_ENAB;
3917                 break;
3918
3919         case ANEG_STATE_IDLE_DETECT:
3920                 if (ap->ability_match != 0 &&
3921                     ap->rxconfig == 0) {
3922                         ap->state = ANEG_STATE_AN_ENABLE;
3923                         break;
3924                 }
3925                 delta = ap->cur_time - ap->link_time;
3926                 if (delta > ANEG_STATE_SETTLE_TIME) {
3927                         /* XXX another gem from the Broadcom driver :( */
3928                         ap->state = ANEG_STATE_LINK_OK;
3929                 }
3930                 break;
3931
3932         case ANEG_STATE_LINK_OK:
3933                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3934                 ret = ANEG_DONE;
3935                 break;
3936
3937         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3938                 /* ??? unimplemented */
3939                 break;
3940
3941         case ANEG_STATE_NEXT_PAGE_WAIT:
3942                 /* ??? unimplemented */
3943                 break;
3944
3945         default:
3946                 ret = ANEG_FAILED;
3947                 break;
3948         }
3949
3950         return ret;
3951 }
3952
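/* Run the autoneg state machine to completion, ticking it roughly
 * once per microsecond for up to ~195 ms, then hand the negotiated
 * tx/rx config words back to the caller.
 */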
3953 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3954 {
3955         int res = 0;
3956         struct tg3_fiber_aneginfo aninfo;
3957         int status = ANEG_FAILED;
3958         unsigned int tick;
3959         u32 tmp;
3960
3961         tw32_f(MAC_TX_AUTO_NEG, 0);
3962
3963         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3964         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3965         udelay(40);
3966
3967         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3968         udelay(40);
3969
3970         memset(&aninfo, 0, sizeof(aninfo));
3971         aninfo.flags |= MR_AN_ENABLE;
3972         aninfo.state = ANEG_STATE_UNKNOWN;
3973         aninfo.cur_time = 0;
3974         tick = 0;
3975         while (++tick < 195000) {
3976                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3977                 if (status == ANEG_DONE || status == ANEG_FAILED)
3978                         break;
3979
3980                 udelay(1);
3981         }
3982
3983         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3984         tw32_f(MAC_MODE, tp->mac_mode);
3985         udelay(40);
3986
3987         *txflags = aninfo.txconfig;
3988         *rxflags = aninfo.flags;
3989
3990         if (status == ANEG_DONE &&
3991             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3992                              MR_LP_ADV_FULL_DUPLEX)))
3993                 res = 1;
3994
3995         return res;
3996 }
3997
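/* Bring the BCM8002 SerDes PHY out of reset.  The raw register
 * addresses and values below look like undocumented vendor magic,
 * so the sequence is kept verbatim.
 */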
3998 static void tg3_init_bcm8002(struct tg3 *tp)
3999 {
4000         u32 mac_status = tr32(MAC_STATUS);
4001         int i;
4002
4003         /* Reset when initializing for the first time or when we have a link. */
4004         if (tg3_flag(tp, INIT_COMPLETE) &&
4005             !(mac_status & MAC_STATUS_PCS_SYNCED))
4006                 return;
4007
4008         /* Set PLL lock range. */
4009         tg3_writephy(tp, 0x16, 0x8007);
4010
4011         /* SW reset */
4012         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4013
4014         /* Wait for reset to complete. */
4015         /* XXX schedule_timeout() ... */
4016         for (i = 0; i < 500; i++)
4017                 udelay(10);
4018
4019         /* Config mode; select PMA/Ch 1 regs. */
4020         tg3_writephy(tp, 0x10, 0x8411);
4021
4022         /* Enable auto-lock and comdet, select txclk for tx. */
4023         tg3_writephy(tp, 0x11, 0x0a10);
4024
4025         tg3_writephy(tp, 0x18, 0x00a0);
4026         tg3_writephy(tp, 0x16, 0x41ff);
4027
4028         /* Assert and deassert POR. */
4029         tg3_writephy(tp, 0x13, 0x0400);
4030         udelay(40);
4031         tg3_writephy(tp, 0x13, 0x0000);
4032
4033         tg3_writephy(tp, 0x11, 0x0a50);
4034         udelay(40);
4035         tg3_writephy(tp, 0x11, 0x0a10);
4036
4037         /* Wait for signal to stabilize */
4038         /* XXX schedule_timeout() ... */
4039         for (i = 0; i < 15000; i++)
4040                 udelay(10);
4041
4042         /* Deselect the channel register so we can read the PHYID
4043          * later.
4044          */
4045         tg3_writephy(tp, 0x10, 0x8011);
4046 }
4047
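/* Fiber link setup using the SG_DIG hardware autoneg block; returns
 * nonzero if the link is up.  The serdes_cfg fixups are a workaround
 * applied on everything except 5704 A0/A1.
 */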
4048 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4049 {
4050         u16 flowctrl;
4051         u32 sg_dig_ctrl, sg_dig_status;
4052         u32 serdes_cfg, expected_sg_dig_ctrl;
4053         int workaround, port_a;
4054         int current_link_up;
4055
4056         serdes_cfg = 0;
4057         expected_sg_dig_ctrl = 0;
4058         workaround = 0;
4059         port_a = 1;
4060         current_link_up = 0;
4061
4062         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4063             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4064                 workaround = 1;
4065                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4066                         port_a = 0;
4067
4068                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4069                 /* preserve bits 20-23 for voltage regulator */
4070                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4071         }
4072
4073         sg_dig_ctrl = tr32(SG_DIG_CTRL);
4074
4075         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4076                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4077                         if (workaround) {
4078                                 u32 val = serdes_cfg;
4079
4080                                 if (port_a)
4081                                         val |= 0xc010000;
4082                                 else
4083                                         val |= 0x4010000;
4084                                 tw32_f(MAC_SERDES_CFG, val);
4085                         }
4086
4087                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4088                 }
4089                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4090                         tg3_setup_flow_control(tp, 0, 0);
4091                         current_link_up = 1;
4092                 }
4093                 goto out;
4094         }
4095
4096         /* Want auto-negotiation.  */
4097         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4098
4099         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4100         if (flowctrl & ADVERTISE_1000XPAUSE)
4101                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4102         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4103                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4104
4105         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4106                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4107                     tp->serdes_counter &&
4108                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
4109                                     MAC_STATUS_RCVD_CFG)) ==
4110                      MAC_STATUS_PCS_SYNCED)) {
4111                         tp->serdes_counter--;
4112                         current_link_up = 1;
4113                         goto out;
4114                 }
4115 restart_autoneg:
4116                 if (workaround)
4117                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4118                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4119                 udelay(5);
4120                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4121
4122                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4123                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4124         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4125                                  MAC_STATUS_SIGNAL_DET)) {
4126                 sg_dig_status = tr32(SG_DIG_STATUS);
4127                 mac_status = tr32(MAC_STATUS);
4128
4129                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4130                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
4131                         u32 local_adv = 0, remote_adv = 0;
4132
4133                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4134                                 local_adv |= ADVERTISE_1000XPAUSE;
4135                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4136                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4137
4138                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4139                                 remote_adv |= LPA_1000XPAUSE;
4140                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4141                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4142
4143                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4144                         current_link_up = 1;
4145                         tp->serdes_counter = 0;
4146                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4147                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4148                         if (tp->serdes_counter)
4149                                 tp->serdes_counter--;
4150                         else {
4151                                 if (workaround) {
4152                                         u32 val = serdes_cfg;
4153
4154                                         if (port_a)
4155                                                 val |= 0xc010000;
4156                                         else
4157                                                 val |= 0x4010000;
4158
4159                                         tw32_f(MAC_SERDES_CFG, val);
4160                                 }
4161
4162                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4163                                 udelay(40);
4164
4165                                 /* Link parallel detection: link is up
4166                                  * only if we have PCS_SYNC and are not
4167                                  * receiving config code words. */
4168                                 mac_status = tr32(MAC_STATUS);
4169                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4170                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4171                                         tg3_setup_flow_control(tp, 0, 0);
4172                                         current_link_up = 1;
4173                                         tp->phy_flags |=
4174                                                 TG3_PHYFLG_PARALLEL_DETECT;
4175                                         tp->serdes_counter =
4176                                                 SERDES_PARALLEL_DET_TIMEOUT;
4177                                 } else
4178                                         goto restart_autoneg;
4179                         }
4180                 }
4181         } else {
4182                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4183                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4184         }
4185
4186 out:
4187         return current_link_up;
4188 }
4189
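/* Fiber link setup without the hardware autoneg block: either run
 * the software clause 37 state machine or simply force a 1000FD
 * link.  Returns nonzero if the link is up.
 */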
4190 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4191 {
4192         int current_link_up = 0;
4193
4194         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4195                 goto out;
4196
4197         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4198                 u32 txflags, rxflags;
4199                 int i;
4200
4201                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4202                         u32 local_adv = 0, remote_adv = 0;
4203
4204                         if (txflags & ANEG_CFG_PS1)
4205                                 local_adv |= ADVERTISE_1000XPAUSE;
4206                         if (txflags & ANEG_CFG_PS2)
4207                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4208
4209                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
4210                                 remote_adv |= LPA_1000XPAUSE;
4211                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4212                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4213
4214                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4215
4216                         current_link_up = 1;
4217                 }
4218                 for (i = 0; i < 30; i++) {
4219                         udelay(20);
4220                         tw32_f(MAC_STATUS,
4221                                (MAC_STATUS_SYNC_CHANGED |
4222                                 MAC_STATUS_CFG_CHANGED));
4223                         udelay(40);
4224                         if ((tr32(MAC_STATUS) &
4225                              (MAC_STATUS_SYNC_CHANGED |
4226                               MAC_STATUS_CFG_CHANGED)) == 0)
4227                                 break;
4228                 }
4229
4230                 mac_status = tr32(MAC_STATUS);
4231                 if (current_link_up == 0 &&
4232                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
4233                     !(mac_status & MAC_STATUS_RCVD_CFG))
4234                         current_link_up = 1;
4235         } else {
4236                 tg3_setup_flow_control(tp, 0, 0);
4237
4238                 /* Forcing 1000FD link up. */
4239                 current_link_up = 1;
4240
4241                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4242                 udelay(40);
4243
4244                 tw32_f(MAC_MODE, tp->mac_mode);
4245                 udelay(40);
4246         }
4247
4248 out:
4249         return current_link_up;
4250 }
4251
4252 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4253 {
4254         u32 orig_pause_cfg;
4255         u16 orig_active_speed;
4256         u8 orig_active_duplex;
4257         u32 mac_status;
4258         int current_link_up;
4259         int i;
4260
4261         orig_pause_cfg = tp->link_config.active_flowctrl;
4262         orig_active_speed = tp->link_config.active_speed;
4263         orig_active_duplex = tp->link_config.active_duplex;
4264
4265         if (!tg3_flag(tp, HW_AUTONEG) &&
4266             netif_carrier_ok(tp->dev) &&
4267             tg3_flag(tp, INIT_COMPLETE)) {
4268                 mac_status = tr32(MAC_STATUS);
4269                 mac_status &= (MAC_STATUS_PCS_SYNCED |
4270                                MAC_STATUS_SIGNAL_DET |
4271                                MAC_STATUS_CFG_CHANGED |
4272                                MAC_STATUS_RCVD_CFG);
4273                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4274                                    MAC_STATUS_SIGNAL_DET)) {
4275                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4276                                             MAC_STATUS_CFG_CHANGED));
4277                         return 0;
4278                 }
4279         }
4280
4281         tw32_f(MAC_TX_AUTO_NEG, 0);
4282
4283         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4284         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4285         tw32_f(MAC_MODE, tp->mac_mode);
4286         udelay(40);
4287
4288         if (tp->phy_id == TG3_PHY_ID_BCM8002)
4289                 tg3_init_bcm8002(tp);
4290
4291         /* Enable link change event even when serdes polling.  */
4292         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4293         udelay(40);
4294
4295         current_link_up = 0;
4296         mac_status = tr32(MAC_STATUS);
4297
4298         if (tg3_flag(tp, HW_AUTONEG))
4299                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4300         else
4301                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4302
4303         tp->napi[0].hw_status->status =
4304                 (SD_STATUS_UPDATED |
4305                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4306
4307         for (i = 0; i < 100; i++) {
4308                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4309                                     MAC_STATUS_CFG_CHANGED));
4310                 udelay(5);
4311                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4312                                          MAC_STATUS_CFG_CHANGED |
4313                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4314                         break;
4315         }
4316
4317         mac_status = tr32(MAC_STATUS);
4318         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4319                 current_link_up = 0;
4320                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4321                     tp->serdes_counter == 0) {
4322                         tw32_f(MAC_MODE, (tp->mac_mode |
4323                                           MAC_MODE_SEND_CONFIGS));
4324                         udelay(1);
4325                         tw32_f(MAC_MODE, tp->mac_mode);
4326                 }
4327         }
4328
4329         if (current_link_up == 1) {
4330                 tp->link_config.active_speed = SPEED_1000;
4331                 tp->link_config.active_duplex = DUPLEX_FULL;
4332                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4333                                     LED_CTRL_LNKLED_OVERRIDE |
4334                                     LED_CTRL_1000MBPS_ON));
4335         } else {
4336                 tp->link_config.active_speed = SPEED_INVALID;
4337                 tp->link_config.active_duplex = DUPLEX_INVALID;
4338                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4339                                     LED_CTRL_LNKLED_OVERRIDE |
4340                                     LED_CTRL_TRAFFIC_OVERRIDE));
4341         }
4342
4343         if (current_link_up != netif_carrier_ok(tp->dev)) {
4344                 if (current_link_up)
4345                         netif_carrier_on(tp->dev);
4346                 else
4347                         netif_carrier_off(tp->dev);
4348                 tg3_link_report(tp);
4349         } else {
4350                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4351                 if (orig_pause_cfg != now_pause_cfg ||
4352                     orig_active_speed != tp->link_config.active_speed ||
4353                     orig_active_duplex != tp->link_config.active_duplex)
4354                         tg3_link_report(tp);
4355         }
4356
4357         return 0;
4358 }
4359
4360 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4361 {
4362         int current_link_up, err = 0;
4363         u32 bmsr, bmcr;
4364         u16 current_speed;
4365         u8 current_duplex;
4366         u32 local_adv, remote_adv;
4367
4368         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4369         tw32_f(MAC_MODE, tp->mac_mode);
4370         udelay(40);
4371
4372         tw32(MAC_EVENT, 0);
4373
4374         tw32_f(MAC_STATUS,
4375              (MAC_STATUS_SYNC_CHANGED |
4376               MAC_STATUS_CFG_CHANGED |
4377               MAC_STATUS_MI_COMPLETION |
4378               MAC_STATUS_LNKSTATE_CHANGED));
4379         udelay(40);
4380
4381         if (force_reset)
4382                 tg3_phy_reset(tp);
4383
4384         current_link_up = 0;
4385         current_speed = SPEED_INVALID;
4386         current_duplex = DUPLEX_INVALID;
4387
4388         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4389         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4390         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4391                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4392                         bmsr |= BMSR_LSTATUS;
4393                 else
4394                         bmsr &= ~BMSR_LSTATUS;
4395         }
4396
4397         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4398
4399         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4400             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4401                 /* do nothing, just check for link up at the end */
4402         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4403                 u32 adv, new_adv;
4404
4405                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4406                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4407                                   ADVERTISE_1000XPAUSE |
4408                                   ADVERTISE_1000XPSE_ASYM |
4409                                   ADVERTISE_SLCT);
4410
4411                 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4412
4413                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4414                         new_adv |= ADVERTISE_1000XHALF;
4415                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4416                         new_adv |= ADVERTISE_1000XFULL;
4417
4418                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4419                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
4420                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4421                         tg3_writephy(tp, MII_BMCR, bmcr);
4422
4423                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4424                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4425                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4426
4427                         return err;
4428                 }
4429         } else {
4430                 u32 new_bmcr;
4431
4432                 bmcr &= ~BMCR_SPEED1000;
4433                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4434
4435                 if (tp->link_config.duplex == DUPLEX_FULL)
4436                         new_bmcr |= BMCR_FULLDPLX;
4437
4438                 if (new_bmcr != bmcr) {
4439                         /* BMCR_SPEED1000 is a reserved bit that needs
4440                          * to be set on write.
4441                          */
4442                         new_bmcr |= BMCR_SPEED1000;
4443
4444                         /* Force a linkdown */
4445                         if (netif_carrier_ok(tp->dev)) {
4446                                 u32 adv;
4447
4448                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4449                                 adv &= ~(ADVERTISE_1000XFULL |
4450                                          ADVERTISE_1000XHALF |
4451                                          ADVERTISE_SLCT);
4452                                 tg3_writephy(tp, MII_ADVERTISE, adv);
4453                                 tg3_writephy(tp, MII_BMCR, bmcr |
4454                                                            BMCR_ANRESTART |
4455                                                            BMCR_ANENABLE);
4456                                 udelay(10);
4457                                 netif_carrier_off(tp->dev);
4458                         }
4459                         tg3_writephy(tp, MII_BMCR, new_bmcr);
4460                         bmcr = new_bmcr;
4461                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4462                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4463                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4464                             ASIC_REV_5714) {
4465                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4466                                         bmsr |= BMSR_LSTATUS;
4467                                 else
4468                                         bmsr &= ~BMSR_LSTATUS;
4469                         }
4470                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4471                 }
4472         }
4473
4474         if (bmsr & BMSR_LSTATUS) {
4475                 current_speed = SPEED_1000;
4476                 current_link_up = 1;
4477                 if (bmcr & BMCR_FULLDPLX)
4478                         current_duplex = DUPLEX_FULL;
4479                 else
4480                         current_duplex = DUPLEX_HALF;
4481
4482                 local_adv = 0;
4483                 remote_adv = 0;
4484
4485                 if (bmcr & BMCR_ANENABLE) {
4486                         u32 common;
4487
4488                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4489                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4490                         common = local_adv & remote_adv;
4491                         if (common & (ADVERTISE_1000XHALF |
4492                                       ADVERTISE_1000XFULL)) {
4493                                 if (common & ADVERTISE_1000XFULL)
4494                                         current_duplex = DUPLEX_FULL;
4495                                 else
4496                                         current_duplex = DUPLEX_HALF;
4497                         } else if (!tg3_flag(tp, 5780_CLASS)) {
4498                                 /* Link is up via parallel detect */
4499                         } else {
4500                                 current_link_up = 0;
4501                         }
4502                 }
4503         }
4504
4505         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4506                 tg3_setup_flow_control(tp, local_adv, remote_adv);
4507
4508         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4509         if (tp->link_config.active_duplex == DUPLEX_HALF)
4510                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4511
4512         tw32_f(MAC_MODE, tp->mac_mode);
4513         udelay(40);
4514
4515         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4516
4517         tp->link_config.active_speed = current_speed;
4518         tp->link_config.active_duplex = current_duplex;
4519
4520         if (current_link_up != netif_carrier_ok(tp->dev)) {
4521                 if (current_link_up)
4522                         netif_carrier_on(tp->dev);
4523                 else {
4524                         netif_carrier_off(tp->dev);
4525                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4526                 }
4527                 tg3_link_report(tp);
4528         }
4529         return err;
4530 }
4531
4532 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4533 {
4534         if (tp->serdes_counter) {
4535                 /* Give autoneg time to complete. */
4536                 tp->serdes_counter--;
4537                 return;
4538         }
4539
4540         if (!netif_carrier_ok(tp->dev) &&
4541             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4542                 u32 bmcr;
4543
4544                 tg3_readphy(tp, MII_BMCR, &bmcr);
4545                 if (bmcr & BMCR_ANENABLE) {
4546                         u32 phy1, phy2;
4547
4548                         /* Select shadow register 0x1f */
4549                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4550                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4551
4552                         /* Select expansion interrupt status register */
4553                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4554                                          MII_TG3_DSP_EXP1_INT_STAT);
4555                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4556                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4557
4558                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4559                                 /* We have signal detect and not receiving
4560                                  * config code words, link is up by parallel
4561                                  * detection.
4562                                  */
4563
4564                                 bmcr &= ~BMCR_ANENABLE;
4565                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4566                                 tg3_writephy(tp, MII_BMCR, bmcr);
4567                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4568                         }
4569                 }
4570         } else if (netif_carrier_ok(tp->dev) &&
4571                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4572                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4573                 u32 phy2;
4574
4575                 /* Select expansion interrupt status register */
4576                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4577                                  MII_TG3_DSP_EXP1_INT_STAT);
4578                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4579                 if (phy2 & 0x20) {
4580                         u32 bmcr;
4581
4582                         /* Config code words received, turn on autoneg. */
4583                         tg3_readphy(tp, MII_BMCR, &bmcr);
4584                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4585
4586                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4587
4588                 }
4589         }
4590 }
4591
4592 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4593 {
4594         u32 val;
4595         int err;
4596
4597         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4598                 err = tg3_setup_fiber_phy(tp, force_reset);
4599         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4600                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4601         else
4602                 err = tg3_setup_copper_phy(tp, force_reset);
4603
4604         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4605                 u32 scale;
4606
4607                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4608                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4609                         scale = 65;
4610                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4611                         scale = 6;
4612                 else
4613                         scale = 12;
4614
4615                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4616                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4617                 tw32(GRC_MISC_CFG, val);
4618         }
4619
4620         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4621               (6 << TX_LENGTHS_IPG_SHIFT);
4622         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4623                 val |= tr32(MAC_TX_LENGTHS) &
4624                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
4625                         TX_LENGTHS_CNT_DWN_VAL_MSK);
4626
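     	/* Half-duplex gigabit needs an extended slot time for carrier
     	 * extension, which is presumably why a larger SLOT_TIME value
     	 * is programmed for 1000/half below than for all other modes.
     	 */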
4627         if (tp->link_config.active_speed == SPEED_1000 &&
4628             tp->link_config.active_duplex == DUPLEX_HALF)
4629                 tw32(MAC_TX_LENGTHS, val |
4630                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
4631         else
4632                 tw32(MAC_TX_LENGTHS, val |
4633                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4634
4635         if (!tg3_flag(tp, 5705_PLUS)) {
4636                 if (netif_carrier_ok(tp->dev)) {
4637                         tw32(HOSTCC_STAT_COAL_TICKS,
4638                              tp->coal.stats_block_coalesce_usecs);
4639                 } else {
4640                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
4641                 }
4642         }
4643
4644         if (tg3_flag(tp, ASPM_WORKAROUND)) {
4645                 val = tr32(PCIE_PWR_MGMT_THRESH);
4646                 if (!netif_carrier_ok(tp->dev))
4647                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4648                               tp->pwrmgmt_thresh;
4649                 else
4650                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4651                 tw32(PCIE_PWR_MGMT_THRESH, val);
4652         }
4653
4654         return err;
4655 }
4656
4657 static inline int tg3_irq_sync(struct tg3 *tp)
4658 {
4659         return tp->irq_sync;
4660 }
4661
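     /* Helper for the register dumps below: reads 'len' bytes of
      * registers starting at 'off' into the caller's buffer.  The buffer
      * is indexed by register offset (note dst is advanced by 'off'
      * first), so every register lands at its own offset in the dump.
      */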
4662 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4663 {
4664         int i;
4665
4666         dst = (u32 *)((u8 *)dst + off);
4667         for (i = 0; i < len; i += sizeof(u32))
4668                 *dst++ = tr32(off + i);
4669 }
4670
4671 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4672 {
4673         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4674         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4675         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4676         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4677         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4678         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4679         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4680         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4681         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4682         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4683         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4684         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4685         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4686         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4687         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4688         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4689         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4690         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4691         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4692
4693         if (tg3_flag(tp, SUPPORT_MSIX))
4694                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4695
4696         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4697         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4698         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4699         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4700         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4701         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4702         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4703         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4704
4705         if (!tg3_flag(tp, 5705_PLUS)) {
4706                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4707                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4708                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4709         }
4710
4711         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4712         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4713         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4714         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4715         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4716
4717         if (tg3_flag(tp, NVRAM))
4718                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4719 }
4720
4721 static void tg3_dump_state(struct tg3 *tp)
4722 {
4723         int i;
4724         u32 *regs;
4725
4726         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4727         if (!regs) {
4728                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4729                 return;
4730         }
4731
4732         if (tg3_flag(tp, PCI_EXPRESS)) {
4733                 /* Read up to but not including private PCI registers */
4734                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4735                         regs[i / sizeof(u32)] = tr32(i);
4736         } else
4737                 tg3_dump_legacy_regs(tp, regs);
4738
4739         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4740                 if (!regs[i + 0] && !regs[i + 1] &&
4741                     !regs[i + 2] && !regs[i + 3])
4742                         continue;
4743
4744                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4745                            i * 4,
4746                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4747         }
4748
4749         kfree(regs);
4750
4751         for (i = 0; i < tp->irq_cnt; i++) {
4752                 struct tg3_napi *tnapi = &tp->napi[i];
4753
4754                 /* SW status block */
4755                 netdev_err(tp->dev,
4756                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4757                            i,
4758                            tnapi->hw_status->status,
4759                            tnapi->hw_status->status_tag,
4760                            tnapi->hw_status->rx_jumbo_consumer,
4761                            tnapi->hw_status->rx_consumer,
4762                            tnapi->hw_status->rx_mini_consumer,
4763                            tnapi->hw_status->idx[0].rx_producer,
4764                            tnapi->hw_status->idx[0].tx_consumer);
4765
4766                 netdev_err(tp->dev,
4767                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4768                            i,
4769                            tnapi->last_tag, tnapi->last_irq_tag,
4770                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4771                            tnapi->rx_rcb_ptr,
4772                            tnapi->prodring.rx_std_prod_idx,
4773                            tnapi->prodring.rx_std_cons_idx,
4774                            tnapi->prodring.rx_jmb_prod_idx,
4775                            tnapi->prodring.rx_jmb_cons_idx);
4776         }
4777 }
4778
4779 /* This is called whenever we suspect that the system chipset is re-
4780  * ordering the sequence of MMIO to the tx send mailbox. The symptom
4781  * is bogus tx completions. We try to recover by setting the
4782  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4783  * in the workqueue.
4784  */
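     /* The recovery path as wired up in this file: tg3_tx() calls this
      * when it sees a bogus completion, tg3_poll_work() then returns
      * early once TX_RECOVERY_PENDING is set, and tg3_poll() or
      * tg3_poll_msix() schedules tp->reset_task to reset the chip.
      */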
4785 static void tg3_tx_recover(struct tg3 *tp)
4786 {
4787         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
4788                tp->write32_tx_mbox == tg3_write_indirect_mbox);
4789
4790         netdev_warn(tp->dev,
4791                     "The system may be re-ordering memory-mapped I/O "
4792                     "cycles to the network device, attempting to recover. "
4793                     "Please report the problem to the driver maintainer "
4794                     "and include system chipset information.\n");
4795
4796         spin_lock(&tp->lock);
4797         tg3_flag_set(tp, TX_RECOVERY_PENDING);
4798         spin_unlock(&tp->lock);
4799 }
4800
4801 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4802 {
4803         /* Tell compiler to fetch tx indices from memory. */
4804         barrier();
4805         return tnapi->tx_pending -
4806                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4807 }
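     /* Worked example of the arithmetic above, assuming the usual
      * 512-entry TX ring (TG3_TX_RING_SIZE from tg3.h): with
      * tx_prod = 5 and tx_cons = 510 the ring has wrapped, so
      * (5 - 510) & 511 = 7 descriptors are in flight and
      * tx_pending - 7 slots remain available.
      */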
4808
4809 /* Tigon3 never reports partial packet sends.  So we do not
4810  * need special logic to handle SKBs that have not had all
4811  * of their frags sent yet, like SunGEM does.
4812  */
4813 static void tg3_tx(struct tg3_napi *tnapi)
4814 {
4815         struct tg3 *tp = tnapi->tp;
4816         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4817         u32 sw_idx = tnapi->tx_cons;
4818         struct netdev_queue *txq;
4819         int index = tnapi - tp->napi;
4820
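     	/* With TSS, napi[0] carries no tx ring and tx rings start at
     	 * napi[1], so the netdev tx queue index is one less than the
     	 * napi index (the inverse of the mapping on the xmit path).
     	 */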
4821         if (tg3_flag(tp, ENABLE_TSS))
4822                 index--;
4823
4824         txq = netdev_get_tx_queue(tp->dev, index);
4825
4826         while (sw_idx != hw_idx) {
4827                 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4828                 struct sk_buff *skb = ri->skb;
4829                 int i, tx_bug = 0;
4830
4831                 if (unlikely(skb == NULL)) {
4832                         tg3_tx_recover(tp);
4833                         return;
4834                 }
4835
4836                 pci_unmap_single(tp->pdev,
4837                                  dma_unmap_addr(ri, mapping),
4838                                  skb_headlen(skb),
4839                                  PCI_DMA_TODEVICE);
4840
4841                 ri->skb = NULL;
4842
4843                 sw_idx = NEXT_TX(sw_idx);
4844
4845                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4846                         ri = &tnapi->tx_buffers[sw_idx];
4847                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4848                                 tx_bug = 1;
4849
4850                         pci_unmap_page(tp->pdev,
4851                                        dma_unmap_addr(ri, mapping),
4852                                        skb_shinfo(skb)->frags[i].size,
4853                                        PCI_DMA_TODEVICE);
4854                         sw_idx = NEXT_TX(sw_idx);
4855                 }
4856
4857                 dev_kfree_skb(skb);
4858
4859                 if (unlikely(tx_bug)) {
4860                         tg3_tx_recover(tp);
4861                         return;
4862                 }
4863         }
4864
4865         tnapi->tx_cons = sw_idx;
4866
4867         /* Need to make the tx_cons update visible to tg3_start_xmit()
4868          * before checking for netif_queue_stopped().  Without the
4869          * memory barrier, there is a small possibility that tg3_start_xmit()
4870          * will miss it and cause the queue to be stopped forever.
4871          */
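     	/* The race the barrier closes, roughly: tg3_start_xmit() sees
     	 * the ring full and stops the queue, but misses our tx_cons
     	 * update; meanwhile we read the queue as not yet stopped and
     	 * skip the wakeup, leaving the queue stopped forever.
     	 */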
4872         smp_mb();
4873
4874         if (unlikely(netif_tx_queue_stopped(txq) &&
4875                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4876                 __netif_tx_lock(txq, smp_processor_id());
4877                 if (netif_tx_queue_stopped(txq) &&
4878                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4879                         netif_tx_wake_queue(txq);
4880                 __netif_tx_unlock(txq);
4881         }
4882 }
4883
4884 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4885 {
4886         if (!ri->skb)
4887                 return;
4888
4889         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4890                          map_sz, PCI_DMA_FROMDEVICE);
4891         dev_kfree_skb_any(ri->skb);
4892         ri->skb = NULL;
4893 }
4894
4895 /* Returns size of skb allocated or < 0 on error.
4896  *
4897  * We only need to fill in the address because the other members
4898  * of the RX descriptor are invariant; see tg3_init_rings.
4899  *
4900  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
4901  * posting buffers we only dirty the first cache line of the RX
4902  * descriptor (containing the address), whereas for the RX status
4903  * buffers the cpu only reads the last cache line of the RX descriptor
4904  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4905  */
4906 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4907                             u32 opaque_key, u32 dest_idx_unmasked)
4908 {
4909         struct tg3_rx_buffer_desc *desc;
4910         struct ring_info *map;
4911         struct sk_buff *skb;
4912         dma_addr_t mapping;
4913         int skb_size, dest_idx;
4914
4915         switch (opaque_key) {
4916         case RXD_OPAQUE_RING_STD:
4917                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4918                 desc = &tpr->rx_std[dest_idx];
4919                 map = &tpr->rx_std_buffers[dest_idx];
4920                 skb_size = tp->rx_pkt_map_sz;
4921                 break;
4922
4923         case RXD_OPAQUE_RING_JUMBO:
4924                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4925                 desc = &tpr->rx_jmb[dest_idx].std;
4926                 map = &tpr->rx_jmb_buffers[dest_idx];
4927                 skb_size = TG3_RX_JMB_MAP_SZ;
4928                 break;
4929
4930         default:
4931                 return -EINVAL;
4932         }
4933
4934         /* Do not overwrite any of the map or rp information
4935          * until we are sure we can commit to a new buffer.
4936          *
4937          * Callers depend upon this behavior and assume that
4938          * we leave everything unchanged if we fail.
4939          */
4940         skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4941         if (skb == NULL)
4942                 return -ENOMEM;
4943
4944         skb_reserve(skb, tp->rx_offset);
4945
4946         mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4947                                  PCI_DMA_FROMDEVICE);
4948         if (pci_dma_mapping_error(tp->pdev, mapping)) {
4949                 dev_kfree_skb(skb);
4950                 return -EIO;
4951         }
4952
4953         map->skb = skb;
4954         dma_unmap_addr_set(map, mapping, mapping);
4955
4956         desc->addr_hi = ((u64)mapping >> 32);
4957         desc->addr_lo = ((u64)mapping & 0xffffffff);
4958
4959         return skb_size;
4960 }
4961
4962 /* We only need to move the address over because the other
4963  * members of the RX descriptor are invariant.  See notes above
4964  * tg3_alloc_rx_skb for full details.
4965  */
4966 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4967                            struct tg3_rx_prodring_set *dpr,
4968                            u32 opaque_key, int src_idx,
4969                            u32 dest_idx_unmasked)
4970 {
4971         struct tg3 *tp = tnapi->tp;
4972         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4973         struct ring_info *src_map, *dest_map;
4974         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4975         int dest_idx;
4976
4977         switch (opaque_key) {
4978         case RXD_OPAQUE_RING_STD:
4979                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4980                 dest_desc = &dpr->rx_std[dest_idx];
4981                 dest_map = &dpr->rx_std_buffers[dest_idx];
4982                 src_desc = &spr->rx_std[src_idx];
4983                 src_map = &spr->rx_std_buffers[src_idx];
4984                 break;
4985
4986         case RXD_OPAQUE_RING_JUMBO:
4987                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4988                 dest_desc = &dpr->rx_jmb[dest_idx].std;
4989                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4990                 src_desc = &spr->rx_jmb[src_idx].std;
4991                 src_map = &spr->rx_jmb_buffers[src_idx];
4992                 break;
4993
4994         default:
4995                 return;
4996         }
4997
4998         dest_map->skb = src_map->skb;
4999         dma_unmap_addr_set(dest_map, mapping,
5000                            dma_unmap_addr(src_map, mapping));
5001         dest_desc->addr_hi = src_desc->addr_hi;
5002         dest_desc->addr_lo = src_desc->addr_lo;
5003
5004         /* Ensure that the update to the skb happens after the physical
5005          * addresses have been transferred to the new BD location.
5006          */
5007         smp_wmb();
5008
5009         src_map->skb = NULL;
5010 }
5011
5012 /* The RX ring scheme is composed of multiple rings which post fresh
5013  * buffers to the chip, and one special ring the chip uses to report
5014  * status back to the host.
5015  *
5016  * The special ring reports the status of received packets to the
5017  * host.  The chip does not write into the original descriptor the
5018  * RX buffer was obtained from.  The chip simply takes the original
5019  * descriptor as provided by the host, updates the status and length
5020  * field, then writes this into the next status ring entry.
5021  *
5022  * Each ring the host uses to post buffers to the chip is described
5023  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
5024  * it is first placed into the on-chip ram.  When the packet's length
5025  * is known, the chip walks down the TG3_BDINFO entries to select the
5026  * ring.  Each TG3_BDINFO specifies a MAXLEN field, and the first
5027  * TG3_BDINFO whose MAXLEN covers the new packet's length is chosen.
5028  *
5029  * The "separate ring for rx status" scheme may sound queer, but it makes
5030  * sense from a cache coherency perspective.  If only the host writes
5031  * to the buffer post rings, and only the chip writes to the rx status
5032  * rings, then cache lines never move beyond shared-modified state.
5033  * If both the host and chip were to write into the same ring, cache line
5034  * eviction could occur since both entities want it in an exclusive state.
5035  */
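     /* Rough sketch of the scheme described above:
      *
      *   host --(posts fresh buffers)--> std/jumbo rings ---> chip
      *   host <--(status + length)------ rx return ring <---- chip
      *
      * Each ring has a single writer, either the host or the chip.
      */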
5036 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5037 {
5038         struct tg3 *tp = tnapi->tp;
5039         u32 work_mask, rx_std_posted = 0;
5040         u32 std_prod_idx, jmb_prod_idx;
5041         u32 sw_idx = tnapi->rx_rcb_ptr;
5042         u16 hw_idx;
5043         int received;
5044         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5045
5046         hw_idx = *(tnapi->rx_rcb_prod_idx);
5047         /*
5048          * We need to order the read of hw_idx and the read of
5049          * the opaque cookie.
5050          */
5051         rmb();
5052         work_mask = 0;
5053         received = 0;
5054         std_prod_idx = tpr->rx_std_prod_idx;
5055         jmb_prod_idx = tpr->rx_jmb_prod_idx;
5056         while (sw_idx != hw_idx && budget > 0) {
5057                 struct ring_info *ri;
5058                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5059                 unsigned int len;
5060                 struct sk_buff *skb;
5061                 dma_addr_t dma_addr;
5062                 u32 opaque_key, desc_idx, *post_ptr;
5063
5064                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5065                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5066                 if (opaque_key == RXD_OPAQUE_RING_STD) {
5067                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5068                         dma_addr = dma_unmap_addr(ri, mapping);
5069                         skb = ri->skb;
5070                         post_ptr = &std_prod_idx;
5071                         rx_std_posted++;
5072                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5073                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5074                         dma_addr = dma_unmap_addr(ri, mapping);
5075                         skb = ri->skb;
5076                         post_ptr = &jmb_prod_idx;
5077                 } else
5078                         goto next_pkt_nopost;
5079
5080                 work_mask |= opaque_key;
5081
5082                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5083                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5084                 drop_it:
5085                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5086                                        desc_idx, *post_ptr);
5087                 drop_it_no_recycle:
5088                         /* Other statistics kept track of by card. */
5089                         tp->rx_dropped++;
5090                         goto next_pkt;
5091                 }
5092
5093                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5094                       ETH_FCS_LEN;
5095
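     		/* Copy-versus-flip strategy: packets above the copy
     		 * threshold keep their DMA buffer and a fresh buffer is
     		 * posted in its place; smaller packets are copied into a
     		 * new skb so the original buffer can simply be recycled.
     		 */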
5096                 if (len > TG3_RX_COPY_THRESH(tp)) {
5097                         int skb_size;
5098
5099                         skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
5100                                                     *post_ptr);
5101                         if (skb_size < 0)
5102                                 goto drop_it;
5103
5104                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
5105                                          PCI_DMA_FROMDEVICE);
5106
5107                         /* Ensure that the update to the skb happens
5108                          * after the usage of the old DMA mapping.
5109                          */
5110                         smp_wmb();
5111
5112                         ri->skb = NULL;
5113
5114                         skb_put(skb, len);
5115                 } else {
5116                         struct sk_buff *copy_skb;
5117
5118                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5119                                        desc_idx, *post_ptr);
5120
5121                         copy_skb = netdev_alloc_skb(tp->dev, len +
5122                                                     TG3_RAW_IP_ALIGN);
5123                         if (copy_skb == NULL)
5124                                 goto drop_it_no_recycle;
5125
5126                         skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
5127                         skb_put(copy_skb, len);
5128                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5129                         skb_copy_from_linear_data(skb, copy_skb->data, len);
5130                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5131
5132                         /* We'll reuse the original ring buffer. */
5133                         skb = copy_skb;
5134                 }
5135
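     		/* The chip reports the TCP/UDP checksum it computed in
     		 * the descriptor; 0xffff is treated as "checksum good"
     		 * here, anything else is left to the stack to verify.
     		 */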
5136                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5137                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5138                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5139                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
5140                         skb->ip_summed = CHECKSUM_UNNECESSARY;
5141                 else
5142                         skb_checksum_none_assert(skb);
5143
5144                 skb->protocol = eth_type_trans(skb, tp->dev);
5145
5146                 if (len > (tp->dev->mtu + ETH_HLEN) &&
5147                     skb->protocol != htons(ETH_P_8021Q)) {
5148                         dev_kfree_skb(skb);
5149                         goto drop_it_no_recycle;
5150                 }
5151
5152                 if (desc->type_flags & RXD_FLAG_VLAN &&
5153                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5154                         __vlan_hwaccel_put_tag(skb,
5155                                                desc->err_vlan & RXD_VLAN_MASK);
5156
5157                 napi_gro_receive(&tnapi->napi, skb);
5158
5159                 received++;
5160                 budget--;
5161
5162 next_pkt:
5163                 (*post_ptr)++;
5164
5165                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5166                         tpr->rx_std_prod_idx = std_prod_idx &
5167                                                tp->rx_std_ring_mask;
5168                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5169                                      tpr->rx_std_prod_idx);
5170                         work_mask &= ~RXD_OPAQUE_RING_STD;
5171                         rx_std_posted = 0;
5172                 }
5173 next_pkt_nopost:
5174                 sw_idx++;
5175                 sw_idx &= tp->rx_ret_ring_mask;
5176
5177                 /* Refresh hw_idx to see if there is new work */
5178                 if (sw_idx == hw_idx) {
5179                         hw_idx = *(tnapi->rx_rcb_prod_idx);
5180                         rmb();
5181                 }
5182         }
5183
5184         /* ACK the status ring. */
5185         tnapi->rx_rcb_ptr = sw_idx;
5186         tw32_rx_mbox(tnapi->consmbox, sw_idx);
5187
5188         /* Refill RX ring(s). */
5189         if (!tg3_flag(tp, ENABLE_RSS)) {
5190                 if (work_mask & RXD_OPAQUE_RING_STD) {
5191                         tpr->rx_std_prod_idx = std_prod_idx &
5192                                                tp->rx_std_ring_mask;
5193                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5194                                      tpr->rx_std_prod_idx);
5195                 }
5196                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5197                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
5198                                                tp->rx_jmb_ring_mask;
5199                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5200                                      tpr->rx_jmb_prod_idx);
5201                 }
5202                 mmiowb();
5203         } else if (work_mask) {
5204                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5205                  * updated before the producer indices can be updated.
5206                  */
5207                 smp_wmb();
5208
5209                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5210                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5211
5212                 if (tnapi != &tp->napi[1])
5213                         napi_schedule(&tp->napi[1].napi);
5214         }
5215
5216         return received;
5217 }
5218
5219 static void tg3_poll_link(struct tg3 *tp)
5220 {
5221         /* handle link change and other phy events */
5222         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5223                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5224
5225                 if (sblk->status & SD_STATUS_LINK_CHG) {
5226                         sblk->status = SD_STATUS_UPDATED |
5227                                        (sblk->status & ~SD_STATUS_LINK_CHG);
5228                         spin_lock(&tp->lock);
5229                         if (tg3_flag(tp, USE_PHYLIB)) {
5230                                 tw32_f(MAC_STATUS,
5231                                      (MAC_STATUS_SYNC_CHANGED |
5232                                       MAC_STATUS_CFG_CHANGED |
5233                                       MAC_STATUS_MI_COMPLETION |
5234                                       MAC_STATUS_LNKSTATE_CHANGED));
5235                                 udelay(40);
5236                         } else
5237                                 tg3_setup_phy(tp, 0);
5238                         spin_unlock(&tp->lock);
5239                 }
5240         }
5241 }
5242
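     /* Transfer buffers from a source producer ring (spr) to the
      * destination ring (dpr) that the hardware actually consumes; in
      * the RSS case the destination is napi[0]'s ring.  Returns -ENOSPC
      * if the destination fills up before the source is drained, and
      * the caller is expected to retry later.
      */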
5243 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5244                                 struct tg3_rx_prodring_set *dpr,
5245                                 struct tg3_rx_prodring_set *spr)
5246 {
5247         u32 si, di, cpycnt, src_prod_idx;
5248         int i, err = 0;
5249
5250         while (1) {
5251                 src_prod_idx = spr->rx_std_prod_idx;
5252
5253                 /* Make sure updates to the rx_std_buffers[] entries and the
5254                  * standard producer index are seen in the correct order.
5255                  */
5256                 smp_rmb();
5257
5258                 if (spr->rx_std_cons_idx == src_prod_idx)
5259                         break;
5260
5261                 if (spr->rx_std_cons_idx < src_prod_idx)
5262                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5263                 else
5264                         cpycnt = tp->rx_std_ring_mask + 1 -
5265                                  spr->rx_std_cons_idx;
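     		/* Worked example, assuming a 512-entry ring (mask 511):
     		 * cons_idx = 500 with prod_idx = 10 means the producer has
     		 * wrapped, so copy the 512 - 500 = 12 trailing entries now
     		 * and pick up the remaining 10 on the next loop pass.
     		 */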
5266
5267                 cpycnt = min(cpycnt,
5268                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5269
5270                 si = spr->rx_std_cons_idx;
5271                 di = dpr->rx_std_prod_idx;
5272
5273                 for (i = di; i < di + cpycnt; i++) {
5274                         if (dpr->rx_std_buffers[i].skb) {
5275                                 cpycnt = i - di;
5276                                 err = -ENOSPC;
5277                                 break;
5278                         }
5279                 }
5280
5281                 if (!cpycnt)
5282                         break;
5283
5284                 /* Ensure that updates to the rx_std_buffers ring and the
5285                  * shadowed hardware producer ring from tg3_recycle_skb() are
5286                  * ordered correctly WRT the skb check above.
5287                  */
5288                 smp_rmb();
5289
5290                 memcpy(&dpr->rx_std_buffers[di],
5291                        &spr->rx_std_buffers[si],
5292                        cpycnt * sizeof(struct ring_info));
5293
5294                 for (i = 0; i < cpycnt; i++, di++, si++) {
5295                         struct tg3_rx_buffer_desc *sbd, *dbd;
5296                         sbd = &spr->rx_std[si];
5297                         dbd = &dpr->rx_std[di];
5298                         dbd->addr_hi = sbd->addr_hi;
5299                         dbd->addr_lo = sbd->addr_lo;
5300                 }
5301
5302                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5303                                        tp->rx_std_ring_mask;
5304                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5305                                        tp->rx_std_ring_mask;
5306         }
5307
5308         while (1) {
5309                 src_prod_idx = spr->rx_jmb_prod_idx;
5310
5311                 /* Make sure updates to the rx_jmb_buffers[] entries and
5312                  * the jumbo producer index are seen in the correct order.
5313                  */
5314                 smp_rmb();
5315
5316                 if (spr->rx_jmb_cons_idx == src_prod_idx)
5317                         break;
5318
5319                 if (spr->rx_jmb_cons_idx < src_prod_idx)
5320                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5321                 else
5322                         cpycnt = tp->rx_jmb_ring_mask + 1 -
5323                                  spr->rx_jmb_cons_idx;
5324
5325                 cpycnt = min(cpycnt,
5326                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5327
5328                 si = spr->rx_jmb_cons_idx;
5329                 di = dpr->rx_jmb_prod_idx;
5330
5331                 for (i = di; i < di + cpycnt; i++) {
5332                         if (dpr->rx_jmb_buffers[i].skb) {
5333                                 cpycnt = i - di;
5334                                 err = -ENOSPC;
5335                                 break;
5336                         }
5337                 }
5338
5339                 if (!cpycnt)
5340                         break;
5341
5342                 /* Ensure that updates to the rx_jmb_buffers ring and the
5343                  * shadowed hardware producer ring from tg3_recycle_skb() are
5344                  * ordered correctly WRT the skb check above.
5345                  */
5346                 smp_rmb();
5347
5348                 memcpy(&dpr->rx_jmb_buffers[di],
5349                        &spr->rx_jmb_buffers[si],
5350                        cpycnt * sizeof(struct ring_info));
5351
5352                 for (i = 0; i < cpycnt; i++, di++, si++) {
5353                         struct tg3_rx_buffer_desc *sbd, *dbd;
5354                         sbd = &spr->rx_jmb[si].std;
5355                         dbd = &dpr->rx_jmb[di].std;
5356                         dbd->addr_hi = sbd->addr_hi;
5357                         dbd->addr_lo = sbd->addr_lo;
5358                 }
5359
5360                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5361                                        tp->rx_jmb_ring_mask;
5362                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5363                                        tp->rx_jmb_ring_mask;
5364         }
5365
5366         return err;
5367 }
5368
5369 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5370 {
5371         struct tg3 *tp = tnapi->tp;
5372
5373         /* run TX completion thread */
5374         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5375                 tg3_tx(tnapi);
5376                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5377                         return work_done;
5378         }
5379
5380         /* run RX thread, within the bounds set by NAPI.
5381          * All RX "locking" is done by ensuring outside
5382          * code synchronizes with tg3->napi.poll()
5383          */
5384         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5385                 work_done += tg3_rx(tnapi, budget - work_done);
5386
5387         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5388                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5389                 int i, err = 0;
5390                 u32 std_prod_idx = dpr->rx_std_prod_idx;
5391                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5392
5393                 for (i = 1; i < tp->irq_cnt; i++)
5394                         err |= tg3_rx_prodring_xfer(tp, dpr,
5395                                                     &tp->napi[i].prodring);
5396
5397                 wmb();
5398
5399                 if (std_prod_idx != dpr->rx_std_prod_idx)
5400                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5401                                      dpr->rx_std_prod_idx);
5402
5403                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5404                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5405                                      dpr->rx_jmb_prod_idx);
5406
5407                 mmiowb();
5408
5409                 if (err)
5410                         tw32_f(HOSTCC_MODE, tp->coal_now);
5411         }
5412
5413         return work_done;
5414 }
5415
5416 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5417 {
5418         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5419         struct tg3 *tp = tnapi->tp;
5420         int work_done = 0;
5421         struct tg3_hw_status *sblk = tnapi->hw_status;
5422
5423         while (1) {
5424                 work_done = tg3_poll_work(tnapi, work_done, budget);
5425
5426                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5427                         goto tx_recovery;
5428
5429                 if (unlikely(work_done >= budget))
5430                         break;
5431
5432                 /* tnapi->last_tag is written to the interrupt mailbox
5433                  * below to tell the hw how much work has been processed,
5434                  * so we must read it before checking for more work.
5435                  */
5436                 tnapi->last_tag = sblk->status_tag;
5437                 tnapi->last_irq_tag = tnapi->last_tag;
5438                 rmb();
5439
5440                 /* check for RX/TX work to do */
5441                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5442                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5443                         napi_complete(napi);
5444                         /* Reenable interrupts. */
5445                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5446                         mmiowb();
5447                         break;
5448                 }
5449         }
5450
5451         return work_done;
5452
5453 tx_recovery:
5454         /* work_done is guaranteed to be less than budget. */
5455         napi_complete(napi);
5456         schedule_work(&tp->reset_task);
5457         return work_done;
5458 }
5459
5460 static void tg3_process_error(struct tg3 *tp)
5461 {
5462         u32 val;
5463         bool real_error = false;
5464
5465         if (tg3_flag(tp, ERROR_PROCESSED))
5466                 return;
5467
5468         /* Check Flow Attention register */
5469         val = tr32(HOSTCC_FLOW_ATTN);
5470         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5471                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
5472                 real_error = true;
5473         }
5474
5475         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5476                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
5477                 real_error = true;
5478         }
5479
5480         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5481                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
5482                 real_error = true;
5483         }
5484
5485         if (!real_error)
5486                 return;
5487
5488         tg3_dump_state(tp);
5489
5490         tg3_flag_set(tp, ERROR_PROCESSED);
5491         schedule_work(&tp->reset_task);
5492 }
5493
5494 static int tg3_poll(struct napi_struct *napi, int budget)
5495 {
5496         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5497         struct tg3 *tp = tnapi->tp;
5498         int work_done = 0;
5499         struct tg3_hw_status *sblk = tnapi->hw_status;
5500
5501         while (1) {
5502                 if (sblk->status & SD_STATUS_ERROR)
5503                         tg3_process_error(tp);
5504
5505                 tg3_poll_link(tp);
5506
5507                 work_done = tg3_poll_work(tnapi, work_done, budget);
5508
5509                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5510                         goto tx_recovery;
5511
5512                 if (unlikely(work_done >= budget))
5513                         break;
5514
5515                 if (tg3_flag(tp, TAGGED_STATUS)) {
5516                         /* tnapi->last_tag is used in tg3_int_reenable() below
5517                          * to tell the hw how much work has been processed,
5518                          * so we must read it before checking for more work.
5519                          */
5520                         tnapi->last_tag = sblk->status_tag;
5521                         tnapi->last_irq_tag = tnapi->last_tag;
5522                         rmb();
5523                 } else
5524                         sblk->status &= ~SD_STATUS_UPDATED;
5525
5526                 if (likely(!tg3_has_work(tnapi))) {
5527                         napi_complete(napi);
5528                         tg3_int_reenable(tnapi);
5529                         break;
5530                 }
5531         }
5532
5533         return work_done;
5534
5535 tx_recovery:
5536         /* work_done is guaranteed to be less than budget. */
5537         napi_complete(napi);
5538         schedule_work(&tp->reset_task);
5539         return work_done;
5540 }
5541
5542 static void tg3_napi_disable(struct tg3 *tp)
5543 {
5544         int i;
5545
5546         for (i = tp->irq_cnt - 1; i >= 0; i--)
5547                 napi_disable(&tp->napi[i].napi);
5548 }
5549
5550 static void tg3_napi_enable(struct tg3 *tp)
5551 {
5552         int i;
5553
5554         for (i = 0; i < tp->irq_cnt; i++)
5555                 napi_enable(&tp->napi[i].napi);
5556 }
5557
5558 static void tg3_napi_init(struct tg3 *tp)
5559 {
5560         int i;
5561
5562         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5563         for (i = 1; i < tp->irq_cnt; i++)
5564                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5565 }
5566
5567 static void tg3_napi_fini(struct tg3 *tp)
5568 {
5569         int i;
5570
5571         for (i = 0; i < tp->irq_cnt; i++)
5572                 netif_napi_del(&tp->napi[i].napi);
5573 }
5574
5575 static inline void tg3_netif_stop(struct tg3 *tp)
5576 {
5577         tp->dev->trans_start = jiffies; /* prevent tx timeout */
5578         tg3_napi_disable(tp);
5579         netif_tx_disable(tp->dev);
5580 }
5581
5582 static inline void tg3_netif_start(struct tg3 *tp)
5583 {
5584         /* NOTE: unconditional netif_tx_wake_all_queues is only
5585          * appropriate so long as all callers are assured to
5586          * have free tx slots (such as after tg3_init_hw)
5587          */
5588         netif_tx_wake_all_queues(tp->dev);
5589
5590         tg3_napi_enable(tp);
5591         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5592         tg3_enable_ints(tp);
5593 }
5594
5595 static void tg3_irq_quiesce(struct tg3 *tp)
5596 {
5597         int i;
5598
5599         BUG_ON(tp->irq_sync);
5600
5601         tp->irq_sync = 1;
5602         smp_mb();
5603
5604         for (i = 0; i < tp->irq_cnt; i++)
5605                 synchronize_irq(tp->napi[i].irq_vec);
5606 }
5607
5608 /* Fully shut down all tg3 driver activity elsewhere in the system.
5609  * If irq_sync is non-zero, the IRQ handlers must be synchronized
5610  * with as well.  This is usually only necessary when shutting down
5611  * the device.
5612  */
5613 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5614 {
5615         spin_lock_bh(&tp->lock);
5616         if (irq_sync)
5617                 tg3_irq_quiesce(tp);
5618 }
5619
5620 static inline void tg3_full_unlock(struct tg3 *tp)
5621 {
5622         spin_unlock_bh(&tp->lock);
5623 }
5624
5625 /* One-shot MSI handler - Chip automatically disables interrupt
5626  * after sending MSI so driver doesn't have to do it.
5627  */
5628 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5629 {
5630         struct tg3_napi *tnapi = dev_id;
5631         struct tg3 *tp = tnapi->tp;
5632
5633         prefetch(tnapi->hw_status);
5634         if (tnapi->rx_rcb)
5635                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5636
5637         if (likely(!tg3_irq_sync(tp)))
5638                 napi_schedule(&tnapi->napi);
5639
5640         return IRQ_HANDLED;
5641 }
5642
5643 /* MSI ISR - No need to check for interrupt sharing and no need to
5644  * flush status block and interrupt mailbox. PCI ordering rules
5645  * guarantee that MSI will arrive after the status block.
5646  */
5647 static irqreturn_t tg3_msi(int irq, void *dev_id)
5648 {
5649         struct tg3_napi *tnapi = dev_id;
5650         struct tg3 *tp = tnapi->tp;
5651
5652         prefetch(tnapi->hw_status);
5653         if (tnapi->rx_rcb)
5654                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5655         /*
5656          * Writing any value to intr-mbox-0 clears PCI INTA# and
5657          * chip-internal interrupt pending events.
5658          * Writing non-zero to intr-mbox-0 additionally tells the
5659          * NIC to stop sending us irqs, engaging "in-intr-handler"
5660          * event coalescing.
5661          */
5662         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5663         if (likely(!tg3_irq_sync(tp)))
5664                 napi_schedule(&tnapi->napi);
5665
5666         return IRQ_RETVAL(1);
5667 }
5668
5669 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5670 {
5671         struct tg3_napi *tnapi = dev_id;
5672         struct tg3 *tp = tnapi->tp;
5673         struct tg3_hw_status *sblk = tnapi->hw_status;
5674         unsigned int handled = 1;
5675
5676         /* In INTx mode, it is possible for the interrupt to arrive at
5677          * the CPU before the status block write that preceded it is
5678          * visible.  Reading the PCI State register will confirm whether
5679          * the interrupt is ours and will flush the status block.
5680          */
5681         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5682                 if (tg3_flag(tp, CHIP_RESETTING) ||
5683                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5684                         handled = 0;
5685                         goto out;
5686                 }
5687         }
5688
5689         /*
5690          * Writing any value to intr-mbox-0 clears PCI INTA# and
5691          * chip-internal interrupt pending events.
5692          * Writing non-zero to intr-mbox-0 additionally tells the
5693          * NIC to stop sending us irqs, engaging "in-intr-handler"
5694          * event coalescing.
5695          *
5696          * Flush the mailbox to de-assert the IRQ immediately to prevent
5697          * spurious interrupts.  The flush impacts performance but
5698          * excessive spurious interrupts can be worse in some cases.
5699          */
5700         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5701         if (tg3_irq_sync(tp))
5702                 goto out;
5703         sblk->status &= ~SD_STATUS_UPDATED;
5704         if (likely(tg3_has_work(tnapi))) {
5705                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5706                 napi_schedule(&tnapi->napi);
5707         } else {
5708                 /* No work, shared interrupt perhaps?  re-enable
5709                  * interrupts, and flush that PCI write
5710                  */
5711                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5712                                0x00000000);
5713         }
5714 out:
5715         return IRQ_RETVAL(handled);
5716 }
5717
5718 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5719 {
5720         struct tg3_napi *tnapi = dev_id;
5721         struct tg3 *tp = tnapi->tp;
5722         struct tg3_hw_status *sblk = tnapi->hw_status;
5723         unsigned int handled = 1;
5724
5725         /* In INTx mode, it is possible for the interrupt to arrive at
5726          * the CPU before the status block write that preceded it is
5727          * visible.  Reading the PCI State register will confirm whether
5728          * the interrupt is ours and will flush the status block.
5729          */
5730         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5731                 if (tg3_flag(tp, CHIP_RESETTING) ||
5732                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5733                         handled = 0;
5734                         goto out;
5735                 }
5736         }
5737
5738         /*
5739          * Writing any value to intr-mbox-0 clears PCI INTA# and
5740          * chip-internal interrupt pending events.
5741          * Writing non-zero to intr-mbox-0 additionally tells the
5742          * NIC to stop sending us irqs, engaging "in-intr-handler"
5743          * event coalescing.
5744          *
5745          * Flush the mailbox to de-assert the IRQ immediately to prevent
5746          * spurious interrupts.  The flush impacts performance but
5747          * excessive spurious interrupts can be worse in some cases.
5748          */
5749         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5750
5751         /*
5752          * In a shared interrupt configuration, sometimes other devices'
5753          * interrupts will scream.  We record the current status tag here
5754          * so that the above check can report that the screaming interrupts
5755          * are unhandled.  Eventually they will be silenced.
5756          */
5757         tnapi->last_irq_tag = sblk->status_tag;
5758
5759         if (tg3_irq_sync(tp))
5760                 goto out;
5761
5762         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5763
5764         napi_schedule(&tnapi->napi);
5765
5766 out:
5767         return IRQ_RETVAL(handled);
5768 }
5769
5770 /* ISR for interrupt test */
5771 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5772 {
5773         struct tg3_napi *tnapi = dev_id;
5774         struct tg3 *tp = tnapi->tp;
5775         struct tg3_hw_status *sblk = tnapi->hw_status;
5776
5777         if ((sblk->status & SD_STATUS_UPDATED) ||
5778             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5779                 tg3_disable_ints(tp);
5780                 return IRQ_RETVAL(1);
5781         }
5782         return IRQ_RETVAL(0);
5783 }
5784
5785 static int tg3_init_hw(struct tg3 *, int);
5786 static int tg3_halt(struct tg3 *, int, int);
5787
5788 /* Restart hardware after configuration changes, self-test, etc.
5789  * Invoked with tp->lock held.
5790  */
5791 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5792         __releases(tp->lock)
5793         __acquires(tp->lock)
5794 {
5795         int err;
5796
5797         err = tg3_init_hw(tp, reset_phy);
5798         if (err) {
5799                 netdev_err(tp->dev,
5800                            "Failed to re-initialize device, aborting\n");
5801                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5802                 tg3_full_unlock(tp);
5803                 del_timer_sync(&tp->timer);
5804                 tp->irq_sync = 0;
5805                 tg3_napi_enable(tp);
5806                 dev_close(tp->dev);
5807                 tg3_full_lock(tp, 0);
5808         }
5809         return err;
5810 }
5811
5812 #ifdef CONFIG_NET_POLL_CONTROLLER
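     /* Netpoll entry point (netconsole and friends): interrupts may be
      * disabled here, so invoke each vector's handler by hand.
      */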
5813 static void tg3_poll_controller(struct net_device *dev)
5814 {
5815         int i;
5816         struct tg3 *tp = netdev_priv(dev);
5817
5818         for (i = 0; i < tp->irq_cnt; i++)
5819                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5820 }
5821 #endif
5822
5823 static void tg3_reset_task(struct work_struct *work)
5824 {
5825         struct tg3 *tp = container_of(work, struct tg3, reset_task);
5826         int err;
5827         unsigned int restart_timer;
5828
5829         tg3_full_lock(tp, 0);
5830
5831         if (!netif_running(tp->dev)) {
5832                 tg3_full_unlock(tp);
5833                 return;
5834         }
5835
5836         tg3_full_unlock(tp);
5837
5838         tg3_phy_stop(tp);
5839
5840         tg3_netif_stop(tp);
5841
5842         tg3_full_lock(tp, 1);
5843
5844         restart_timer = tg3_flag(tp, RESTART_TIMER);
5845         tg3_flag_clear(tp, RESTART_TIMER);
5846
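             /* A pending TX recovery most likely means mailbox writes were
              * posted out of order; switch to flushed (read-back) mailbox
              * writes from here on as a precaution.
              */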
5847         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
5848                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5849                 tp->write32_rx_mbox = tg3_write_flush_reg32;
5850                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5851                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
5852         }
5853
5854         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5855         err = tg3_init_hw(tp, 1);
5856         if (err)
5857                 goto out;
5858
5859         tg3_netif_start(tp);
5860
5861         if (restart_timer)
5862                 mod_timer(&tp->timer, jiffies + 1);
5863
5864 out:
5865         tg3_full_unlock(tp);
5866
5867         if (!err)
5868                 tg3_phy_start(tp);
5869 }
5870
5871 static void tg3_tx_timeout(struct net_device *dev)
5872 {
5873         struct tg3 *tp = netdev_priv(dev);
5874
5875         if (netif_msg_tx_err(tp)) {
5876                 netdev_err(dev, "transmit timed out, resetting\n");
5877                 tg3_dump_state(tp);
5878         }
5879
5880         schedule_work(&tp->reset_task);
5881 }
5882
5883 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
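     /* A worked example of the u32 wraparound test below: with
      * mapping = 0x1ffffff00 and len = 0x200, base = 0xffffff00 and
      * base + len + 8 wraps to 0x108, which is < base, so the buffer
      * straddles the 8GB boundary.  The base > 0xffffdcc0 pre-filter
      * means only mappings within ~9KB of a boundary (enough,
      * presumably, for the largest jumbo frame plus slack) can wrap.
      */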
5884 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5885 {
5886         u32 base = (u32) mapping & 0xffffffff;
5887
5888         return (base > 0xffffdcc0) && (base + len + 8 < base);
5889 }
5890
5891 /* Test for DMA addresses > 40-bit */
5892 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5893                                           int len)
5894 {
5895 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5896         if (tg3_flag(tp, 40BIT_DMA_BUG))
5897                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5898         return 0;
5899 #else
5900         return 0;
5901 #endif
5902 }
5903
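     /* Fill one TX descriptor.  @mss_and_is_end packs the end-of-packet
      * flag in bit 0 and the MSS in the bits above it, so callers pass
      * (is_end) | (mss << 1), as decoded just below.
      */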
5904 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5905                         dma_addr_t mapping, int len, u32 flags,
5906                         u32 mss_and_is_end)
5907 {
5908         struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5909         int is_end = (mss_and_is_end & 0x1);
5910         u32 mss = (mss_and_is_end >> 1);
5911         u32 vlan_tag = 0;
5912
5913         if (is_end)
5914                 flags |= TXD_FLAG_END;
5915         if (flags & TXD_FLAG_VLAN) {
5916                 vlan_tag = flags >> 16;
5917                 flags &= 0xffff;
5918         }
5919         vlan_tag |= (mss << TXD_MSS_SHIFT);
5920
5921         txd->addr_hi = ((u64) mapping >> 32);
5922         txd->addr_lo = ((u64) mapping & 0xffffffff);
5923         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5924         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
5925 }
5926
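     /* Unmap a partially set up TX skb: the linear head mapped at
      * tnapi->tx_prod plus the first @last page fragments after it.
      */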
5927 static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
5928                                 struct sk_buff *skb, int last)
5929 {
5930         int i;
5931         u32 entry = tnapi->tx_prod;
5932         struct ring_info *txb = &tnapi->tx_buffers[entry];
5933
5934         pci_unmap_single(tnapi->tp->pdev,
5935                          dma_unmap_addr(txb, mapping),
5936                          skb_headlen(skb),
5937                          PCI_DMA_TODEVICE);
5938         for (i = 0; i < last; i++) {
5939                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5940
5941                 entry = NEXT_TX(entry);
5942                 txb = &tnapi->tx_buffers[entry];
5943
5944                 pci_unmap_page(tnapi->tp->pdev,
5945                                dma_unmap_addr(txb, mapping),
5946                                frag->size, PCI_DMA_TODEVICE);
5947         }
5948 }
5949
5950 /* Workaround 4GB and 40-bit hardware DMA bugs. */
5951 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5952                                        struct sk_buff *skb,
5953                                        u32 base_flags, u32 mss)
5954 {
5955         struct tg3 *tp = tnapi->tp;
5956         struct sk_buff *new_skb;
5957         dma_addr_t new_addr = 0;
5958         u32 entry = tnapi->tx_prod;
5959         int ret = 0;
5960
5961         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5962                 new_skb = skb_copy(skb, GFP_ATOMIC);
5963         else {
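                     /* For the 5701 the copied packet evidently needs to
                      * be 4-byte alignable; reserve extra headroom so
                      * skb->data can land on such a boundary in the new
                      * skb.
                      */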
5964                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5965
5966                 new_skb = skb_copy_expand(skb,
5967                                           skb_headroom(skb) + more_headroom,
5968                                           skb_tailroom(skb), GFP_ATOMIC);
5969         }
5970
5971         if (!new_skb) {
5972                 ret = -1;
5973         } else {
5974                 /* New SKB is guaranteed to be linear. */
5975                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5976                                           PCI_DMA_TODEVICE);
5977                 /* Make sure the mapping succeeded */
5978                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5979                         ret = -1;
5980                         dev_kfree_skb(new_skb);
5981
5982                 /* Make sure new skb does not cross any 4G boundaries.
5983                  * Drop the packet if it does.
5984                  */
5985                 } else if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
5986                         pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5987                                          PCI_DMA_TODEVICE);
5988                         ret = -1;
5989                         dev_kfree_skb(new_skb);
5990                 } else {
5991                         tnapi->tx_buffers[entry].skb = new_skb;
5992                         dma_unmap_addr_set(&tnapi->tx_buffers[entry],
5993                                            mapping, new_addr);
5994
5995                         tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5996                                     base_flags, 1 | (mss << 1));
5997                 }
5998         }
5999
6000         dev_kfree_skb(skb);
6001
6002         return ret;
6003 }
6004
6005 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6006
6007 /* Use GSO to workaround a rare TSO bug that may be triggered when the
6008  * TSO header is greater than 80 bytes.
6009  */
6010 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6011 {
6012         struct sk_buff *segs, *nskb;
6013         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6014
6015         /* Estimate the number of fragments in the worst case */
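             /* (Roughly three descriptors per resulting segment: the
              * headers plus up to two page fragments, presumably.)
              */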
6016         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6017                 netif_stop_queue(tp->dev);
6018
6019                 /* netif_stop_queue() must be done before checking
6020                  * the tx index in tg3_tx_avail() below, because in
6021                  * tg3_tx(), we update tx index before checking for
6022                  * netif_tx_queue_stopped().
6023                  */
6024                 smp_mb();
6025                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6026                         return NETDEV_TX_BUSY;
6027
6028                 netif_wake_queue(tp->dev);
6029         }
6030
6031         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6032         if (IS_ERR(segs))
6033                 goto tg3_tso_bug_end;
6034
6035         do {
6036                 nskb = segs;
6037                 segs = segs->next;
6038                 nskb->next = NULL;
6039                 tg3_start_xmit(nskb, tp->dev);
6040         } while (segs);
6041
6042 tg3_tso_bug_end:
6043         dev_kfree_skb(skb);
6044
6045         return NETDEV_TX_OK;
6046 }
6047
6048 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6049  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6050  */
6051 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6052 {
6053         struct tg3 *tp = netdev_priv(dev);
6054         u32 len, entry, base_flags, mss;
6055         int i = -1, would_hit_hwbug;
6056         dma_addr_t mapping;
6057         struct tg3_napi *tnapi;
6058         struct netdev_queue *txq;
6059         unsigned int last;
6060
6061         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6062         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6063         if (tg3_flag(tp, ENABLE_TSS))
6064                 tnapi++;
6065
6066         /* We are running in BH disabled context with netif_tx_lock
6067          * and TX reclaim runs via tp->napi.poll inside of a software
6068          * interrupt.  Furthermore, IRQ processing runs lockless so we have
6069          * no IRQ context deadlocks to worry about either.  Rejoice!
6070          */
6071         if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
6072                 if (!netif_tx_queue_stopped(txq)) {
6073                         netif_tx_stop_queue(txq);
6074
6075                         /* This is a hard error, log it. */
6076                         netdev_err(dev,
6077                                    "BUG! Tx Ring full when queue awake!\n");
6078                 }
6079                 return NETDEV_TX_BUSY;
6080         }
6081
6082         entry = tnapi->tx_prod;
6083         base_flags = 0;
6084         if (skb->ip_summed == CHECKSUM_PARTIAL)
6085                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6086
6087         mss = skb_shinfo(skb)->gso_size;
6088         if (mss) {
6089                 struct iphdr *iph;
6090                 u32 tcp_opt_len, hdr_len;
6091
6092                 if (skb_header_cloned(skb) &&
6093                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
6094                         dev_kfree_skb(skb);
6095                         goto out_unlock;
6096                 }
6097
6098                 iph = ip_hdr(skb);
6099                 tcp_opt_len = tcp_optlen(skb);
6100
6101                 if (skb_is_gso_v6(skb)) {
6102                         hdr_len = skb_headlen(skb) - ETH_HLEN;
6103                 } else {
6104                         u32 ip_tcp_len;
6105
6106                         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
6107                         hdr_len = ip_tcp_len + tcp_opt_len;
6108
6109                         iph->check = 0;
6110                         iph->tot_len = htons(mss + hdr_len);
6111                 }
6112
6113                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6114                     tg3_flag(tp, TSO_BUG))
6115                         return tg3_tso_bug(tp, skb);
6116
6117                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6118                                TXD_FLAG_CPU_POST_DMA);
6119
6120                 if (tg3_flag(tp, HW_TSO_1) ||
6121                     tg3_flag(tp, HW_TSO_2) ||
6122                     tg3_flag(tp, HW_TSO_3)) {
6123                         tcp_hdr(skb)->check = 0;
6124                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6125                 } else
6126                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6127                                                                  iph->daddr, 0,
6128                                                                  IPPROTO_TCP,
6129                                                                  0);
6130
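                     /* The hardware TSO variants encode the total header
                      * length in different places: HW_TSO_2 packs hdr_len
                      * into the upper MSS bits, while HW_TSO_3 scatters it
                      * across the MSS field and base_flags, as below.
                      */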
6131                 if (tg3_flag(tp, HW_TSO_3)) {
6132                         mss |= (hdr_len & 0xc) << 12;
6133                         if (hdr_len & 0x10)
6134                                 base_flags |= 0x00000010;
6135                         base_flags |= (hdr_len & 0x3e0) << 5;
6136                 } else if (tg3_flag(tp, HW_TSO_2))
6137                         mss |= hdr_len << 9;
6138                 else if (tg3_flag(tp, HW_TSO_1) ||
6139                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6140                         if (tcp_opt_len || iph->ihl > 5) {
6141                                 int tsflags;
6142
6143                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6144                                 mss |= (tsflags << 11);
6145                         }
6146                 } else {
6147                         if (tcp_opt_len || iph->ihl > 5) {
6148                                 int tsflags;
6149
6150                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6151                                 base_flags |= tsflags << 12;
6152                         }
6153                 }
6154         }
6155
6156         if (vlan_tx_tag_present(skb))
6157                 base_flags |= (TXD_FLAG_VLAN |
6158                                (vlan_tx_tag_get(skb) << 16));
6159
6160         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6161             !mss && skb->len > VLAN_ETH_FRAME_LEN)
6162                 base_flags |= TXD_FLAG_JMB_PKT;
6163
6164         len = skb_headlen(skb);
6165
6166         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6167         if (pci_dma_mapping_error(tp->pdev, mapping)) {
6168                 dev_kfree_skb(skb);
6169                 goto out_unlock;
6170         }
6171
6172         tnapi->tx_buffers[entry].skb = skb;
6173         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6174
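             /* Any of the DMA erratum checks below forces the copy-based
              * workaround in tigon3_dma_hwbug_workaround() further down.
              */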
6175         would_hit_hwbug = 0;
6176
6177         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6178                 would_hit_hwbug = 1;
6179
6180         if (tg3_4g_overflow_test(mapping, len))
6181                 would_hit_hwbug = 1;
6182
6183         if (tg3_40bit_overflow_test(tp, mapping, len))
6184                 would_hit_hwbug = 1;
6185
6186         if (tg3_flag(tp, 5701_DMA_BUG))
6187                 would_hit_hwbug = 1;
6188
6189         tg3_set_txd(tnapi, entry, mapping, len, base_flags,
6190                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
6191
6192         entry = NEXT_TX(entry);
6193
6194         /* Now loop through additional data fragments, and queue them. */
6195         if (skb_shinfo(skb)->nr_frags > 0) {
6196                 last = skb_shinfo(skb)->nr_frags - 1;
6197                 for (i = 0; i <= last; i++) {
6198                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6199
6200                         len = frag->size;
6201                         mapping = pci_map_page(tp->pdev,
6202                                                frag->page,
6203                                                frag->page_offset,
6204                                                len, PCI_DMA_TODEVICE);
6205
6206                         tnapi->tx_buffers[entry].skb = NULL;
6207                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6208                                            mapping);
6209                         if (pci_dma_mapping_error(tp->pdev, mapping))
6210                                 goto dma_error;
6211
6212                         if (tg3_flag(tp, SHORT_DMA_BUG) &&
6213                             len <= 8)
6214                                 would_hit_hwbug = 1;
6215
6216                         if (tg3_4g_overflow_test(mapping, len))
6217                                 would_hit_hwbug = 1;
6218
6219                         if (tg3_40bit_overflow_test(tp, mapping, len))
6220                                 would_hit_hwbug = 1;
6221
6222                         if (tg3_flag(tp, HW_TSO_1) ||
6223                             tg3_flag(tp, HW_TSO_2) ||
6224                             tg3_flag(tp, HW_TSO_3))
6225                                 tg3_set_txd(tnapi, entry, mapping, len,
6226                                             base_flags, (i == last)|(mss << 1));
6227                         else
6228                                 tg3_set_txd(tnapi, entry, mapping, len,
6229                                             base_flags, (i == last));
6230
6231                         entry = NEXT_TX(entry);
6232                 }
6233         }
6234
6235         if (would_hit_hwbug) {
6236                 tg3_skb_error_unmap(tnapi, skb, i);
6237
6238                 /* If the workaround fails due to memory/mapping
6239                  * failure, silently drop this packet.
6240                  */
6241                 if (tigon3_dma_hwbug_workaround(tnapi, skb, base_flags, mss))
6242                         goto out_unlock;
6243
6244                 entry = NEXT_TX(tnapi->tx_prod);
6245         }
6246
6247         skb_tx_timestamp(skb);
6248
6249         /* Packets are ready, update Tx producer idx local and on card. */
6250         tw32_tx_mbox(tnapi->prodmbox, entry);
6251
6252         tnapi->tx_prod = entry;
6253         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6254                 netif_tx_stop_queue(txq);
6255
6256                 /* netif_tx_stop_queue() must be done before checking
6257                  * the tx index in tg3_tx_avail() below, because in
6258                  * tg3_tx(), we update tx index before checking for
6259                  * netif_tx_queue_stopped().
6260                  */
6261                 smp_mb();
6262                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6263                         netif_tx_wake_queue(txq);
6264         }
6265
6266 out_unlock:
6267         mmiowb();
6268
6269         return NETDEV_TX_OK;
6270
6271 dma_error:
6272         tg3_skb_error_unmap(tnapi, skb, i);
6273         dev_kfree_skb(skb);
6274         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6275         return NETDEV_TX_OK;
6276 }
6277
6278 static void tg3_set_loopback(struct net_device *dev, u32 features)
6279 {
6280         struct tg3 *tp = netdev_priv(dev);
6281
6282         if (features & NETIF_F_LOOPBACK) {
6283                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6284                         return;
6285
6286                 /*
6287                  * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
6288                  * loopback mode if Half-Duplex mode was negotiated earlier.
6289                  */
6290                 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
6291
6292                 /* Enable internal MAC loopback mode */
6293                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6294                 spin_lock_bh(&tp->lock);
6295                 tw32(MAC_MODE, tp->mac_mode);
6296                 netif_carrier_on(tp->dev);
6297                 spin_unlock_bh(&tp->lock);
6298                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6299         } else {
6300                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6301                         return;
6302
6303                 /* Disable internal MAC loopback mode */
6304                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6305                 spin_lock_bh(&tp->lock);
6306                 tw32(MAC_MODE, tp->mac_mode);
6307                 /* Force link status check */
6308                 tg3_setup_phy(tp, 1);
6309                 spin_unlock_bh(&tp->lock);
6310                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6311         }
6312 }
6313
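     /* 5780-class chips cannot do TSO together with jumbo MTUs (see
      * tg3_set_mtu() below), so drop the TSO features in that case.
      */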
6314 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6315 {
6316         struct tg3 *tp = netdev_priv(dev);
6317
6318         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6319                 features &= ~NETIF_F_ALL_TSO;
6320
6321         return features;
6322 }
6323
6324 static int tg3_set_features(struct net_device *dev, u32 features)
6325 {
6326         u32 changed = dev->features ^ features;
6327
6328         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6329                 tg3_set_loopback(dev, features);
6330
6331         return 0;
6332 }
6333
6334 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6335                                int new_mtu)
6336 {
6337         dev->mtu = new_mtu;
6338
6339         if (new_mtu > ETH_DATA_LEN) {
6340                 if (tg3_flag(tp, 5780_CLASS)) {
6341                         netdev_update_features(dev);
6342                         tg3_flag_clear(tp, TSO_CAPABLE);
6343                 } else {
6344                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
6345                 }
6346         } else {
6347                 if (tg3_flag(tp, 5780_CLASS)) {
6348                         tg3_flag_set(tp, TSO_CAPABLE);
6349                         netdev_update_features(dev);
6350                 }
6351                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
6352         }
6353 }
6354
6355 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6356 {
6357         struct tg3 *tp = netdev_priv(dev);
6358         int err;
6359
6360         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6361                 return -EINVAL;
6362
6363         if (!netif_running(dev)) {
6364                 /* We'll just catch it later when the
6365                  * device is up'd.
6366                  */
6367                 tg3_set_mtu(dev, tp, new_mtu);
6368                 return 0;
6369         }
6370
6371         tg3_phy_stop(tp);
6372
6373         tg3_netif_stop(tp);
6374
6375         tg3_full_lock(tp, 1);
6376
6377         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6378
6379         tg3_set_mtu(dev, tp, new_mtu);
6380
6381         err = tg3_restart_hw(tp, 0);
6382
6383         if (!err)
6384                 tg3_netif_start(tp);
6385
6386         tg3_full_unlock(tp);
6387
6388         if (!err)
6389                 tg3_phy_start(tp);
6390
6391         return err;
6392 }
6393
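     /* Release posted rx buffers.  For the extra per-vector rings only
      * the window between the consumer and producer indices holds live
      * skbs; the default ring is swept in full.
      */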
6394 static void tg3_rx_prodring_free(struct tg3 *tp,
6395                                  struct tg3_rx_prodring_set *tpr)
6396 {
6397         int i;
6398
6399         if (tpr != &tp->napi[0].prodring) {
6400                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6401                      i = (i + 1) & tp->rx_std_ring_mask)
6402                         tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6403                                         tp->rx_pkt_map_sz);
6404
6405                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
6406                         for (i = tpr->rx_jmb_cons_idx;
6407                              i != tpr->rx_jmb_prod_idx;
6408                              i = (i + 1) & tp->rx_jmb_ring_mask) {
6409                                 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6410                                                 TG3_RX_JMB_MAP_SZ);
6411                         }
6412                 }
6413
6414                 return;
6415         }
6416
6417         for (i = 0; i <= tp->rx_std_ring_mask; i++)
6418                 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6419                                 tp->rx_pkt_map_sz);
6420
6421         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6422                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6423                         tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6424                                         TG3_RX_JMB_MAP_SZ);
6425         }
6426 }
6427
6428 /* Initialize rx rings for packet processing.
6429  *
6430  * The chip has been shut down and the driver detached from
6431  * the networking stack, so no interrupts or new tx packets will
6432  * end up in the driver.  tp->{tx,}lock are held and thus
6433  * we may not sleep.
6434  */
6435 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6436                                  struct tg3_rx_prodring_set *tpr)
6437 {
6438         u32 i, rx_pkt_dma_sz;
6439
6440         tpr->rx_std_cons_idx = 0;
6441         tpr->rx_std_prod_idx = 0;
6442         tpr->rx_jmb_cons_idx = 0;
6443         tpr->rx_jmb_prod_idx = 0;
6444
6445         if (tpr != &tp->napi[0].prodring) {
6446                 memset(&tpr->rx_std_buffers[0], 0,
6447                        TG3_RX_STD_BUFF_RING_SIZE(tp));
6448                 if (tpr->rx_jmb_buffers)
6449                         memset(&tpr->rx_jmb_buffers[0], 0,
6450                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
6451                 goto done;
6452         }
6453
6454         /* Zero out all descriptors. */
6455         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6456
6457         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6458         if (tg3_flag(tp, 5780_CLASS) &&
6459             tp->dev->mtu > ETH_DATA_LEN)
6460                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6461         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6462
6463         /* Initialize invariants of the rings, we only set this
6464          * stuff once.  This works because the card does not
6465          * write into the rx buffer posting rings.
6466          */
6467         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6468                 struct tg3_rx_buffer_desc *rxd;
6469
6470                 rxd = &tpr->rx_std[i];
6471                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6472                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6473                 rxd->opaque = (RXD_OPAQUE_RING_STD |
6474                                (i << RXD_OPAQUE_INDEX_SHIFT));
6475         }
6476
6477         /* Now allocate fresh SKBs for each rx ring. */
6478         for (i = 0; i < tp->rx_pending; i++) {
6479                 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6480                         netdev_warn(tp->dev,
6481                                     "Using a smaller RX standard ring. Only "
6482                                     "%d out of %d buffers were allocated "
6483                                     "successfully\n", i, tp->rx_pending);
6484                         if (i == 0)
6485                                 goto initfail;
6486                         tp->rx_pending = i;
6487                         break;
6488                 }
6489         }
6490
6491         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
6492                 goto done;
6493
6494         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6495
6496         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
6497                 goto done;
6498
6499         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6500                 struct tg3_rx_buffer_desc *rxd;
6501
6502                 rxd = &tpr->rx_jmb[i].std;
6503                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6504                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6505                                   RXD_FLAG_JUMBO;
6506                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6507                        (i << RXD_OPAQUE_INDEX_SHIFT));
6508         }
6509
6510         for (i = 0; i < tp->rx_jumbo_pending; i++) {
6511                 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6512                         netdev_warn(tp->dev,
6513                                     "Using a smaller RX jumbo ring. Only %d "
6514                                     "out of %d buffers were allocated "
6515                                     "successfully\n", i, tp->rx_jumbo_pending);
6516                         if (i == 0)
6517                                 goto initfail;
6518                         tp->rx_jumbo_pending = i;
6519                         break;
6520                 }
6521         }
6522
6523 done:
6524         return 0;
6525
6526 initfail:
6527         tg3_rx_prodring_free(tp, tpr);
6528         return -ENOMEM;
6529 }
6530
6531 static void tg3_rx_prodring_fini(struct tg3 *tp,
6532                                  struct tg3_rx_prodring_set *tpr)
6533 {
6534         kfree(tpr->rx_std_buffers);
6535         tpr->rx_std_buffers = NULL;
6536         kfree(tpr->rx_jmb_buffers);
6537         tpr->rx_jmb_buffers = NULL;
6538         if (tpr->rx_std) {
6539                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6540                                   tpr->rx_std, tpr->rx_std_mapping);
6541                 tpr->rx_std = NULL;
6542         }
6543         if (tpr->rx_jmb) {
6544                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6545                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
6546                 tpr->rx_jmb = NULL;
6547         }
6548 }
6549
6550 static int tg3_rx_prodring_init(struct tg3 *tp,
6551                                 struct tg3_rx_prodring_set *tpr)
6552 {
6553         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6554                                       GFP_KERNEL);
6555         if (!tpr->rx_std_buffers)
6556                 return -ENOMEM;
6557
6558         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6559                                          TG3_RX_STD_RING_BYTES(tp),
6560                                          &tpr->rx_std_mapping,
6561                                          GFP_KERNEL);
6562         if (!tpr->rx_std)
6563                 goto err_out;
6564
6565         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6566                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6567                                               GFP_KERNEL);
6568                 if (!tpr->rx_jmb_buffers)
6569                         goto err_out;
6570
6571                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6572                                                  TG3_RX_JMB_RING_BYTES(tp),
6573                                                  &tpr->rx_jmb_mapping,
6574                                                  GFP_KERNEL);
6575                 if (!tpr->rx_jmb)
6576                         goto err_out;
6577         }
6578
6579         return 0;
6580
6581 err_out:
6582         tg3_rx_prodring_fini(tp, tpr);
6583         return -ENOMEM;
6584 }
6585
6586 /* Free up pending packets in all rx/tx rings.
6587  *
6588  * The chip has been shut down and the driver detached from
6589  * the networking, so no interrupts or new tx packets will
6590  * the networking stack, so no interrupts or new tx packets will
6591  * in an interrupt context and thus may sleep.
6592  */
6593 static void tg3_free_rings(struct tg3 *tp)
6594 {
6595         int i, j;
6596
6597         for (j = 0; j < tp->irq_cnt; j++) {
6598                 struct tg3_napi *tnapi = &tp->napi[j];
6599
6600                 tg3_rx_prodring_free(tp, &tnapi->prodring);
6601
6602                 if (!tnapi->tx_buffers)
6603                         continue;
6604
6605                 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6606                         struct ring_info *txp;
6607                         struct sk_buff *skb;
6608                         unsigned int k;
6609
6610                         txp = &tnapi->tx_buffers[i];
6611                         skb = txp->skb;
6612
6613                         if (skb == NULL) {
6614                                 i++;
6615                                 continue;
6616                         }
6617
6618                         pci_unmap_single(tp->pdev,
6619                                          dma_unmap_addr(txp, mapping),
6620                                          skb_headlen(skb),
6621                                          PCI_DMA_TODEVICE);
6622                         txp->skb = NULL;
6623
6624                         i++;
6625
6626                         for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6627                                 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6628                                 pci_unmap_page(tp->pdev,
6629                                                dma_unmap_addr(txp, mapping),
6630                                                skb_shinfo(skb)->frags[k].size,
6631                                                PCI_DMA_TODEVICE);
6632                                 i++;
6633                         }
6634
6635                         dev_kfree_skb_any(skb);
6636                 }
6637         }
6638 }
6639
6640 /* Initialize tx/rx rings for packet processing.
6641  *
6642  * The chip has been shut down and the driver detached from
6643  * the networking, so no interrupts or new tx packets will
6644  * the networking stack, so no interrupts or new tx packets will
6645  * we may not sleep.
6646  */
6647 static int tg3_init_rings(struct tg3 *tp)
6648 {
6649         int i;
6650
6651         /* Free up all the SKBs. */
6652         tg3_free_rings(tp);
6653
6654         for (i = 0; i < tp->irq_cnt; i++) {
6655                 struct tg3_napi *tnapi = &tp->napi[i];
6656
6657                 tnapi->last_tag = 0;
6658                 tnapi->last_irq_tag = 0;
6659                 tnapi->hw_status->status = 0;
6660                 tnapi->hw_status->status_tag = 0;
6661                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6662
6663                 tnapi->tx_prod = 0;
6664                 tnapi->tx_cons = 0;
6665                 if (tnapi->tx_ring)
6666                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6667
6668                 tnapi->rx_rcb_ptr = 0;
6669                 if (tnapi->rx_rcb)
6670                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6671
6672                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6673                         tg3_free_rings(tp);
6674                         return -ENOMEM;
6675                 }
6676         }
6677
6678         return 0;
6679 }
6680
6681 /*
6682  * Must not be invoked with interrupt sources disabled and
6683  * the hardware shut down.
6684  */
6685 static void tg3_free_consistent(struct tg3 *tp)
6686 {
6687         int i;
6688
6689         for (i = 0; i < tp->irq_cnt; i++) {
6690                 struct tg3_napi *tnapi = &tp->napi[i];
6691
6692                 if (tnapi->tx_ring) {
6693                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6694                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
6695                         tnapi->tx_ring = NULL;
6696                 }
6697
6698                 kfree(tnapi->tx_buffers);
6699                 tnapi->tx_buffers = NULL;
6700
6701                 if (tnapi->rx_rcb) {
6702                         dma_free_coherent(&tp->pdev->dev,
6703                                           TG3_RX_RCB_RING_BYTES(tp),
6704                                           tnapi->rx_rcb,
6705                                           tnapi->rx_rcb_mapping);
6706                         tnapi->rx_rcb = NULL;
6707                 }
6708
6709                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6710
6711                 if (tnapi->hw_status) {
6712                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6713                                           tnapi->hw_status,
6714                                           tnapi->status_mapping);
6715                         tnapi->hw_status = NULL;
6716                 }
6717         }
6718
6719         if (tp->hw_stats) {
6720                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6721                                   tp->hw_stats, tp->stats_mapping);
6722                 tp->hw_stats = NULL;
6723         }
6724 }
6725
6726 /*
6727  * Must not be invoked with interrupt sources disabled and
6728  * the hardware shut down.  Can sleep.
6729  */
6730 static int tg3_alloc_consistent(struct tg3 *tp)
6731 {
6732         int i;
6733
6734         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6735                                           sizeof(struct tg3_hw_stats),
6736                                           &tp->stats_mapping,
6737                                           GFP_KERNEL);
6738         if (!tp->hw_stats)
6739                 goto err_out;
6740
6741         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6742
6743         for (i = 0; i < tp->irq_cnt; i++) {
6744                 struct tg3_napi *tnapi = &tp->napi[i];
6745                 struct tg3_hw_status *sblk;
6746
6747                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6748                                                       TG3_HW_STATUS_SIZE,
6749                                                       &tnapi->status_mapping,
6750                                                       GFP_KERNEL);
6751                 if (!tnapi->hw_status)
6752                         goto err_out;
6753
6754                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6755                 sblk = tnapi->hw_status;
6756
6757                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6758                         goto err_out;
6759
6760                 /* If multivector TSS is enabled, vector 0 does not handle
6761                  * tx interrupts.  Don't allocate any resources for it.
6762                  */
6763                 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
6764                     (i && tg3_flag(tp, ENABLE_TSS))) {
6765                         tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6766                                                     TG3_TX_RING_SIZE,
6767                                                     GFP_KERNEL);
6768                         if (!tnapi->tx_buffers)
6769                                 goto err_out;
6770
6771                         tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6772                                                             TG3_TX_RING_BYTES,
6773                                                         &tnapi->tx_desc_mapping,
6774                                                             GFP_KERNEL);
6775                         if (!tnapi->tx_ring)
6776                                 goto err_out;
6777                 }
6778
6779                 /*
6780                  * When RSS is enabled, the status block format changes
6781                  * slightly.  The "rx_jumbo_consumer", "reserved",
6782                  * and "rx_mini_consumer" members get mapped to the
6783                  * other three rx return ring producer indexes.
6784                  */
6785                 switch (i) {
6786                 default:
6787                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6788                         break;
6789                 case 2:
6790                         tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6791                         break;
6792                 case 3:
6793                         tnapi->rx_rcb_prod_idx = &sblk->reserved;
6794                         break;
6795                 case 4:
6796                         tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6797                         break;
6798                 }
6799
6800                 /*
6801                  * If multivector RSS is enabled, vector 0 does not handle
6802                  * rx or tx interrupts.  Don't allocate any resources for it.
6803                  */
6804                 if (!i && tg3_flag(tp, ENABLE_RSS))
6805                         continue;
6806
6807                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6808                                                    TG3_RX_RCB_RING_BYTES(tp),
6809                                                    &tnapi->rx_rcb_mapping,
6810                                                    GFP_KERNEL);
6811                 if (!tnapi->rx_rcb)
6812                         goto err_out;
6813
6814                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6815         }
6816
6817         return 0;
6818
6819 err_out:
6820         tg3_free_consistent(tp);
6821         return -ENOMEM;
6822 }
6823
6824 #define MAX_WAIT_CNT 1000
6825
6826 /* To stop a block, clear the enable bit and poll till it
6827  * clears.  tp->lock is held.
6828  */
6829 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6830 {
6831         unsigned int i;
6832         u32 val;
6833
6834         if (tg3_flag(tp, 5705_PLUS)) {
6835                 switch (ofs) {
6836                 case RCVLSC_MODE:
6837                 case DMAC_MODE:
6838                 case MBFREE_MODE:
6839                 case BUFMGR_MODE:
6840                 case MEMARB_MODE:
6841                         /* We can't enable/disable these bits on the
6842                          * 5705/5750, so just report success.
6843                          */
6844                         return 0;
6845
6846                 default:
6847                         break;
6848                 }
6849         }
6850
6851         val = tr32(ofs);
6852         val &= ~enable_bit;
6853         tw32_f(ofs, val);
6854
6855         for (i = 0; i < MAX_WAIT_CNT; i++) {
6856                 udelay(100);
6857                 val = tr32(ofs);
6858                 if ((val & enable_bit) == 0)
6859                         break;
6860         }
6861
6862         if (i == MAX_WAIT_CNT && !silent) {
6863                 dev_err(&tp->pdev->dev,
6864                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6865                         ofs, enable_bit);
6866                 return -ENODEV;
6867         }
6868
6869         return 0;
6870 }
6871
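     /* Quiesce all DMA/packet-processing blocks, roughly in packet-path
      * order: receive engines first, then the send side, then host
      * coalescing and the buffer/memory managers.
      */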
6872 /* tp->lock is held. */
6873 static int tg3_abort_hw(struct tg3 *tp, int silent)
6874 {
6875         int i, err;
6876
6877         tg3_disable_ints(tp);
6878
6879         tp->rx_mode &= ~RX_MODE_ENABLE;
6880         tw32_f(MAC_RX_MODE, tp->rx_mode);
6881         udelay(10);
6882
6883         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6884         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6885         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6886         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6887         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6888         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6889
6890         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6891         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6892         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6893         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6894         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6895         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6896         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6897
6898         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6899         tw32_f(MAC_MODE, tp->mac_mode);
6900         udelay(40);
6901
6902         tp->tx_mode &= ~TX_MODE_ENABLE;
6903         tw32_f(MAC_TX_MODE, tp->tx_mode);
6904
6905         for (i = 0; i < MAX_WAIT_CNT; i++) {
6906                 udelay(100);
6907                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6908                         break;
6909         }
6910         if (i >= MAX_WAIT_CNT) {
6911                 dev_err(&tp->pdev->dev,
6912                         "%s timed out, TX_MODE_ENABLE will not clear "
6913                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6914                 err |= -ENODEV;
6915         }
6916
6917         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6918         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6919         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6920
6921         tw32(FTQ_RESET, 0xffffffff);
6922         tw32(FTQ_RESET, 0x00000000);
6923
6924         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6925         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6926
6927         for (i = 0; i < tp->irq_cnt; i++) {
6928                 struct tg3_napi *tnapi = &tp->napi[i];
6929                 if (tnapi->hw_status)
6930                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6931         }
6932         if (tp->hw_stats)
6933                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6934
6935         return err;
6936 }
6937
6938 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6939 {
6940         int i;
6941         u32 apedata;
6942
6943         /* NCSI does not support APE events */
6944         if (tg3_flag(tp, APE_HAS_NCSI))
6945                 return;
6946
6947         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6948         if (apedata != APE_SEG_SIG_MAGIC)
6949                 return;
6950
6951         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6952         if (!(apedata & APE_FW_STATUS_READY))
6953                 return;
6954
6955         /* Wait for up to 1 millisecond for APE to service previous event. */
6956         for (i = 0; i < 10; i++) {
6957                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6958                         return;
6959
6960                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6961
6962                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6963                         tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6964                                         event | APE_EVENT_STATUS_EVENT_PENDING);
6965
6966                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6967
6968                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6969                         break;
6970
6971                 udelay(100);
6972         }
6973
6974         if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6975                 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6976 }
6977
6978 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6979 {
6980         u32 event;
6981         u32 apedata;
6982
6983         if (!tg3_flag(tp, ENABLE_APE))
6984                 return;
6985
6986         switch (kind) {
6987         case RESET_KIND_INIT:
6988                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6989                                 APE_HOST_SEG_SIG_MAGIC);
6990                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6991                                 APE_HOST_SEG_LEN_MAGIC);
6992                 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6993                 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6994                 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6995                         APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6996                 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6997                                 APE_HOST_BEHAV_NO_PHYLOCK);
6998                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
6999                                     TG3_APE_HOST_DRVR_STATE_START);
7000
7001                 event = APE_EVENT_STATUS_STATE_START;
7002                 break;
7003         case RESET_KIND_SHUTDOWN:
7004                 /* With the interface we are currently using,
7005                  * APE does not track driver state.  Wiping
7006                  * out the HOST SEGMENT SIGNATURE forces
7007                  * the APE to assume OS absent status.
7008                  */
7009                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
7010
7011                 if (device_may_wakeup(&tp->pdev->dev) &&
7012                     tg3_flag(tp, WOL_ENABLE)) {
7013                         tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
7014                                             TG3_APE_HOST_WOL_SPEED_AUTO);
7015                         apedata = TG3_APE_HOST_DRVR_STATE_WOL;
7016                 } else
7017                         apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
7018
7019                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
7020
7021                 event = APE_EVENT_STATUS_STATE_UNLOAD;
7022                 break;
7023         case RESET_KIND_SUSPEND:
7024                 event = APE_EVENT_STATUS_STATE_SUSPEND;
7025                 break;
7026         default:
7027                 return;
7028         }
7029
7030         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
7031
7032         tg3_ape_send_event(tp, event);
7033 }
7034
7035 /* tp->lock is held. */
7036 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
7037 {
7038         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
7039                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
7040
7041         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
7042                 switch (kind) {
7043                 case RESET_KIND_INIT:
7044                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7045                                       DRV_STATE_START);
7046                         break;
7047
7048                 case RESET_KIND_SHUTDOWN:
7049                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7050                                       DRV_STATE_UNLOAD);
7051                         break;
7052
7053                 case RESET_KIND_SUSPEND:
7054                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7055                                       DRV_STATE_SUSPEND);
7056                         break;
7057
7058                 default:
7059                         break;
7060                 }
7061         }
7062
7063         if (kind == RESET_KIND_INIT ||
7064             kind == RESET_KIND_SUSPEND)
7065                 tg3_ape_driver_state_change(tp, kind);
7066 }
7067
7068 /* tp->lock is held. */
7069 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
7070 {
7071         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
7072                 switch (kind) {
7073                 case RESET_KIND_INIT:
7074                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7075                                       DRV_STATE_START_DONE);
7076                         break;
7077
7078                 case RESET_KIND_SHUTDOWN:
7079                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7080                                       DRV_STATE_UNLOAD_DONE);
7081                         break;
7082
7083                 default:
7084                         break;
7085                 }
7086         }
7087
7088         if (kind == RESET_KIND_SHUTDOWN)
7089                 tg3_ape_driver_state_change(tp, kind);
7090 }
7091
7092 /* tp->lock is held. */
7093 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
7094 {
7095         if (tg3_flag(tp, ENABLE_ASF)) {
7096                 switch (kind) {
7097                 case RESET_KIND_INIT:
7098                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7099                                       DRV_STATE_START);
7100                         break;
7101
7102                 case RESET_KIND_SHUTDOWN:
7103                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7104                                       DRV_STATE_UNLOAD);
7105                         break;
7106
7107                 case RESET_KIND_SUSPEND:
7108                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
7109                                       DRV_STATE_SUSPEND);
7110                         break;
7111
7112                 default:
7113                         break;
7114                 }
7115         }
7116 }
7117
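     /* Wait for the bootcode to finish initializing.  The 5906 signals
      * completion via VCPU_STATUS; all other chips complement the magic
      * value that tg3_write_sig_pre_reset() left in the firmware
      * mailbox.  A timeout here is not an error: some parts (e.g. Sun
      * onboard NICs) legitimately run without firmware.
      */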
7118 static int tg3_poll_fw(struct tg3 *tp)
7119 {
7120         int i;
7121         u32 val;
7122
7123         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7124                 /* Wait up to 20ms for init done. */
7125                 for (i = 0; i < 200; i++) {
7126                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
7127                                 return 0;
7128                         udelay(100);
7129                 }
7130                 return -ENODEV;
7131         }
7132
7133         /* Wait for firmware initialization to complete. */
7134         for (i = 0; i < 100000; i++) {
7135                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
7136                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
7137                         break;
7138                 udelay(10);
7139         }
7140
7141         /* Chip might not be fitted with firmware.  Some Sun onboard
7142          * parts are configured like that.  So don't signal the timeout
7143          * of the above loop as an error, but do report the lack of
7144          * running firmware once.
7145          */
7146         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
7147                 tg3_flag_set(tp, NO_FWARE_REPORTED);
7148
7149                 netdev_info(tp->dev, "No firmware running\n");
7150         }
7151
7152         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7153                 /* The 57765 A0 needs a little more
7154                  * time to do some important work.
7155                  */
7156                 mdelay(10);
7157         }
7158
7159         return 0;
7160 }
7161
7162 /* Save PCI command register before chip reset */
7163 static void tg3_save_pci_state(struct tg3 *tp)
7164 {
7165         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7166 }
7167
7168 /* Restore PCI state after chip reset */
7169 static void tg3_restore_pci_state(struct tg3 *tp)
7170 {
7171         u32 val;
7172
7173         /* Re-enable indirect register accesses. */
7174         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7175                                tp->misc_host_ctrl);
7176
7177         /* Set MAX PCI retry to zero. */
7178         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7179         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7180             tg3_flag(tp, PCIX_MODE))
7181                 val |= PCISTATE_RETRY_SAME_DMA;
7182         /* Allow reads and writes to the APE register and memory space. */
7183         if (tg3_flag(tp, ENABLE_APE))
7184                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7185                        PCISTATE_ALLOW_APE_SHMEM_WR |
7186                        PCISTATE_ALLOW_APE_PSPACE_WR;
7187         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7188
7189         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7190
7191         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7192                 if (tg3_flag(tp, PCI_EXPRESS))
7193                         pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7194                 else {
7195                         pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7196                                               tp->pci_cacheline_sz);
7197                         pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7198                                               tp->pci_lat_timer);
7199                 }
7200         }
7201
7202         /* Make sure PCI-X relaxed ordering bit is clear. */
7203         if (tg3_flag(tp, PCIX_MODE)) {
7204                 u16 pcix_cmd;
7205
7206                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7207                                      &pcix_cmd);
7208                 pcix_cmd &= ~PCI_X_CMD_ERO;
7209                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7210                                       pcix_cmd);
7211         }
7212
7213         if (tg3_flag(tp, 5780_CLASS)) {
7214
7215                 /* Chip reset on 5780 will reset MSI enable bit,
7216                  * so need to restore it.
7217                  */
7218                 if (tg3_flag(tp, USING_MSI)) {
7219                         u16 ctrl;
7220
7221                         pci_read_config_word(tp->pdev,
7222                                              tp->msi_cap + PCI_MSI_FLAGS,
7223                                              &ctrl);
7224                         pci_write_config_word(tp->pdev,
7225                                               tp->msi_cap + PCI_MSI_FLAGS,
7226                                               ctrl | PCI_MSI_FLAGS_ENABLE);
7227                         val = tr32(MSGINT_MODE);
7228                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7229                 }
7230         }
7231 }
7232
7233 static void tg3_stop_fw(struct tg3 *);
7234
7235 /* tp->lock is held. */
7236 static int tg3_chip_reset(struct tg3 *tp)
7237 {
7238         u32 val;
7239         void (*write_op)(struct tg3 *, u32, u32);
7240         int i, err;
7241
7242         tg3_nvram_lock(tp);
7243
7244         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7245
7246         /* No matching tg3_nvram_unlock() after this because
7247          * chip reset below will undo the nvram lock.
7248          */
7249         tp->nvram_lock_cnt = 0;
7250
7251         /* GRC_MISC_CFG core clock reset will clear the memory
7252          * enable bit in PCI register 4 and the MSI enable bit
7253          * on some chips, so we save relevant registers here.
7254          */
7255         tg3_save_pci_state(tp);
7256
7257         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7258             tg3_flag(tp, 5755_PLUS))
7259                 tw32(GRC_FASTBOOT_PC, 0);
7260
7261         /*
7262          * We must avoid the readl() that normally takes place.
7263          * It locks machines, causes machine checks, and does
7264          * other fun things.  So, temporarily disable the 5701
7265          * hardware workaround while we do the reset.
7266          */
7267         write_op = tp->write32;
7268         if (write_op == tg3_write_flush_reg32)
7269                 tp->write32 = tg3_write32;
7270
7271         /* Prevent the irq handler from reading or writing PCI registers
7272          * during chip reset when the memory enable bit in the PCI command
7273          * register may be cleared.  The chip does not generate interrupt
7274          * at this time, but the irq handler may still be called due to irq
7275          * sharing or irqpoll.
7276          */
7277         tg3_flag_set(tp, CHIP_RESETTING);
7278         for (i = 0; i < tp->irq_cnt; i++) {
7279                 struct tg3_napi *tnapi = &tp->napi[i];
7280                 if (tnapi->hw_status) {
7281                         tnapi->hw_status->status = 0;
7282                         tnapi->hw_status->status_tag = 0;
7283                 }
7284                 tnapi->last_tag = 0;
7285                 tnapi->last_irq_tag = 0;
7286         }
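             /* Make the status block and tag clears above visible to
              * any IRQ handler that is already running before we wait
              * for the handlers to finish below.
              */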
7287         smp_mb();
7288
7289         for (i = 0; i < tp->irq_cnt; i++)
7290                 synchronize_irq(tp->napi[i].irq_vec);
7291
7292         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7293                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7294                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7295         }
7296
7297         /* do the reset */
7298         val = GRC_MISC_CFG_CORECLK_RESET;
7299
7300         if (tg3_flag(tp, PCI_EXPRESS)) {
7301                 /* Force PCIe 1.0a mode */
7302                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7303                     !tg3_flag(tp, 57765_PLUS) &&
7304                     tr32(TG3_PCIE_PHY_TSTCTL) ==
7305                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7306                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7307
7308                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7309                         tw32(GRC_MISC_CFG, (1 << 29));
7310                         val |= (1 << 29);
7311                 }
7312         }
7313
7314         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7315                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7316                 tw32(GRC_VCPU_EXT_CTRL,
7317                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7318         }
7319
7320         /* Manage gphy power for all CPMU-absent PCIe devices. */
7321         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7322                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7323
7324         tw32(GRC_MISC_CFG, val);
7325
7326         /* restore 5701 hardware bug workaround write method */
7327         tp->write32 = write_op;
7328
7329         /* Unfortunately, we have to delay before the PCI read back.
7330          * Some 575X chips will not even respond to a PCI cfg access
7331          * when the reset command is given to the chip.
7332          *
7333          * How do these hardware designers expect things to work
7334          * properly if the PCI write is posted for a long period
7335          * of time?  It is always necessary to have some method by
7336          * which a register read back can occur to push out the
7337          * write that performs the reset.
7338          *
7339          * For most tg3 variants the trick below was working.
7340          * Ho hum...
7341          */
7342         udelay(120);
7343
7344         /* Flush PCI posted writes.  The normal MMIO registers
7345          * are inaccessible at this time so this is the only
7346          * way to do this reliably (actually, this is no longer
7347          * the case, see above).  I tried to use indirect
7348          * register read/write but this upset some 5701 variants.
7349          */
7350         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7351
7352         udelay(120);
7353
7354         if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7355                 u16 val16;
7356
7357                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7358                         int i;
7359                         u32 cfg_val;
7360
7361                         /* Wait for link training to complete.  */
7362                         for (i = 0; i < 5000; i++)
7363                                 udelay(100);
7364
7365                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7366                         pci_write_config_dword(tp->pdev, 0xc4,
7367                                                cfg_val | (1 << 15));
7368                 }
7369
7370                 /* Clear the "no snoop" and "relaxed ordering" bits. */
7371                 pci_read_config_word(tp->pdev,
7372                                      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7373                                      &val16);
7374                 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7375                            PCI_EXP_DEVCTL_NOSNOOP_EN);
7376                 /*
7377                  * Older PCIe devices only support the 128 byte
7378                  * MPS setting.  Enforce the restriction.
7379                  */
7380                 if (!tg3_flag(tp, CPMU_PRESENT))
7381                         val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7382                 pci_write_config_word(tp->pdev,
7383                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7384                                       val16);
7385
7386                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7387
7388                 /* Clear error status */
7389                 pci_write_config_word(tp->pdev,
7390                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7391                                       PCI_EXP_DEVSTA_CED |
7392                                       PCI_EXP_DEVSTA_NFED |
7393                                       PCI_EXP_DEVSTA_FED |
7394                                       PCI_EXP_DEVSTA_URD);
7395         }
7396
7397         tg3_restore_pci_state(tp);
7398
7399         tg3_flag_clear(tp, CHIP_RESETTING);
7400         tg3_flag_clear(tp, ERROR_PROCESSED);
7401
7402         val = 0;
7403         if (tg3_flag(tp, 5780_CLASS))
7404                 val = tr32(MEMARB_MODE);
7405         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7406
7407         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7408                 tg3_stop_fw(tp);
7409                 tw32(0x5000, 0x400);
7410         }
7411
7412         tw32(GRC_MODE, tp->grc_mode);
7413
7414         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7415                 val = tr32(0xc4);
7416
7417                 tw32(0xc4, val | (1 << 15));
7418         }
7419
7420         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7421             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7422                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7423                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7424                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7425                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7426         }
7427
7428         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7429                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7430                 val = tp->mac_mode;
7431         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7432                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7433                 val = tp->mac_mode;
7434         } else
7435                 val = 0;
7436
7437         tw32_f(MAC_MODE, val);
7438         udelay(40);
7439
7440         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7441
7442         err = tg3_poll_fw(tp);
7443         if (err)
7444                 return err;
7445
7446         tg3_mdio_start(tp);
7447
7448         if (tg3_flag(tp, PCI_EXPRESS) &&
7449             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7450             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7451             !tg3_flag(tp, 57765_PLUS)) {
7452                 val = tr32(0x7c00);
7453
7454                 tw32(0x7c00, val | (1 << 25));
7455         }
7456
7457         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7458                 val = tr32(TG3_CPMU_CLCK_ORIDE);
7459                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7460         }
7461
7462         /* Reprobe ASF enable state.  */
7463         tg3_flag_clear(tp, ENABLE_ASF);
7464         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7465         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7466         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7467                 u32 nic_cfg;
7468
7469                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7470                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7471                         tg3_flag_set(tp, ENABLE_ASF);
7472                         tp->last_event_jiffies = jiffies;
7473                         if (tg3_flag(tp, 5750_PLUS))
7474                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7475                 }
7476         }
7477
7478         return 0;
7479 }
7480
7481 /* tp->lock is held. */
7482 static void tg3_stop_fw(struct tg3 *tp)
7483 {
7484         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
7485                 /* Wait for RX cpu to ACK the previous event. */
7486                 tg3_wait_for_event_ack(tp);
7487
7488                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7489
7490                 tg3_generate_fw_event(tp);
7491
7492                 /* Wait for RX cpu to ACK this event. */
7493                 tg3_wait_for_event_ack(tp);
7494         }
7495 }
7496
7497 /* tp->lock is held. */
7498 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7499 {
7500         int err;
7501
7502         tg3_stop_fw(tp);
7503
7504         tg3_write_sig_pre_reset(tp, kind);
7505
7506         tg3_abort_hw(tp, silent);
7507         err = tg3_chip_reset(tp);
7508
7509         __tg3_set_mac_addr(tp, 0);
7510
7511         tg3_write_sig_legacy(tp, kind);
7512         tg3_write_sig_post_reset(tp, kind);
7513
7514         if (err)
7515                 return err;
7516
7517         return 0;
7518 }
7519
7520 #define RX_CPU_SCRATCH_BASE     0x30000
7521 #define RX_CPU_SCRATCH_SIZE     0x04000
7522 #define TX_CPU_SCRATCH_BASE     0x34000
7523 #define TX_CPU_SCRATCH_SIZE     0x04000
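     /* Each on-chip CPU gets a 16 kB scratchpad in NIC SRAM; firmware
      * images are copied there before the CPU is released from halt.
      */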
7524
7525 /* tp->lock is held. */
7526 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7527 {
7528         int i;
7529
7530         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
7531
7532         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7533                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7534
7535                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7536                 return 0;
7537         }
7538         if (offset == RX_CPU_BASE) {
7539                 for (i = 0; i < 10000; i++) {
7540                         tw32(offset + CPU_STATE, 0xffffffff);
7541                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
7542                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7543                                 break;
7544                 }
7545
7546                 tw32(offset + CPU_STATE, 0xffffffff);
7547                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
7548                 udelay(10);
7549         } else {
7550                 for (i = 0; i < 10000; i++) {
7551                         tw32(offset + CPU_STATE, 0xffffffff);
7552                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
7553                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7554                                 break;
7555                 }
7556         }
7557
7558         if (i >= 10000) {
7559                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7560                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7561                 return -ENODEV;
7562         }
7563
7564         /* Clear firmware's nvram arbitration. */
7565         if (tg3_flag(tp, NVRAM))
7566                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7567         return 0;
7568 }
7569
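     /* Parsed firmware image: fw_base is the load/start address from
      * the blob header, fw_len the image length in bytes, and fw_data
      * the big-endian payload words.
      */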
7570 struct fw_info {
7571         unsigned int fw_base;
7572         unsigned int fw_len;
7573         const __be32 *fw_data;
7574 };
7575
7576 /* tp->lock is held. */
7577 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
7578                                  u32 cpu_scratch_base, int cpu_scratch_size,
                                      struct fw_info *info)
7579 {
7580         int err, lock_err, i;
7581         void (*write_op)(struct tg3 *, u32, u32);
7582
7583         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
7584                 netdev_err(tp->dev,
7585                            "%s: Trying to load TX cpu firmware on a 5705-class chip\n",
7586                            __func__);
7587                 return -EINVAL;
7588         }
7589
7590         if (tg3_flag(tp, 5705_PLUS))
7591                 write_op = tg3_write_mem;
7592         else
7593                 write_op = tg3_write_indirect_reg32;
7594
7595         /* It is possible that bootcode is still loading at this point.
7596          * Get the nvram lock before halting the cpu.
7597          */
7598         lock_err = tg3_nvram_lock(tp);
7599         err = tg3_halt_cpu(tp, cpu_base);
7600         if (!lock_err)
7601                 tg3_nvram_unlock(tp);
7602         if (err)
7603                 goto out;
7604
7605         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7606                 write_op(tp, cpu_scratch_base + i, 0);
7607         tw32(cpu_base + CPU_STATE, 0xffffffff);
7608         tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
7609         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7610                 write_op(tp, (cpu_scratch_base +
7611                               (info->fw_base & 0xffff) +
7612                               (i * sizeof(u32))),
7613                               be32_to_cpu(info->fw_data[i]));
7614
7615         err = 0;
7616
7617 out:
7618         return err;
7619 }
7620
7621 /* tp->lock is held. */
7622 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7623 {
7624         struct fw_info info;
7625         const __be32 *fw_data;
7626         int err, i;
7627
7628         fw_data = (void *)tp->fw->data;
7629
7630         /* The firmware blob starts with version numbers, followed by
7631          * the start address and length.  We use the complete image
7632          * length: length = end_address_of_bss - start_address_of_text.
7633          * The remainder is the blob, to be loaded contiguously from
7634          * the start address. */
7635
7636         info.fw_base = be32_to_cpu(fw_data[1]);
7637         info.fw_len = tp->fw->size - 12;
7638         info.fw_data = &fw_data[3];
7639
7640         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7641                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7642                                     &info);
7643         if (err)
7644                 return err;
7645
7646         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7647                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7648                                     &info);
7649         if (err)
7650                 return err;
7651
7652         /* Now startup only the RX cpu. */
7653         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7654         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7655
7656         for (i = 0; i < 5; i++) {
7657                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7658                         break;
7659                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7660                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
7661                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7662                 udelay(1000);
7663         }
7664         if (i >= 5) {
7665                 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
7666                            "should be %08x\n", __func__,
7667                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7668                 return -ENODEV;
7669         }
7670         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7671         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
7672
7673         return 0;
7674 }
7675
7676 /* tp->lock is held. */
7677 static int tg3_load_tso_firmware(struct tg3 *tp)
7678 {
7679         struct fw_info info;
7680         const __be32 *fw_data;
7681         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7682         int err, i;
7683
7684         if (tg3_flag(tp, HW_TSO_1) ||
7685             tg3_flag(tp, HW_TSO_2) ||
7686             tg3_flag(tp, HW_TSO_3))
7687                 return 0;
7688
7689         fw_data = (void *)tp->fw->data;
7690
7691         /* The firmware blob starts with version numbers, followed by
7692          * the start address and length.  We use the complete image
7693          * length: length = end_address_of_bss - start_address_of_text.
7694          * The remainder is the blob, to be loaded contiguously from
7695          * the start address. */
7696
7697         info.fw_base = be32_to_cpu(fw_data[1]);
7698         cpu_scratch_size = tp->fw_len;
7699         info.fw_len = tp->fw->size - 12;
7700         info.fw_data = &fw_data[3];
7701
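             /* The 5705 has no usable TX CPU for this purpose
              * (tg3_load_firmware_cpu rejects it), so its TSO firmware
              * runs on the RX CPU and is staged in the mbuf pool SRAM.
              * All other chips use the TX CPU scratchpad.
              */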
7702         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7703                 cpu_base = RX_CPU_BASE;
7704                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7705         } else {
7706                 cpu_base = TX_CPU_BASE;
7707                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7708                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7709         }
7710
7711         err = tg3_load_firmware_cpu(tp, cpu_base,
7712                                     cpu_scratch_base, cpu_scratch_size,
7713                                     &info);
7714         if (err)
7715                 return err;
7716
7717         /* Now startup the cpu. */
7718         tw32(cpu_base + CPU_STATE, 0xffffffff);
7719         tw32_f(cpu_base + CPU_PC, info.fw_base);
7720
7721         for (i = 0; i < 5; i++) {
7722                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7723                         break;
7724                 tw32(cpu_base + CPU_STATE, 0xffffffff);
7725                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
7726                 tw32_f(cpu_base + CPU_PC, info.fw_base);
7727                 udelay(1000);
7728         }
7729         if (i >= 5) {
7730                 netdev_err(tp->dev,
7731                            "%s fails to set CPU PC, is %08x should be %08x\n",
7732                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7733                 return -ENODEV;
7734         }
7735         tw32(cpu_base + CPU_STATE, 0xffffffff);
7736         tw32_f(cpu_base + CPU_MODE,  0x00000000);
7737         return 0;
7738 }
7739
7740
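     /* Set-MAC-address handler (wired up as .ndo_set_mac_address).
      * Validate the new address and, if the interface is running,
      * program it into the MAC address registers, skipping MAC
      * address 1 when the ASF firmware has it in use.
      */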
7741 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7742 {
7743         struct tg3 *tp = netdev_priv(dev);
7744         struct sockaddr *addr = p;
7745         int err = 0, skip_mac_1 = 0;
7746
7747         if (!is_valid_ether_addr(addr->sa_data))
7748                 return -EINVAL;
7749
7750         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7751
7752         if (!netif_running(dev))
7753                 return 0;
7754
7755         if (tg3_flag(tp, ENABLE_ASF)) {
7756                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7757
7758                 addr0_high = tr32(MAC_ADDR_0_HIGH);
7759                 addr0_low = tr32(MAC_ADDR_0_LOW);
7760                 addr1_high = tr32(MAC_ADDR_1_HIGH);
7761                 addr1_low = tr32(MAC_ADDR_1_LOW);
7762
7763                 /* Skip MAC addr 1 if ASF is using it. */
7764                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7765                     !(addr1_high == 0 && addr1_low == 0))
7766                         skip_mac_1 = 1;
7767         }
7768         spin_lock_bh(&tp->lock);
7769         __tg3_set_mac_addr(tp, skip_mac_1);
7770         spin_unlock_bh(&tp->lock);
7771
7772         return err;
7773 }
7774
7775 /* tp->lock is held. */
7776 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7777                            dma_addr_t mapping, u32 maxlen_flags,
7778                            u32 nic_addr)
7779 {
7780         tg3_write_mem(tp,
7781                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7782                       ((u64) mapping >> 32));
7783         tg3_write_mem(tp,
7784                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7785                       ((u64) mapping & 0xffffffff));
7786         tg3_write_mem(tp,
7787                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7788                        maxlen_flags);
7789
7790         if (!tg3_flag(tp, 5705_PLUS))
7791                 tg3_write_mem(tp,
7792                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7793                               nic_addr);
7794 }
7795
7796 static void __tg3_set_rx_mode(struct net_device *);
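     /* Apply the ethtool coalescing parameters to the host coalescing
      * engine.  Parameters owned by the per-vector TSS/RSS blocks are
      * zeroed in the default registers, and each extra MSI-X vector's
      * register block (stride 0x18) is programmed or cleared to match.
      */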
7797 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7798 {
7799         int i;
7800
7801         if (!tg3_flag(tp, ENABLE_TSS)) {
7802                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7803                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7804                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7805         } else {
7806                 tw32(HOSTCC_TXCOL_TICKS, 0);
7807                 tw32(HOSTCC_TXMAX_FRAMES, 0);
7808                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7809         }
7810
7811         if (!tg3_flag(tp, ENABLE_RSS)) {
7812                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7813                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7814                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7815         } else {
7816                 tw32(HOSTCC_RXCOL_TICKS, 0);
7817                 tw32(HOSTCC_RXMAX_FRAMES, 0);
7818                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7819         }
7820
7821         if (!tg3_flag(tp, 5705_PLUS)) {
7822                 u32 val = ec->stats_block_coalesce_usecs;
7823
7824                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7825                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7826
7827                 if (!netif_carrier_ok(tp->dev))
7828                         val = 0;
7829
7830                 tw32(HOSTCC_STAT_COAL_TICKS, val);
7831         }
7832
7833         for (i = 0; i < tp->irq_cnt - 1; i++) {
7834                 u32 reg;
7835
7836                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7837                 tw32(reg, ec->rx_coalesce_usecs);
7838                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7839                 tw32(reg, ec->rx_max_coalesced_frames);
7840                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7841                 tw32(reg, ec->rx_max_coalesced_frames_irq);
7842
7843                 if (tg3_flag(tp, ENABLE_TSS)) {
7844                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7845                         tw32(reg, ec->tx_coalesce_usecs);
7846                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7847                         tw32(reg, ec->tx_max_coalesced_frames);
7848                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7849                         tw32(reg, ec->tx_max_coalesced_frames_irq);
7850                 }
7851         }
7852
7853         for (; i < tp->irq_max - 1; i++) {
7854                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7855                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7856                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7857
7858                 if (tg3_flag(tp, ENABLE_TSS)) {
7859                         tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7860                         tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7861                         tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7862                 }
7863         }
7864 }
7865
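     /* Return the rings to their power-on state: disable every transmit
      * and receive-return ring except the first, quiesce the interrupt
      * mailboxes, and point the status block(s) at freshly cleared host
      * memory.
      */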
7866 /* tp->lock is held. */
7867 static void tg3_rings_reset(struct tg3 *tp)
7868 {
7869         int i;
7870         u32 stblk, txrcb, rxrcb, limit;
7871         struct tg3_napi *tnapi = &tp->napi[0];
7872
7873         /* Disable all transmit rings but the first. */
7874         if (!tg3_flag(tp, 5705_PLUS))
7875                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7876         else if (tg3_flag(tp, 5717_PLUS))
7877                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7878         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7879                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7880         else
7881                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7882
7883         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7884              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7885                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7886                               BDINFO_FLAGS_DISABLED);
7887
7889         /* Disable all receive return rings but the first. */
7890         if (tg3_flag(tp, 5717_PLUS))
7891                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7892         else if (!tg3_flag(tp, 5705_PLUS))
7893                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7894         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7895                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7896                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7897         else
7898                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7899
7900         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7901              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7902                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7903                               BDINFO_FLAGS_DISABLED);
7904
7905         /* Disable interrupts */
7906         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7907         tp->napi[0].chk_msi_cnt = 0;
7908         tp->napi[0].last_rx_cons = 0;
7909         tp->napi[0].last_tx_cons = 0;
7910
7911         /* Zero mailbox registers. */
7912         if (tg3_flag(tp, SUPPORT_MSIX)) {
7913                 for (i = 1; i < tp->irq_max; i++) {
7914                         tp->napi[i].tx_prod = 0;
7915                         tp->napi[i].tx_cons = 0;
7916                         if (tg3_flag(tp, ENABLE_TSS))
7917                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
7918                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
7919                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7920                         tp->napi[i].chk_msi_cnt = 0;
7921                         tp->napi[i].last_rx_cons = 0;
7922                         tp->napi[i].last_tx_cons = 0;
7923                 }
7924                 if (!tg3_flag(tp, ENABLE_TSS))
7925                         tw32_mailbox(tp->napi[0].prodmbox, 0);
7926         } else {
7927                 tp->napi[0].tx_prod = 0;
7928                 tp->napi[0].tx_cons = 0;
7929                 tw32_mailbox(tp->napi[0].prodmbox, 0);
7930                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7931         }
7932
7933         /* Make sure the NIC-based send BD rings are disabled. */
7934         if (!tg3_flag(tp, 5705_PLUS)) {
7935                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7936                 for (i = 0; i < 16; i++)
7937                         tw32_tx_mbox(mbox + i * 8, 0);
7938         }
7939
7940         txrcb = NIC_SRAM_SEND_RCB;
7941         rxrcb = NIC_SRAM_RCV_RET_RCB;
7942
7943         /* Clear status block in ram. */
7944         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7945
7946         /* Set status block DMA address */
7947         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7948              ((u64) tnapi->status_mapping >> 32));
7949         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7950              ((u64) tnapi->status_mapping & 0xffffffff));
7951
7952         if (tnapi->tx_ring) {
7953                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7954                                (TG3_TX_RING_SIZE <<
7955                                 BDINFO_FLAGS_MAXLEN_SHIFT),
7956                                NIC_SRAM_TX_BUFFER_DESC);
7957                 txrcb += TG3_BDINFO_SIZE;
7958         }
7959
7960         if (tnapi->rx_rcb) {
7961                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7962                                (tp->rx_ret_ring_mask + 1) <<
7963                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
7964                 rxrcb += TG3_BDINFO_SIZE;
7965         }
7966
7967         stblk = HOSTCC_STATBLCK_RING1;
7968
7969         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7970                 u64 mapping = (u64)tnapi->status_mapping;
7971                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7972                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7973
7974                 /* Clear status block in ram. */
7975                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7976
7977                 if (tnapi->tx_ring) {
7978                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7979                                        (TG3_TX_RING_SIZE <<
7980                                         BDINFO_FLAGS_MAXLEN_SHIFT),
7981                                        NIC_SRAM_TX_BUFFER_DESC);
7982                         txrcb += TG3_BDINFO_SIZE;
7983                 }
7984
7985                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7986                                ((tp->rx_ret_ring_mask + 1) <<
7987                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7988
7989                 stblk += 8;
7990                 rxrcb += TG3_BDINFO_SIZE;
7991         }
7992 }
7993
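     /* Program the RX buffer-descriptor cache replenish thresholds.
      * The chip refills its on-chip BD cache from the producer ring
      * when the cache drains below the threshold, so the value is
      * capped at half the cache size and scaled with the configured
      * ring length.
      */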
7994 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
7995 {
7996         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
7997
7998         if (!tg3_flag(tp, 5750_PLUS) ||
7999             tg3_flag(tp, 5780_CLASS) ||
8000             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8001             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8002                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8003         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8004                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8005                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8006         else
8007                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8008
8009         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8010         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8011
8012         val = min(nic_rep_thresh, host_rep_thresh);
8013         tw32(RCVBDI_STD_THRESH, val);
8014
8015         if (tg3_flag(tp, 57765_PLUS))
8016                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8017
8018         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8019                 return;
8020
8021         if (!tg3_flag(tp, 5705_PLUS))
8022                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8023         else
8024                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
8025
8026         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8027
8028         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8029         tw32(RCVBDI_JUMBO_THRESH, val);
8030
8031         if (tg3_flag(tp, 57765_PLUS))
8032                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8033 }
8034
8035 /* tp->lock is held. */
8036 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8037 {
8038         u32 val, rdmac_mode;
8039         int i, err, limit;
8040         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8041
8042         tg3_disable_ints(tp);
8043
8044         tg3_stop_fw(tp);
8045
8046         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8047
8048         if (tg3_flag(tp, INIT_COMPLETE))
8049                 tg3_abort_hw(tp, 1);
8050
8051         /* Enable MAC control of LPI */
8052         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8053                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8054                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8055                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
8056
8057                 tw32_f(TG3_CPMU_EEE_CTRL,
8058                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8059
8060                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8061                       TG3_CPMU_EEEMD_LPI_IN_TX |
8062                       TG3_CPMU_EEEMD_LPI_IN_RX |
8063                       TG3_CPMU_EEEMD_EEE_ENABLE;
8064
8065                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8066                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8067
8068                 if (tg3_flag(tp, ENABLE_APE))
8069                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8070
8071                 tw32_f(TG3_CPMU_EEE_MODE, val);
8072
8073                 tw32_f(TG3_CPMU_EEE_DBTMR1,
8074                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8075                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8076
8077                 tw32_f(TG3_CPMU_EEE_DBTMR2,
8078                        TG3_CPMU_DBTMR2_APE_TX_2047US |
8079                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8080         }
8081
8082         if (reset_phy)
8083                 tg3_phy_reset(tp);
8084
8085         err = tg3_chip_reset(tp);
8086         if (err)
8087                 return err;
8088
8089         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8090
8091         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8092                 val = tr32(TG3_CPMU_CTRL);
8093                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8094                 tw32(TG3_CPMU_CTRL, val);
8095
8096                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8097                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8098                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8099                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8100
8101                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8102                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8103                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8104                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8105
8106                 val = tr32(TG3_CPMU_HST_ACC);
8107                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8108                 val |= CPMU_HST_ACC_MACCLK_6_25;
8109                 tw32(TG3_CPMU_HST_ACC, val);
8110         }
8111
8112         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8113                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8114                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8115                        PCIE_PWR_MGMT_L1_THRESH_4MS;
8116                 tw32(PCIE_PWR_MGMT_THRESH, val);
8117
8118                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8119                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8120
8121                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8122
8123                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8124                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8125         }
8126
8127         if (tg3_flag(tp, L1PLLPD_EN)) {
8128                 u32 grc_mode = tr32(GRC_MODE);
8129
8130                 /* Access the lower 1K of PL PCIE block registers. */
8131                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8132                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8133
8134                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8135                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8136                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8137
8138                 tw32(GRC_MODE, grc_mode);
8139         }
8140
8141         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8142                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8143                         u32 grc_mode = tr32(GRC_MODE);
8144
8145                         /* Access the lower 1K of PL PCIE block registers. */
8146                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8147                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8148
8149                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8150                                    TG3_PCIE_PL_LO_PHYCTL5);
8151                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8152                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8153
8154                         tw32(GRC_MODE, grc_mode);
8155                 }
8156
8157                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8158                         u32 grc_mode = tr32(GRC_MODE);
8159
8160                         /* Access the lower 1K of DL PCIE block registers. */
8161                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8162                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8163
8164                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8165                                    TG3_PCIE_DL_LO_FTSMAX);
8166                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8167                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8168                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8169
8170                         tw32(GRC_MODE, grc_mode);
8171                 }
8172
8173                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8174                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8175                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8176                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8177         }
8178
8179         /* This works around an issue with Athlon chipsets on
8180          * B3 tigon3 silicon.  This bit has no effect on any
8181          * other revision.  But do not set this on PCI Express
8182          * chips and don't even touch the clocks if the CPMU is present.
8183          */
8184         if (!tg3_flag(tp, CPMU_PRESENT)) {
8185                 if (!tg3_flag(tp, PCI_EXPRESS))
8186                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8187                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8188         }
8189
8190         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8191             tg3_flag(tp, PCIX_MODE)) {
8192                 val = tr32(TG3PCI_PCISTATE);
8193                 val |= PCISTATE_RETRY_SAME_DMA;
8194                 tw32(TG3PCI_PCISTATE, val);
8195         }
8196
8197         if (tg3_flag(tp, ENABLE_APE)) {
8198                 /* Allow reads and writes to the
8199                  * APE register and memory space.
8200                  */
8201                 val = tr32(TG3PCI_PCISTATE);
8202                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8203                        PCISTATE_ALLOW_APE_SHMEM_WR |
8204                        PCISTATE_ALLOW_APE_PSPACE_WR;
8205                 tw32(TG3PCI_PCISTATE, val);
8206         }
8207
8208         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8209                 /* Enable some hw fixes.  */
8210                 val = tr32(TG3PCI_MSI_DATA);
8211                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8212                 tw32(TG3PCI_MSI_DATA, val);
8213         }
8214
8215         /* Descriptor ring init may make accesses to the
8216          * NIC SRAM area to setup the TX descriptors, so we
8217          * can only do this after the hardware has been
8218          * successfully reset.
8219          */
8220         err = tg3_init_rings(tp);
8221         if (err)
8222                 return err;
8223
8224         if (tg3_flag(tp, 57765_PLUS)) {
8225                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8226                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8227                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8228                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8229                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8230                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8231                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8232                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8233         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8234                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8235                 /* This value is determined during the probe time DMA
8236                  * engine test, tg3_test_dma.
8237                  */
8238                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8239         }
8240
8241         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8242                           GRC_MODE_4X_NIC_SEND_RINGS |
8243                           GRC_MODE_NO_TX_PHDR_CSUM |
8244                           GRC_MODE_NO_RX_PHDR_CSUM);
8245         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8246
8247         /* Pseudo-header checksum is done by hardware logic and not
8248          * the offload processors, so make the chip do the pseudo-
8249          * header checksums on receive.  For transmit it is more
8250          * convenient to do the pseudo-header checksum in software
8251          * as Linux does that on transmit for us in all cases.
8252          */
8253         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8254
8255         tw32(GRC_MODE,
8256              tp->grc_mode |
8257              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8258
8259         /* Set up the timer prescaler register.  Clock is always 66 MHz. */
8260         val = tr32(GRC_MISC_CFG);
8261         val &= ~0xff;
8262         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8263         tw32(GRC_MISC_CFG, val);
8264
8265         /* Initialize MBUF/DESC pool. */
8266         if (tg3_flag(tp, 5750_PLUS)) {
8267                 /* Do nothing.  */
8268         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8269                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8270                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8271                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8272                 else
8273                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8274                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8275                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8276         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8277                 int fw_len;
8278
8279                 fw_len = tp->fw_len;
8280                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8281                 tw32(BUFMGR_MB_POOL_ADDR,
8282                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8283                 tw32(BUFMGR_MB_POOL_SIZE,
8284                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8285         }
8286
8287         if (tp->dev->mtu <= ETH_DATA_LEN) {
8288                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8289                      tp->bufmgr_config.mbuf_read_dma_low_water);
8290                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8291                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8292                 tw32(BUFMGR_MB_HIGH_WATER,
8293                      tp->bufmgr_config.mbuf_high_water);
8294         } else {
8295                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8296                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8297                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8298                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8299                 tw32(BUFMGR_MB_HIGH_WATER,
8300                      tp->bufmgr_config.mbuf_high_water_jumbo);
8301         }
8302         tw32(BUFMGR_DMA_LOW_WATER,
8303              tp->bufmgr_config.dma_low_water);
8304         tw32(BUFMGR_DMA_HIGH_WATER,
8305              tp->bufmgr_config.dma_high_water);
8306
8307         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8308         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8309                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8310         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8311             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8312             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8313                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8314         tw32(BUFMGR_MODE, val);
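             /* Poll up to 20 ms (2000 x 10 us) for the buffer manager
              * to report itself enabled.
              */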
8315         for (i = 0; i < 2000; i++) {
8316                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8317                         break;
8318                 udelay(10);
8319         }
8320         if (i >= 2000) {
8321                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8322                 return -ENODEV;
8323         }
8324
8325         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8326                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8327
8328         tg3_setup_rxbd_thresholds(tp);
8329
8330         /* Initialize TG3_BDINFO's at:
8331          *  RCVDBDI_STD_BD:     standard eth size rx ring
8332          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8333          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8334          *
8335          * like so:
8336          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8337          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8338          *                              ring attribute flags
8339          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8340          *
8341          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8342          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8343          *
8344          * The size of each ring is fixed in the firmware, but the location is
8345          * configurable.
8346          */
8347         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8348              ((u64) tpr->rx_std_mapping >> 32));
8349         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8350              ((u64) tpr->rx_std_mapping & 0xffffffff));
8351         if (!tg3_flag(tp, 5717_PLUS))
8352                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8353                      NIC_SRAM_RX_BUFFER_DESC);
8354
8355         /* Disable the mini ring */
8356         if (!tg3_flag(tp, 5705_PLUS))
8357                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8358                      BDINFO_FLAGS_DISABLED);
8359
8360         /* Program the jumbo buffer descriptor ring control
8361          * blocks on those devices that have them.
8362          */
8363         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8364             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8365
8366                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8367                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8368                              ((u64) tpr->rx_jmb_mapping >> 32));
8369                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8370                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8371                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8372                               BDINFO_FLAGS_MAXLEN_SHIFT;
8373                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8374                              val | BDINFO_FLAGS_USE_EXT_RECV);
8375                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8376                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8377                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8378                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8379                 } else {
8380                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8381                              BDINFO_FLAGS_DISABLED);
8382                 }
8383
8384                 if (tg3_flag(tp, 57765_PLUS)) {
8385                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8386                                 val = TG3_RX_STD_MAX_SIZE_5700;
8387                         else
8388                                 val = TG3_RX_STD_MAX_SIZE_5717;
8389                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8390                         val |= (TG3_RX_STD_DMA_SZ << 2);
8391                 } else
8392                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8393         } else
8394                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8395
8396         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8397
8398         tpr->rx_std_prod_idx = tp->rx_pending;
8399         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8400
8401         tpr->rx_jmb_prod_idx =
8402                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8403         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8404
8405         tg3_rings_reset(tp);
8406
8407         /* Initialize MAC address and backoff seed. */
8408         __tg3_set_mac_addr(tp, 0);
8409
8410         /* MTU + ethernet header + FCS + optional VLAN tag */
8411         tw32(MAC_RX_MTU_SIZE,
8412              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8413
8414         /* The slot time is changed by tg3_setup_phy if we
8415          * run at gigabit with half duplex.
8416          */
8417         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8418               (6 << TX_LENGTHS_IPG_SHIFT) |
8419               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8420
8421         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8422                 val |= tr32(MAC_TX_LENGTHS) &
8423                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8424                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8425
8426         tw32(MAC_TX_LENGTHS, val);
8427
8428         /* Receive rules. */
8429         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8430         tw32(RCVLPC_CONFIG, 0x0181);
8431
8432         /* Calculate RDMAC_MODE setting early, we need it to determine
8433          * the RCVLPC_STATE_ENABLE mask.
8434          */
8435         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8436                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8437                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8438                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8439                       RDMAC_MODE_LNGREAD_ENAB);
8440
8441         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8442                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8443
8444         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8445             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8446             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8447                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8448                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8449                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8450
8451         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8452             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8453                 if (tg3_flag(tp, TSO_CAPABLE) &&
8454                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8455                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8456                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8457                            !tg3_flag(tp, IS_5788)) {
8458                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8459                 }
8460         }
8461
8462         if (tg3_flag(tp, PCI_EXPRESS))
8463                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8464
8465         if (tg3_flag(tp, HW_TSO_1) ||
8466             tg3_flag(tp, HW_TSO_2) ||
8467             tg3_flag(tp, HW_TSO_3))
8468                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8469
8470         if (tg3_flag(tp, 57765_PLUS) ||
8471             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8472             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8473                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8474
8475         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8476                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8477
8478         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8479             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8480             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8481             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8482             tg3_flag(tp, 57765_PLUS)) {
8483                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8484                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8485                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8486                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8487                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8488                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8489                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8490                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8491                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8492                 }
8493                 tw32(TG3_RDMA_RSRVCTRL_REG,
8494                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8495         }
8496
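             /* On 5719/5720, cap buffer-descriptor and LSO read DMA
              * burst lengths at 4K (the _BLEN_*_4K bits set below).
              */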
8497         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8498             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8499                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8500                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8501                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8502                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8503         }
8504
8505         /* Receive/send statistics. */
8506         if (tg3_flag(tp, 5750_PLUS)) {
8507                 val = tr32(RCVLPC_STATS_ENABLE);
8508                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8509                 tw32(RCVLPC_STATS_ENABLE, val);
8510         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8511                    tg3_flag(tp, TSO_CAPABLE)) {
8512                 val = tr32(RCVLPC_STATS_ENABLE);
8513                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8514                 tw32(RCVLPC_STATS_ENABLE, val);
8515         } else {
8516                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8517         }
8518         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8519         tw32(SNDDATAI_STATSENAB, 0xffffff);
8520         tw32(SNDDATAI_STATSCTRL,
8521              (SNDDATAI_SCTRL_ENABLE |
8522               SNDDATAI_SCTRL_FASTUPD));
8523
8524         /* Setup host coalescing engine. */
8525         tw32(HOSTCC_MODE, 0);
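             /* Force the engine off and wait up to ~20 ms for the
              * enable bit to clear before reprogramming coalescing.
              */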
8526         for (i = 0; i < 2000; i++) {
8527                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8528                         break;
8529                 udelay(10);
8530         }
8531
8532         __tg3_set_coalesce(tp, &tp->coal);
8533
8534         if (!tg3_flag(tp, 5705_PLUS)) {
8535                 /* Status/statistics block address.  See tg3_timer,
8536                  * the tg3_periodic_fetch_stats call there, and
8537                  * tg3_get_stats to see how this works for 5705/5750 chips.
8538                  */
8539                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8540                      ((u64) tp->stats_mapping >> 32));
8541                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8542                      ((u64) tp->stats_mapping & 0xffffffff));
8543                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8544
8545                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8546
8547                 /* Clear statistics and status block memory areas */
8548                 for (i = NIC_SRAM_STATS_BLK;
8549                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8550                      i += sizeof(u32)) {
8551                         tg3_write_mem(tp, i, 0);
8552                         udelay(40);
8553                 }
8554         }
8555
8556         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8557
8558         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8559         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8560         if (!tg3_flag(tp, 5705_PLUS))
8561                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8562
8563         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8564                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8565                 /* reset to prevent losing 1st rx packet intermittently */
8566                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8567                 udelay(10);
8568         }
8569
8570         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8571                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
8572                         MAC_MODE_FHDE_ENABLE;
8573         if (tg3_flag(tp, ENABLE_APE))
8574                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8575         if (!tg3_flag(tp, 5705_PLUS) &&
8576             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8577             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8578                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8579         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8580         udelay(40);
8581
8582         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8583          * If TG3_FLAG_IS_NIC is zero, we should read the
8584          * register to preserve the GPIO settings for LOMs. The GPIOs,
8585          * whether used as inputs or outputs, are set by boot code after
8586          * reset.
8587          */
8588         if (!tg3_flag(tp, IS_NIC)) {
8589                 u32 gpio_mask;
8590
8591                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8592                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8593                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8594
8595                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8596                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8597                                      GRC_LCLCTRL_GPIO_OUTPUT3;
8598
8599                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8600                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8601
8602                 tp->grc_local_ctrl &= ~gpio_mask;
8603                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8604
8605                 /* GPIO1 must be driven high for eeprom write protect */
8606                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8607                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8608                                                GRC_LCLCTRL_GPIO_OUTPUT1);
8609         }
8610         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8611         udelay(100);
8612
8613         if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8614                 val = tr32(MSGINT_MODE);
8615                 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8616                 tw32(MSGINT_MODE, val);
8617         }
8618
8619         if (!tg3_flag(tp, 5705_PLUS)) {
8620                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8621                 udelay(40);
8622         }
8623
8624         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8625                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8626                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8627                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8628                WDMAC_MODE_LNGREAD_ENAB);
8629
8630         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8631             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8632                 if (tg3_flag(tp, TSO_CAPABLE) &&
8633                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8634                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8635                         /* nothing */
8636                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8637                            !tg3_flag(tp, IS_5788)) {
8638                         val |= WDMAC_MODE_RX_ACCEL;
8639                 }
8640         }
8641
8642         /* Enable host coalescing bug fix */
8643         if (tg3_flag(tp, 5755_PLUS))
8644                 val |= WDMAC_MODE_STATUS_TAG_FIX;
8645
8646         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8647                 val |= WDMAC_MODE_BURST_ALL_DATA;
8648
8649         tw32_f(WDMAC_MODE, val);
8650         udelay(40);
8651
8652         if (tg3_flag(tp, PCIX_MODE)) {
8653                 u16 pcix_cmd;
8654
8655                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8656                                      &pcix_cmd);
8657                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8658                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8659                         pcix_cmd |= PCI_X_CMD_READ_2K;
8660                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8661                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8662                         pcix_cmd |= PCI_X_CMD_READ_2K;
8663                 }
8664                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8665                                       pcix_cmd);
8666         }
8667
8668         tw32_f(RDMAC_MODE, rdmac_mode);
8669         udelay(40);
8670
8671         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8672         if (!tg3_flag(tp, 5705_PLUS))
8673                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8674
8675         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8676                 tw32(SNDDATAC_MODE,
8677                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8678         else
8679                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8680
8681         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8682         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8683         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8684         if (tg3_flag(tp, LRG_PROD_RING_CAP))
8685                 val |= RCVDBDI_MODE_LRG_RING_SZ;
8686         tw32(RCVDBDI_MODE, val);
8687         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8688         if (tg3_flag(tp, HW_TSO_1) ||
8689             tg3_flag(tp, HW_TSO_2) ||
8690             tg3_flag(tp, HW_TSO_3))
8691                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8692         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8693         if (tg3_flag(tp, ENABLE_TSS))
8694                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8695         tw32(SNDBDI_MODE, val);
8696         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8697
8698         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8699                 err = tg3_load_5701_a0_firmware_fix(tp);
8700                 if (err)
8701                         return err;
8702         }
8703
8704         if (tg3_flag(tp, TSO_CAPABLE)) {
8705                 err = tg3_load_tso_firmware(tp);
8706                 if (err)
8707                         return err;
8708         }
8709
8710         tp->tx_mode = TX_MODE_ENABLE;
8711
8712         if (tg3_flag(tp, 5755_PLUS) ||
8713             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8714                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8715
8716         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8717                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8718                 tp->tx_mode &= ~val;
8719                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8720         }
8721
8722         tw32_f(MAC_TX_MODE, tp->tx_mode);
8723         udelay(100);
8724
8725         if (tg3_flag(tp, ENABLE_RSS)) {
8726                 int i = 0;
8727                 u32 reg = MAC_RSS_INDIR_TBL_0;
8728
8729                 if (tp->irq_cnt == 2) {
8730                         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
8731                                 tw32(reg, 0x0);
8732                                 reg += 4;
8733                         }
8734                 } else {
8735                         u32 val;
8736
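                             /* Each 32-bit register packs eight 4-bit
                              * queue indices; spread the table entries
                              * round-robin over the irq_cnt - 1 rx
                              * queues.
                              */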
8737                         while (i < TG3_RSS_INDIR_TBL_SIZE) {
8738                                 val = i % (tp->irq_cnt - 1);
8739                                 i++;
8740                                 for (; i % 8; i++) {
8741                                         val <<= 4;
8742                                         val |= (i % (tp->irq_cnt - 1));
8743                                 }
8744                                 tw32(reg, val);
8745                                 reg += 4;
8746                         }
8747                 }
8748
8749                 /* Setup the "secret" hash key. */
8750                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8751                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8752                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8753                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8754                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8755                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8756                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8757                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8758                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8759                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8760         }
8761
8762         tp->rx_mode = RX_MODE_ENABLE;
8763         if (tg3_flag(tp, 5755_PLUS))
8764                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8765
8766         if (tg3_flag(tp, ENABLE_RSS))
8767                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8768                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
8769                                RX_MODE_RSS_IPV6_HASH_EN |
8770                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
8771                                RX_MODE_RSS_IPV4_HASH_EN |
8772                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
8773
8774         tw32_f(MAC_RX_MODE, tp->rx_mode);
8775         udelay(10);
8776
8777         tw32(MAC_LED_CTRL, tp->led_ctrl);
8778
8779         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8780         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8781                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8782                 udelay(10);
8783         }
8784         tw32_f(MAC_RX_MODE, tp->rx_mode);
8785         udelay(10);
8786
8787         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8788                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8789                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8790                         /* Set drive transmission level to 1.2V  */
8791                         /* only if the signal pre-emphasis bit is not set  */
8792                         val = tr32(MAC_SERDES_CFG);
8793                         val &= 0xfffff000;
8794                         val |= 0x880;
8795                         tw32(MAC_SERDES_CFG, val);
8796                 }
8797                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8798                         tw32(MAC_SERDES_CFG, 0x616000);
8799         }
8800
8801         /* Prevent chip from dropping frames when flow control
8802          * is enabled.
8803          */
8804         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8805                 val = 1;
8806         else
8807                 val = 2;
8808         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8809
8810         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8811             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8812                 /* Use hardware link auto-negotiation */
8813                 tg3_flag_set(tp, HW_AUTONEG);
8814         }
8815
8816         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8817             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
8818                 u32 tmp;
8819
8820                 tmp = tr32(SERDES_RX_CTRL);
8821                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8822                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8823                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8824                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8825         }
8826
8827         if (!tg3_flag(tp, USE_PHYLIB)) {
8828                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8829                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8830                         tp->link_config.speed = tp->link_config.orig_speed;
8831                         tp->link_config.duplex = tp->link_config.orig_duplex;
8832                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
8833                 }
8834
8835                 err = tg3_setup_phy(tp, 0);
8836                 if (err)
8837                         return err;
8838
8839                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8840                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8841                         u32 tmp;
8842
8843                         /* Clear CRC stats. */
8844                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8845                                 tg3_writephy(tp, MII_TG3_TEST1,
8846                                              tmp | MII_TG3_TEST1_CRC_EN);
8847                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8848                         }
8849                 }
8850         }
8851
8852         __tg3_set_rx_mode(tp->dev);
8853
8854         /* Initialize receive rules. */
8855         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
8856         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8857         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
8858         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8859
8860         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
8861                 limit = 8;
8862         else
8863                 limit = 16;
8864         if (tg3_flag(tp, ENABLE_ASF))
8865                 limit -= 4;
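             /* Deliberate fall-through: clear every unused rule/value
              * pair from limit - 1 down to rule 4.  Rules 0 and 1 were
              * programmed above; rules 2 and 3 stay untouched.
              */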
8866         switch (limit) {
8867         case 16:
8868                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
8869         case 15:
8870                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
8871         case 14:
8872                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
8873         case 13:
8874                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
8875         case 12:
8876                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
8877         case 11:
8878                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
8879         case 10:
8880                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
8881         case 9:
8882                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
8883         case 8:
8884                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
8885         case 7:
8886                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
8887         case 6:
8888                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
8889         case 5:
8890                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
8891         case 4:
8892                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
8893         case 3:
8894                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
8895         case 2:
8896         case 1:
8897
8898         default:
8899                 break;
8900         }
8901
8902         if (tg3_flag(tp, ENABLE_APE))
8903                 /* Write our heartbeat update interval to APE. */
8904                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8905                                 APE_HOST_HEARTBEAT_INT_DISABLE);
8906
8907         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8908
8909         return 0;
8910 }
8911
8912 /* Called at device open time to get the chip ready for
8913  * packet processing.  Invoked with tp->lock held.
8914  */
8915 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8916 {
8917         tg3_switch_clocks(tp);
8918
8919         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8920
8921         return tg3_reset_hw(tp, reset_phy);
8922 }
8923
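     /* Fold a 32-bit hardware counter into a 64-bit high/low software
      * counter; a wrap of the low word carries one into the high word.
      */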
8924 #define TG3_STAT_ADD32(PSTAT, REG) \
8925 do {    u32 __val = tr32(REG); \
8926         (PSTAT)->low += __val; \
8927         if ((PSTAT)->low < __val) \
8928                 (PSTAT)->high += 1; \
8929 } while (0)
8930
8931 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8932 {
8933         struct tg3_hw_stats *sp = tp->hw_stats;
8934
8935         if (!netif_carrier_ok(tp->dev))
8936                 return;
8937
8938         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8939         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8940         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8941         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8942         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8943         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8944         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8945         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8946         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8947         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8948         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8949         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8950         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8951
8952         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8953         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8954         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8955         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8956         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8957         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8958         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8959         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8960         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8961         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8962         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8963         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8964         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8965         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8966
8967         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
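             /* The 5717 and the A0 steppings of 5719/5720 apparently
              * lack a usable rx discard counter; approximate it from
              * mbuf low-watermark attention events instead.
              */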
8968         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
8969             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
8970             tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
8971                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8972         } else {
8973                 u32 val = tr32(HOSTCC_FLOW_ATTN);
8974                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
8975                 if (val) {
8976                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
8977                         sp->rx_discards.low += val;
8978                         if (sp->rx_discards.low < val)
8979                                 sp->rx_discards.high += 1;
8980                 }
8981                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
8982         }
8983         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
8984 }
8985
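     /* Detect a missed MSI: if a vector still has work pending but
      * neither its rx nor tx consumer index has moved since the last
      * timer tick, rewrite the interrupt mailbox on the second such
      * tick to rekick the handler.
      */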
8986 static void tg3_chk_missed_msi(struct tg3 *tp)
8987 {
8988         u32 i;
8989
8990         for (i = 0; i < tp->irq_cnt; i++) {
8991                 struct tg3_napi *tnapi = &tp->napi[i];
8992
8993                 if (tg3_has_work(tnapi)) {
8994                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
8995                             tnapi->last_tx_cons == tnapi->tx_cons) {
8996                                 if (tnapi->chk_msi_cnt < 1) {
8997                                         tnapi->chk_msi_cnt++;
8998                                         return;
8999                                 }
9000                                 tw32_mailbox(tnapi->int_mbox,
9001                                              tnapi->last_tag << 24);
9002                         }
9003                 }
9004                 tnapi->chk_msi_cnt = 0;
9005                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9006                 tnapi->last_tx_cons = tnapi->tx_cons;
9007         }
9008 }
9009
9010 static void tg3_timer(unsigned long __opaque)
9011 {
9012         struct tg3 *tp = (struct tg3 *) __opaque;
9013
9014         if (tp->irq_sync)
9015                 goto restart_timer;
9016
9017         spin_lock(&tp->lock);
9018
9019         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9020             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
9021                 tg3_chk_missed_msi(tp);
9022
9023         if (!tg3_flag(tp, TAGGED_STATUS)) {
9024                 /* All of this garbage exists because, with non-tagged
9025                  * IRQ status, the mailbox/status_block protocol the chip
9026                  * uses with the CPU is race prone.
9027                  */
9028                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9029                         tw32(GRC_LOCAL_CTRL,
9030                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9031                 } else {
9032                         tw32(HOSTCC_MODE, tp->coalesce_mode |
9033                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9034                 }
9035
9036                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9037                         tg3_flag_set(tp, RESTART_TIMER);
9038                         spin_unlock(&tp->lock);
9039                         schedule_work(&tp->reset_task);
9040                         return;
9041                 }
9042         }
9043
9044         /* This part only runs once per second. */
9045         if (!--tp->timer_counter) {
9046                 if (tg3_flag(tp, 5705_PLUS))
9047                         tg3_periodic_fetch_stats(tp);
9048
9049                 if (tp->setlpicnt && !--tp->setlpicnt)
9050                         tg3_phy_eee_enable(tp);
9051
9052                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9053                         u32 mac_stat;
9054                         int phy_event;
9055
9056                         mac_stat = tr32(MAC_STATUS);
9057
9058                         phy_event = 0;
9059                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9060                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9061                                         phy_event = 1;
9062                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9063                                 phy_event = 1;
9064
9065                         if (phy_event)
9066                                 tg3_setup_phy(tp, 0);
9067                 } else if (tg3_flag(tp, POLL_SERDES)) {
9068                         u32 mac_stat = tr32(MAC_STATUS);
9069                         int need_setup = 0;
9070
9071                         if (netif_carrier_ok(tp->dev) &&
9072                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9073                                 need_setup = 1;
9074                         }
9075                         if (!netif_carrier_ok(tp->dev) &&
9076                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
9077                                          MAC_STATUS_SIGNAL_DET))) {
9078                                 need_setup = 1;
9079                         }
9080                         if (need_setup) {
9081                                 if (!tp->serdes_counter) {
9082                                         tw32_f(MAC_MODE,
9083                                              (tp->mac_mode &
9084                                               ~MAC_MODE_PORT_MODE_MASK));
9085                                         udelay(40);
9086                                         tw32_f(MAC_MODE, tp->mac_mode);
9087                                         udelay(40);
9088                                 }
9089                                 tg3_setup_phy(tp, 0);
9090                         }
9091                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9092                            tg3_flag(tp, 5780_CLASS)) {
9093                         tg3_serdes_parallel_detect(tp);
9094                 }
9095
9096                 tp->timer_counter = tp->timer_multiplier;
9097         }
9098
9099         /* Heartbeat is only sent once every 2 seconds.
9100          *
9101          * The heartbeat is to tell the ASF firmware that the host
9102          * driver is still alive.  In the event that the OS crashes,
9103          * ASF needs to reset the hardware to free up the FIFO space
9104          * that may be filled with rx packets destined for the host.
9105          * If the FIFO is full, ASF will no longer function properly.
9106          *
9107          * Unintended resets have been reported on real time kernels
9108          * where the timer doesn't run on time.  Netpoll will also have
9109          * same problem.
9110          *
9111          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9112          * to check the ring condition when the heartbeat is expiring
9113          * before doing the reset.  This will prevent most unintended
9114          * resets.
9115          */
9116         if (!--tp->asf_counter) {
9117                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9118                         tg3_wait_for_event_ack(tp);
9119
9120                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9121                                       FWCMD_NICDRV_ALIVE3);
9122                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9123                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9124                                       TG3_FW_UPDATE_TIMEOUT_SEC);
9125
9126                         tg3_generate_fw_event(tp);
9127                 }
9128                 tp->asf_counter = tp->asf_multiplier;
9129         }
9130
9131         spin_unlock(&tp->lock);
9132
9133 restart_timer:
9134         tp->timer.expires = jiffies + tp->timer_offset;
9135         add_timer(&tp->timer);
9136 }
9137
9138 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9139 {
9140         irq_handler_t fn;
9141         unsigned long flags;
9142         char *name;
9143         struct tg3_napi *tnapi = &tp->napi[irq_num];
9144
9145         if (tp->irq_cnt == 1)
9146                 name = tp->dev->name;
9147         else {
9148                 name = &tnapi->irq_lbl[0];
9149                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9150                 name[IFNAMSIZ-1] = 0;
9151         }
9152
9153         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9154                 fn = tg3_msi;
9155                 if (tg3_flag(tp, 1SHOT_MSI))
9156                         fn = tg3_msi_1shot;
9157                 flags = 0;
9158         } else {
9159                 fn = tg3_interrupt;
9160                 if (tg3_flag(tp, TAGGED_STATUS))
9161                         fn = tg3_interrupt_tagged;
9162                 flags = IRQF_SHARED;
9163         }
9164
9165         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9166 }
9167
9168 static int tg3_test_interrupt(struct tg3 *tp)
9169 {
9170         struct tg3_napi *tnapi = &tp->napi[0];
9171         struct net_device *dev = tp->dev;
9172         int err, i, intr_ok = 0;
9173         u32 val;
9174
9175         if (!netif_running(dev))
9176                 return -ENODEV;
9177
9178         tg3_disable_ints(tp);
9179
9180         free_irq(tnapi->irq_vec, tnapi);
9181
9182         /*
9183          * Turn off MSI one shot mode.  Otherwise this test has no
9184          * observable way to know whether the interrupt was delivered.
9185          */
9186         if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9187                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9188                 tw32(MSGINT_MODE, val);
9189         }
9190
9191         err = request_irq(tnapi->irq_vec, tg3_test_isr,
9192                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9193         if (err)
9194                 return err;
9195
9196         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9197         tg3_enable_ints(tp);
9198
9199         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9200                tnapi->coal_now);
9201
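             /* Give the test interrupt up to ~50 ms (5 x 10 ms) to show
              * up in the mailbox or in the masked-PCI-interrupt bit.
              */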
9202         for (i = 0; i < 5; i++) {
9203                 u32 int_mbox, misc_host_ctrl;
9204
9205                 int_mbox = tr32_mailbox(tnapi->int_mbox);
9206                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9207
9208                 if ((int_mbox != 0) ||
9209                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9210                         intr_ok = 1;
9211                         break;
9212                 }
9213
9214                 msleep(10);
9215         }
9216
9217         tg3_disable_ints(tp);
9218
9219         free_irq(tnapi->irq_vec, tnapi);
9220
9221         err = tg3_request_irq(tp, 0);
9222
9223         if (err)
9224                 return err;
9225
9226         if (intr_ok) {
9227                 /* Reenable MSI one shot mode. */
9228                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9229                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9230                         tw32(MSGINT_MODE, val);
9231                 }
9232                 return 0;
9233         }
9234
9235         return -EIO;
9236 }
9237
9238 /* Returns 0 if the MSI test succeeds, or if the MSI test fails and
9239  * INTx mode is successfully restored.
9240  */
9241 static int tg3_test_msi(struct tg3 *tp)
9242 {
9243         int err;
9244         u16 pci_cmd;
9245
9246         if (!tg3_flag(tp, USING_MSI))
9247                 return 0;
9248
9249         /* Turn off SERR reporting in case MSI terminates with Master
9250          * Abort.
9251          */
9252         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9253         pci_write_config_word(tp->pdev, PCI_COMMAND,
9254                               pci_cmd & ~PCI_COMMAND_SERR);
9255
9256         err = tg3_test_interrupt(tp);
9257
9258         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9259
9260         if (!err)
9261                 return 0;
9262
9263         /* other failures */
9264         if (err != -EIO)
9265                 return err;
9266
9267         /* MSI test failed, go back to INTx mode */
9268         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9269                     "to INTx mode. Please report this failure to the PCI "
9270                     "maintainer and include system chipset information\n");
9271
9272         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9273
9274         pci_disable_msi(tp->pdev);
9275
9276         tg3_flag_clear(tp, USING_MSI);
9277         tp->napi[0].irq_vec = tp->pdev->irq;
9278
9279         err = tg3_request_irq(tp, 0);
9280         if (err)
9281                 return err;
9282
9283         /* Need to reset the chip because the MSI cycle may have terminated
9284          * with Master Abort.
9285          */
9286         tg3_full_lock(tp, 1);
9287
9288         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9289         err = tg3_init_hw(tp, 1);
9290
9291         tg3_full_unlock(tp);
9292
9293         if (err)
9294                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9295
9296         return err;
9297 }
9298
9299 static int tg3_request_firmware(struct tg3 *tp)
9300 {
9301         const __be32 *fw_data;
9302
9303         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9304                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9305                            tp->fw_needed);
9306                 return -ENOENT;
9307         }
9308
9309         fw_data = (void *)tp->fw->data;
9310
9311         /* Firmware blob starts with version numbers, followed by
9312          * start address and _full_ length including BSS sections
9313          * (which must be longer than the actual data, of course).
9314          */
9315
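             /* Header sketch, assuming the usual tg3 blob layout: word 0
              * holds the version, word 1 the start address, and word 2
              * the full length consumed below.
              */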
9316         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
9317         if (tp->fw_len < (tp->fw->size - 12)) {
9318                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9319                            tp->fw_len, tp->fw_needed);
9320                 release_firmware(tp->fw);
9321                 tp->fw = NULL;
9322                 return -EINVAL;
9323         }
9324
9325         /* We no longer need firmware; we have it. */
9326         tp->fw_needed = NULL;
9327         return 0;
9328 }
9329
9330 static bool tg3_enable_msix(struct tg3 *tp)
9331 {
9332         int i, rc, cpus = num_online_cpus();
9333         struct msix_entry msix_ent[tp->irq_max];
9334
9335         if (cpus == 1)
9336                 /* Just fallback to the simpler MSI mode. */
9337                 return false;
9338
9339         /*
9340          * We want as many rx rings enabled as there are cpus.
9341          * The first MSIX vector only deals with link interrupts, etc,
9342          * so we add one to the number of vectors we are requesting.
9343          */
9344         tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
9345
9346         for (i = 0; i < tp->irq_max; i++) {
9347                 msix_ent[i].entry  = i;
9348                 msix_ent[i].vector = 0;
9349         }
9350
9351         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9352         if (rc < 0) {
9353                 return false;
9354         } else if (rc != 0) {
9355                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9356                         return false;
9357                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9358                               tp->irq_cnt, rc);
9359                 tp->irq_cnt = rc;
9360         }
9361
9362         for (i = 0; i < tp->irq_max; i++)
9363                 tp->napi[i].irq_vec = msix_ent[i].vector;
9364
9365         netif_set_real_num_tx_queues(tp->dev, 1);
9366         rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9367         if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9368                 pci_disable_msix(tp->pdev);
9369                 return false;
9370         }
9371
9372         if (tp->irq_cnt > 1) {
9373                 tg3_flag_set(tp, ENABLE_RSS);
9374
9375                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9376                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9377                         tg3_flag_set(tp, ENABLE_TSS);
9378                         netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9379                 }
9380         }
9381
9382         return true;
9383 }
9384
9385 static void tg3_ints_init(struct tg3 *tp)
9386 {
9387         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9388             !tg3_flag(tp, TAGGED_STATUS)) {
9389                 /* All MSI supporting chips should support tagged
9390                  * status.  Warn and fall back if that is not the case.
9391                  */
9392                 netdev_warn(tp->dev,
9393                             "MSI without TAGGED_STATUS? Not using MSI\n");
9394                 goto defcfg;
9395         }
9396
9397         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9398                 tg3_flag_set(tp, USING_MSIX);
9399         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9400                 tg3_flag_set(tp, USING_MSI);
9401
9402         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9403                 u32 msi_mode = tr32(MSGINT_MODE);
9404                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9405                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9406                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9407         }
9408 defcfg:
9409         if (!tg3_flag(tp, USING_MSIX)) {
9410                 tp->irq_cnt = 1;
9411                 tp->napi[0].irq_vec = tp->pdev->irq;
9412                 netif_set_real_num_tx_queues(tp->dev, 1);
9413                 netif_set_real_num_rx_queues(tp->dev, 1);
9414         }
9415 }
9416
9417 static void tg3_ints_fini(struct tg3 *tp)
9418 {
9419         if (tg3_flag(tp, USING_MSIX))
9420                 pci_disable_msix(tp->pdev);
9421         else if (tg3_flag(tp, USING_MSI))
9422                 pci_disable_msi(tp->pdev);
9423         tg3_flag_clear(tp, USING_MSI);
9424         tg3_flag_clear(tp, USING_MSIX);
9425         tg3_flag_clear(tp, ENABLE_RSS);
9426         tg3_flag_clear(tp, ENABLE_TSS);
9427 }
9428
9429 static int tg3_open(struct net_device *dev)
9430 {
9431         struct tg3 *tp = netdev_priv(dev);
9432         int i, err;
9433
9434         if (tp->fw_needed) {
9435                 err = tg3_request_firmware(tp);
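                     /* The 5701_A0 fix firmware is mandatory; on any
                      * other chip a missing blob only costs us TSO.
                      */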
9436                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9437                         if (err)
9438                                 return err;
9439                 } else if (err) {
9440                         netdev_warn(tp->dev, "TSO capability disabled\n");
9441                         tg3_flag_clear(tp, TSO_CAPABLE);
9442                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9443                         netdev_notice(tp->dev, "TSO capability restored\n");
9444                         tg3_flag_set(tp, TSO_CAPABLE);
9445                 }
9446         }
9447
9448         netif_carrier_off(tp->dev);
9449
9450         err = tg3_power_up(tp);
9451         if (err)
9452                 return err;
9453
9454         tg3_full_lock(tp, 0);
9455
9456         tg3_disable_ints(tp);
9457         tg3_flag_clear(tp, INIT_COMPLETE);
9458
9459         tg3_full_unlock(tp);
9460
9461         /*
9462          * Setup interrupts first so we know how
9463          * many NAPI resources to allocate
9464          */
9465         tg3_ints_init(tp);
9466
9467         /* The placement of this call is tied
9468          * to the setup and use of Host TX descriptors.
9469          */
9470         err = tg3_alloc_consistent(tp);
9471         if (err)
9472                 goto err_out1;
9473
9474         tg3_napi_init(tp);
9475
9476         tg3_napi_enable(tp);
9477
9478         for (i = 0; i < tp->irq_cnt; i++) {
9479                 struct tg3_napi *tnapi = &tp->napi[i];
9480                 err = tg3_request_irq(tp, i);
9481                 if (err) {
9482                         for (i--; i >= 0; i--)
9483                                 free_irq(tp->napi[i].irq_vec, &tp->napi[i]);
9484                         break;
9485                 }
9486         }
9487
9488         if (err)
9489                 goto err_out2;
9490
9491         tg3_full_lock(tp, 0);
9492
9493         err = tg3_init_hw(tp, 1);
9494         if (err) {
9495                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9496                 tg3_free_rings(tp);
9497         } else {
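                     /* Tagged status lets most chips run the service
                      * timer at 1 Hz instead of 10 Hz.
                      */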
9498                 if (tg3_flag(tp, TAGGED_STATUS) &&
9499                         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9500                         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
9501                         tp->timer_offset = HZ;
9502                 else
9503                         tp->timer_offset = HZ / 10;
9504
9505                 BUG_ON(tp->timer_offset > HZ);
9506                 tp->timer_counter = tp->timer_multiplier =
9507                         (HZ / tp->timer_offset);
9508                 tp->asf_counter = tp->asf_multiplier =
9509                         ((HZ / tp->timer_offset) * 2);
9510
9511                 init_timer(&tp->timer);
9512                 tp->timer.expires = jiffies + tp->timer_offset;
9513                 tp->timer.data = (unsigned long) tp;
9514                 tp->timer.function = tg3_timer;
9515         }
9516
9517         tg3_full_unlock(tp);
9518
9519         if (err)
9520                 goto err_out3;
9521
9522         if (tg3_flag(tp, USING_MSI)) {
9523                 err = tg3_test_msi(tp);
9524
9525                 if (err) {
9526                         tg3_full_lock(tp, 0);
9527                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9528                         tg3_free_rings(tp);
9529                         tg3_full_unlock(tp);
9530
9531                         goto err_out2;
9532                 }
9533
9534                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9535                         u32 val = tr32(PCIE_TRANSACTION_CFG);
9536
9537                         tw32(PCIE_TRANSACTION_CFG,
9538                              val | PCIE_TRANS_CFG_1SHOT_MSI);
9539                 }
9540         }
9541
9542         tg3_phy_start(tp);
9543
9544         tg3_full_lock(tp, 0);
9545
9546         add_timer(&tp->timer);
9547         tg3_flag_set(tp, INIT_COMPLETE);
9548         tg3_enable_ints(tp);
9549
9550         tg3_full_unlock(tp);
9551
9552         netif_tx_start_all_queues(dev);
9553
9554         /*
9555          * Reset the loopback feature if it was turned on while the device
9556          * was down, so that it is reinstalled properly now.
9557          */
9558         if (dev->features & NETIF_F_LOOPBACK)
9559                 tg3_set_loopback(dev, dev->features);
9560
9561         return 0;
9562
9563 err_out3:
9564         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9565                 struct tg3_napi *tnapi = &tp->napi[i];
9566                 free_irq(tnapi->irq_vec, tnapi);
9567         }
9568
9569 err_out2:
9570         tg3_napi_disable(tp);
9571         tg3_napi_fini(tp);
9572         tg3_free_consistent(tp);
9573
9574 err_out1:
9575         tg3_ints_fini(tp);
9576         tg3_frob_aux_power(tp, false);
9577         pci_set_power_state(tp->pdev, PCI_D3hot);
9578         return err;
9579 }
9580
9581 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9582                                                  struct rtnl_link_stats64 *);
9583 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9584
9585 static int tg3_close(struct net_device *dev)
9586 {
9587         int i;
9588         struct tg3 *tp = netdev_priv(dev);
9589
9590         tg3_napi_disable(tp);
9591         cancel_work_sync(&tp->reset_task);
9592
9593         netif_tx_stop_all_queues(dev);
9594
9595         del_timer_sync(&tp->timer);
9596
9597         tg3_phy_stop(tp);
9598
9599         tg3_full_lock(tp, 1);
9600
9601         tg3_disable_ints(tp);
9602
9603         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9604         tg3_free_rings(tp);
9605         tg3_flag_clear(tp, INIT_COMPLETE);
9606
9607         tg3_full_unlock(tp);
9608
9609         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9610                 struct tg3_napi *tnapi = &tp->napi[i];
9611                 free_irq(tnapi->irq_vec, tnapi);
9612         }
9613
9614         tg3_ints_fini(tp);
9615
9616         tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9617
9618         memcpy(&tp->estats_prev, tg3_get_estats(tp),
9619                sizeof(tp->estats_prev));
9620
9621         tg3_napi_fini(tp);
9622
9623         tg3_free_consistent(tp);
9624
9625         tg3_power_down(tp);
9626
9627         netif_carrier_off(tp->dev);
9628
9629         return 0;
9630 }
9631
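     /* Combine a tg3_stat64_t's high and low halves into one u64. */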
9632 static inline u64 get_stat64(tg3_stat64_t *val)
9633 {
9634         return ((u64)val->high << 32) | ((u64)val->low);
9635 }
9636
9637 static u64 calc_crc_errors(struct tg3 *tp)
9638 {
9639         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9640
9641         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9642             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9643              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9644                 u32 val;
9645
9646                 spin_lock_bh(&tp->lock);
9647                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9648                         tg3_writephy(tp, MII_TG3_TEST1,
9649                                      val | MII_TG3_TEST1_CRC_EN);
9650                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9651                 } else
9652                         val = 0;
9653                 spin_unlock_bh(&tp->lock);
9654
9655                 tp->phy_crc_errors += val;
9656
9657                 return tp->phy_crc_errors;
9658         }
9659
9660         return get_stat64(&hw_stats->rx_fcs_errors);
9661 }
9662
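     /* Report each ethtool counter as the snapshot saved at the last
      * close plus the live hardware value, so totals survive a
      * down/up cycle.
      */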
9663 #define ESTAT_ADD(member) \
9664         estats->member =        old_estats->member + \
9665                                 get_stat64(&hw_stats->member)
9666
9667 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9668 {
9669         struct tg3_ethtool_stats *estats = &tp->estats;
9670         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9671         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9672
9673         if (!hw_stats)
9674                 return old_estats;
9675
9676         ESTAT_ADD(rx_octets);
9677         ESTAT_ADD(rx_fragments);
9678         ESTAT_ADD(rx_ucast_packets);
9679         ESTAT_ADD(rx_mcast_packets);
9680         ESTAT_ADD(rx_bcast_packets);
9681         ESTAT_ADD(rx_fcs_errors);
9682         ESTAT_ADD(rx_align_errors);
9683         ESTAT_ADD(rx_xon_pause_rcvd);
9684         ESTAT_ADD(rx_xoff_pause_rcvd);
9685         ESTAT_ADD(rx_mac_ctrl_rcvd);
9686         ESTAT_ADD(rx_xoff_entered);
9687         ESTAT_ADD(rx_frame_too_long_errors);
9688         ESTAT_ADD(rx_jabbers);
9689         ESTAT_ADD(rx_undersize_packets);
9690         ESTAT_ADD(rx_in_length_errors);
9691         ESTAT_ADD(rx_out_length_errors);
9692         ESTAT_ADD(rx_64_or_less_octet_packets);
9693         ESTAT_ADD(rx_65_to_127_octet_packets);
9694         ESTAT_ADD(rx_128_to_255_octet_packets);
9695         ESTAT_ADD(rx_256_to_511_octet_packets);
9696         ESTAT_ADD(rx_512_to_1023_octet_packets);
9697         ESTAT_ADD(rx_1024_to_1522_octet_packets);
9698         ESTAT_ADD(rx_1523_to_2047_octet_packets);
9699         ESTAT_ADD(rx_2048_to_4095_octet_packets);
9700         ESTAT_ADD(rx_4096_to_8191_octet_packets);
9701         ESTAT_ADD(rx_8192_to_9022_octet_packets);
9702
9703         ESTAT_ADD(tx_octets);
9704         ESTAT_ADD(tx_collisions);
9705         ESTAT_ADD(tx_xon_sent);
9706         ESTAT_ADD(tx_xoff_sent);
9707         ESTAT_ADD(tx_flow_control);
9708         ESTAT_ADD(tx_mac_errors);
9709         ESTAT_ADD(tx_single_collisions);
9710         ESTAT_ADD(tx_mult_collisions);
9711         ESTAT_ADD(tx_deferred);
9712         ESTAT_ADD(tx_excessive_collisions);
9713         ESTAT_ADD(tx_late_collisions);
9714         ESTAT_ADD(tx_collide_2times);
9715         ESTAT_ADD(tx_collide_3times);
9716         ESTAT_ADD(tx_collide_4times);
9717         ESTAT_ADD(tx_collide_5times);
9718         ESTAT_ADD(tx_collide_6times);
9719         ESTAT_ADD(tx_collide_7times);
9720         ESTAT_ADD(tx_collide_8times);
9721         ESTAT_ADD(tx_collide_9times);
9722         ESTAT_ADD(tx_collide_10times);
9723         ESTAT_ADD(tx_collide_11times);
9724         ESTAT_ADD(tx_collide_12times);
9725         ESTAT_ADD(tx_collide_13times);
9726         ESTAT_ADD(tx_collide_14times);
9727         ESTAT_ADD(tx_collide_15times);
9728         ESTAT_ADD(tx_ucast_packets);
9729         ESTAT_ADD(tx_mcast_packets);
9730         ESTAT_ADD(tx_bcast_packets);
9731         ESTAT_ADD(tx_carrier_sense_errors);
9732         ESTAT_ADD(tx_discards);
9733         ESTAT_ADD(tx_errors);
9734
9735         ESTAT_ADD(dma_writeq_full);
9736         ESTAT_ADD(dma_write_prioq_full);
9737         ESTAT_ADD(rxbds_empty);
9738         ESTAT_ADD(rx_discards);
9739         ESTAT_ADD(rx_errors);
9740         ESTAT_ADD(rx_threshold_hit);
9741
9742         ESTAT_ADD(dma_readq_full);
9743         ESTAT_ADD(dma_read_prioq_full);
9744         ESTAT_ADD(tx_comp_queue_full);
9745
9746         ESTAT_ADD(ring_set_send_prod_index);
9747         ESTAT_ADD(ring_status_update);
9748         ESTAT_ADD(nic_irqs);
9749         ESTAT_ADD(nic_avoided_irqs);
9750         ESTAT_ADD(nic_tx_threshold_hit);
9751
9752         ESTAT_ADD(mbuf_lwm_thresh_hit);
9753
9754         return estats;
9755 }
9756
9757 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9758                                                  struct rtnl_link_stats64 *stats)
9759 {
9760         struct tg3 *tp = netdev_priv(dev);
9761         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9762         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9763
9764         if (!hw_stats)
9765                 return old_stats;
9766
9767         stats->rx_packets = old_stats->rx_packets +
9768                 get_stat64(&hw_stats->rx_ucast_packets) +
9769                 get_stat64(&hw_stats->rx_mcast_packets) +
9770                 get_stat64(&hw_stats->rx_bcast_packets);
9771
9772         stats->tx_packets = old_stats->tx_packets +
9773                 get_stat64(&hw_stats->tx_ucast_packets) +
9774                 get_stat64(&hw_stats->tx_mcast_packets) +
9775                 get_stat64(&hw_stats->tx_bcast_packets);
9776
9777         stats->rx_bytes = old_stats->rx_bytes +
9778                 get_stat64(&hw_stats->rx_octets);
9779         stats->tx_bytes = old_stats->tx_bytes +
9780                 get_stat64(&hw_stats->tx_octets);
9781
9782         stats->rx_errors = old_stats->rx_errors +
9783                 get_stat64(&hw_stats->rx_errors);
9784         stats->tx_errors = old_stats->tx_errors +
9785                 get_stat64(&hw_stats->tx_errors) +
9786                 get_stat64(&hw_stats->tx_mac_errors) +
9787                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9788                 get_stat64(&hw_stats->tx_discards);
9789
9790         stats->multicast = old_stats->multicast +
9791                 get_stat64(&hw_stats->rx_mcast_packets);
9792         stats->collisions = old_stats->collisions +
9793                 get_stat64(&hw_stats->tx_collisions);
9794
9795         stats->rx_length_errors = old_stats->rx_length_errors +
9796                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9797                 get_stat64(&hw_stats->rx_undersize_packets);
9798
9799         stats->rx_over_errors = old_stats->rx_over_errors +
9800                 get_stat64(&hw_stats->rxbds_empty);
9801         stats->rx_frame_errors = old_stats->rx_frame_errors +
9802                 get_stat64(&hw_stats->rx_align_errors);
9803         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9804                 get_stat64(&hw_stats->tx_discards);
9805         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9806                 get_stat64(&hw_stats->tx_carrier_sense_errors);
9807
9808         stats->rx_crc_errors = old_stats->rx_crc_errors +
9809                 calc_crc_errors(tp);
9810
9811         stats->rx_missed_errors = old_stats->rx_missed_errors +
9812                 get_stat64(&hw_stats->rx_discards);
9813
9814         stats->rx_dropped = tp->rx_dropped;
9815
9816         return stats;
9817 }
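/* As with tg3_get_estats(), each counter above is the value saved at
 * the last close (tp->net_stats_prev) plus the live hardware counter,
 * so the totals survive chip resets (editor's note).
 */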
9818
9819 static inline u32 calc_crc(unsigned char *buf, int len)
9820 {
9821         u32 reg;
9822         u32 tmp;
9823         int j, k;
9824
9825         reg = 0xffffffff;
9826
9827         for (j = 0; j < len; j++) {
9828                 reg ^= buf[j];
9829
9830                 for (k = 0; k < 8; k++) {
9831                         tmp = reg & 0x01;
9832
9833                         reg >>= 1;
9834
9835                         if (tmp)
9836                                 reg ^= 0xedb88320;
9837                 }
9838         }
9839
9840         return ~reg;
9841 }
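/* This is the standard reflected (LSB-first) CRC-32 with polynomial
 * 0xedb88320, as used by Ethernet.  It should compute the same value
 * as ether_crc_le() from <linux/crc32.h>; the equivalence is noted
 * here for the reader and the open-coded version is kept as-is.
 */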
9842
9843 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9844 {
9845         /* accept or reject all multicast frames */
9846         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9847         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9848         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9849         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9850 }
9851
9852 static void __tg3_set_rx_mode(struct net_device *dev)
9853 {
9854         struct tg3 *tp = netdev_priv(dev);
9855         u32 rx_mode;
9856
9857         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9858                                   RX_MODE_KEEP_VLAN_TAG);
9859
9860 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9861         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9862          * flag clear.
9863          */
9864         if (!tg3_flag(tp, ENABLE_ASF))
9865                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9866 #endif
9867
9868         if (dev->flags & IFF_PROMISC) {
9869                 /* Promiscuous mode. */
9870                 rx_mode |= RX_MODE_PROMISC;
9871         } else if (dev->flags & IFF_ALLMULTI) {
9872                 /* Accept all multicast. */
9873                 tg3_set_multi(tp, 1);
9874         } else if (netdev_mc_empty(dev)) {
9875                 /* Reject all multicast. */
9876                 tg3_set_multi(tp, 0);
9877         } else {
9878                 /* Accept one or more multicast(s). */
9879                 struct netdev_hw_addr *ha;
9880                 u32 mc_filter[4] = { 0, };
9881                 u32 regidx;
9882                 u32 bit;
9883                 u32 crc;
9884
9885                 netdev_for_each_mc_addr(ha, dev) {
9886                         crc = calc_crc(ha->addr, ETH_ALEN);
9887                         bit = ~crc & 0x7f;
9888                         regidx = (bit & 0x60) >> 5;
9889                         bit &= 0x1f;
9890                         mc_filter[regidx] |= (1 << bit);
9891                 }
9892
9893                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9894                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9895                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9896                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9897         }
9898
9899         if (rx_mode != tp->rx_mode) {
9900                 tp->rx_mode = rx_mode;
9901                 tw32_f(MAC_RX_MODE, rx_mode);
9902                 udelay(10);
9903         }
9904 }
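/* Worked example of the multicast hash above (values illustrative):
 * an address whose CRC-32 is 0xffffff2c gives
 *
 *      bit    = ~0xffffff2c & 0x7f = 0x53
 *      regidx = (0x53 & 0x60) >> 5 = 2
 *      bit   &= 0x1f               = 19
 *
 * so bit 19 of MAC_HASH_REG_2 is set for that address.
 */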
9905
9906 static void tg3_set_rx_mode(struct net_device *dev)
9907 {
9908         struct tg3 *tp = netdev_priv(dev);
9909
9910         if (!netif_running(dev))
9911                 return;
9912
9913         tg3_full_lock(tp, 0);
9914         __tg3_set_rx_mode(dev);
9915         tg3_full_unlock(tp);
9916 }
9917
9918 static int tg3_get_regs_len(struct net_device *dev)
9919 {
9920         return TG3_REG_BLK_SIZE;
9921 }
9922
9923 static void tg3_get_regs(struct net_device *dev,
9924                 struct ethtool_regs *regs, void *_p)
9925 {
9926         struct tg3 *tp = netdev_priv(dev);
9927
9928         regs->version = 0;
9929
9930         memset(_p, 0, TG3_REG_BLK_SIZE);
9931
9932         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9933                 return;
9934
9935         tg3_full_lock(tp, 0);
9936
9937         tg3_dump_legacy_regs(tp, (u32 *)_p);
9938
9939         tg3_full_unlock(tp);
9940 }
9941
9942 static int tg3_get_eeprom_len(struct net_device *dev)
9943 {
9944         struct tg3 *tp = netdev_priv(dev);
9945
9946         return tp->nvram_size;
9947 }
9948
9949 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9950 {
9951         struct tg3 *tp = netdev_priv(dev);
9952         int ret;
9953         u8  *pd;
9954         u32 i, offset, len, b_offset, b_count;
9955         __be32 val;
9956
9957         if (tg3_flag(tp, NO_NVRAM))
9958                 return -EINVAL;
9959
9960         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9961                 return -EAGAIN;
9962
9963         offset = eeprom->offset;
9964         len = eeprom->len;
9965         eeprom->len = 0;
9966
9967         eeprom->magic = TG3_EEPROM_MAGIC;
9968
9969         if (offset & 3) {
9970                 /* adjustments to start on required 4 byte boundary */
9971                 b_offset = offset & 3;
9972                 b_count = 4 - b_offset;
9973                 if (b_count > len) {
9974                         /* i.e. offset=1 len=2 */
9975                         b_count = len;
9976                 }
9977                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9978                 if (ret)
9979                         return ret;
9980                 memcpy(data, ((char *)&val) + b_offset, b_count);
9981                 len -= b_count;
9982                 offset += b_count;
9983                 eeprom->len += b_count;
9984         }
9985
9986         /* read bytes up to the last 4 byte boundary */
9987         pd = &data[eeprom->len];
9988         for (i = 0; i < (len - (len & 3)); i += 4) {
9989                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9990                 if (ret) {
9991                         eeprom->len += i;
9992                         return ret;
9993                 }
9994                 memcpy(pd + i, &val, 4);
9995         }
9996         eeprom->len += i;
9997
9998         if (len & 3) {
9999                 /* read last bytes not ending on 4 byte boundary */
10000                 pd = &data[eeprom->len];
10001                 b_count = len & 3;
10002                 b_offset = offset + len - b_count;
10003                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10004                 if (ret)
10005                         return ret;
10006                 memcpy(pd, &val, b_count);
10007                 eeprom->len += b_count;
10008         }
10009         return 0;
10010 }
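/* Editor's summary: the read runs in three phases: a partial leading
 * word when the offset is not 4-byte aligned, whole 32-bit words for
 * the aligned middle, and a partial trailing word for leftover bytes.
 * E.g. offset = 1, len = 6 reads the words at 0 and 4 and copies bytes
 * 1 through 6 of the result into the caller's buffer.
 */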
10011
10012 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
10013
10014 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10015 {
10016         struct tg3 *tp = netdev_priv(dev);
10017         int ret;
10018         u32 offset, len, b_offset, odd_len;
10019         u8 *buf;
10020         __be32 start, end;
10021
10022         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10023                 return -EAGAIN;
10024
10025         if (tg3_flag(tp, NO_NVRAM) ||
10026             eeprom->magic != TG3_EEPROM_MAGIC)
10027                 return -EINVAL;
10028
10029         offset = eeprom->offset;
10030         len = eeprom->len;
10031
10032         if ((b_offset = (offset & 3))) {
10033                 /* adjustments to start on required 4 byte boundary */
10034                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10035                 if (ret)
10036                         return ret;
10037                 len += b_offset;
10038                 offset &= ~3;
10039                 if (len < 4)
10040                         len = 4;
10041         }
10042
10043         odd_len = 0;
10044         if (len & 3) {
10045                 /* adjustments to end on required 4 byte boundary */
10046                 odd_len = 1;
10047                 len = (len + 3) & ~3;
10048                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10049                 if (ret)
10050                         return ret;
10051         }
10052
10053         buf = data;
10054         if (b_offset || odd_len) {
10055                 buf = kmalloc(len, GFP_KERNEL);
10056                 if (!buf)
10057                         return -ENOMEM;
10058                 if (b_offset)
10059                         memcpy(buf, &start, 4);
10060                 if (odd_len)
10061                         memcpy(buf+len-4, &end, 4);
10062                 memcpy(buf + b_offset, data, eeprom->len);
10063         }
10064
10065         ret = tg3_nvram_write_block(tp, offset, len, buf);
10066
10067         if (buf != data)
10068                 kfree(buf);
10069
10070         return ret;
10071 }
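/* Editor's summary: writes use read-modify-write at the edges.  If the
 * request does not start or end on a 4-byte boundary, the surrounding
 * NVRAM words are read first and the user data is spliced into a
 * bounce buffer, so tg3_nvram_write_block() only ever sees aligned,
 * whole words.
 */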
10072
10073 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10074 {
10075         struct tg3 *tp = netdev_priv(dev);
10076
10077         if (tg3_flag(tp, USE_PHYLIB)) {
10078                 struct phy_device *phydev;
10079                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10080                         return -EAGAIN;
10081                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10082                 return phy_ethtool_gset(phydev, cmd);
10083         }
10084
10085         cmd->supported = (SUPPORTED_Autoneg);
10086
10087         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10088                 cmd->supported |= (SUPPORTED_1000baseT_Half |
10089                                    SUPPORTED_1000baseT_Full);
10090
10091         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10092                 cmd->supported |= (SUPPORTED_100baseT_Half |
10093                                   SUPPORTED_100baseT_Full |
10094                                   SUPPORTED_10baseT_Half |
10095                                   SUPPORTED_10baseT_Full |
10096                                   SUPPORTED_TP);
10097                 cmd->port = PORT_TP;
10098         } else {
10099                 cmd->supported |= SUPPORTED_FIBRE;
10100                 cmd->port = PORT_FIBRE;
10101         }
10102
10103         cmd->advertising = tp->link_config.advertising;
10104         if (tg3_flag(tp, PAUSE_AUTONEG)) {
10105                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10106                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10107                                 cmd->advertising |= ADVERTISED_Pause;
10108                         } else {
10109                                 cmd->advertising |= ADVERTISED_Pause |
10110                                                     ADVERTISED_Asym_Pause;
10111                         }
10112                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10113                         cmd->advertising |= ADVERTISED_Asym_Pause;
10114                 }
10115         }
10116         if (netif_running(dev)) {
10117                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10118                 cmd->duplex = tp->link_config.active_duplex;
10119         } else {
10120                 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
10121                 cmd->duplex = DUPLEX_INVALID;
10122         }
10123         cmd->phy_address = tp->phy_addr;
10124         cmd->transceiver = XCVR_INTERNAL;
10125         cmd->autoneg = tp->link_config.autoneg;
10126         cmd->maxtxpkt = 0;
10127         cmd->maxrxpkt = 0;
10128         return 0;
10129 }
10130
10131 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10132 {
10133         struct tg3 *tp = netdev_priv(dev);
10134         u32 speed = ethtool_cmd_speed(cmd);
10135
10136         if (tg3_flag(tp, USE_PHYLIB)) {
10137                 struct phy_device *phydev;
10138                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10139                         return -EAGAIN;
10140                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10141                 return phy_ethtool_sset(phydev, cmd);
10142         }
10143
10144         if (cmd->autoneg != AUTONEG_ENABLE &&
10145             cmd->autoneg != AUTONEG_DISABLE)
10146                 return -EINVAL;
10147
10148         if (cmd->autoneg == AUTONEG_DISABLE &&
10149             cmd->duplex != DUPLEX_FULL &&
10150             cmd->duplex != DUPLEX_HALF)
10151                 return -EINVAL;
10152
10153         if (cmd->autoneg == AUTONEG_ENABLE) {
10154                 u32 mask = ADVERTISED_Autoneg |
10155                            ADVERTISED_Pause |
10156                            ADVERTISED_Asym_Pause;
10157
10158                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10159                         mask |= ADVERTISED_1000baseT_Half |
10160                                 ADVERTISED_1000baseT_Full;
10161
10162                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10163                         mask |= ADVERTISED_100baseT_Half |
10164                                 ADVERTISED_100baseT_Full |
10165                                 ADVERTISED_10baseT_Half |
10166                                 ADVERTISED_10baseT_Full |
10167                                 ADVERTISED_TP;
10168                 else
10169                         mask |= ADVERTISED_FIBRE;
10170
10171                 if (cmd->advertising & ~mask)
10172                         return -EINVAL;
10173
10174                 mask &= (ADVERTISED_1000baseT_Half |
10175                          ADVERTISED_1000baseT_Full |
10176                          ADVERTISED_100baseT_Half |
10177                          ADVERTISED_100baseT_Full |
10178                          ADVERTISED_10baseT_Half |
10179                          ADVERTISED_10baseT_Full);
10180
10181                 cmd->advertising &= mask;
10182         } else {
10183                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10184                         if (speed != SPEED_1000)
10185                                 return -EINVAL;
10186
10187                         if (cmd->duplex != DUPLEX_FULL)
10188                                 return -EINVAL;
10189                 } else {
10190                         if (speed != SPEED_100 &&
10191                             speed != SPEED_10)
10192                                 return -EINVAL;
10193                 }
10194         }
10195
10196         tg3_full_lock(tp, 0);
10197
10198         tp->link_config.autoneg = cmd->autoneg;
10199         if (cmd->autoneg == AUTONEG_ENABLE) {
10200                 tp->link_config.advertising = (cmd->advertising |
10201                                               ADVERTISED_Autoneg);
10202                 tp->link_config.speed = SPEED_INVALID;
10203                 tp->link_config.duplex = DUPLEX_INVALID;
10204         } else {
10205                 tp->link_config.advertising = 0;
10206                 tp->link_config.speed = speed;
10207                 tp->link_config.duplex = cmd->duplex;
10208         }
10209
10210         tp->link_config.orig_speed = tp->link_config.speed;
10211         tp->link_config.orig_duplex = tp->link_config.duplex;
10212         tp->link_config.orig_autoneg = tp->link_config.autoneg;
10213
10214         if (netif_running(dev))
10215                 tg3_setup_phy(tp, 1);
10216
10217         tg3_full_unlock(tp);
10218
10219         return 0;
10220 }
10221
10222 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10223 {
10224         struct tg3 *tp = netdev_priv(dev);
10225
10226         strcpy(info->driver, DRV_MODULE_NAME);
10227         strcpy(info->version, DRV_MODULE_VERSION);
10228         strcpy(info->fw_version, tp->fw_ver);
10229         strcpy(info->bus_info, pci_name(tp->pdev));
10230 }
10231
10232 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10233 {
10234         struct tg3 *tp = netdev_priv(dev);
10235
10236         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10237                 wol->supported = WAKE_MAGIC;
10238         else
10239                 wol->supported = 0;
10240         wol->wolopts = 0;
10241         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10242                 wol->wolopts = WAKE_MAGIC;
10243         memset(&wol->sopass, 0, sizeof(wol->sopass));
10244 }
10245
10246 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10247 {
10248         struct tg3 *tp = netdev_priv(dev);
10249         struct device *dp = &tp->pdev->dev;
10250
10251         if (wol->wolopts & ~WAKE_MAGIC)
10252                 return -EINVAL;
10253         if ((wol->wolopts & WAKE_MAGIC) &&
10254             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10255                 return -EINVAL;
10256
10257         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10258
10259         spin_lock_bh(&tp->lock);
10260         if (device_may_wakeup(dp))
10261                 tg3_flag_set(tp, WOL_ENABLE);
10262         else
10263                 tg3_flag_clear(tp, WOL_ENABLE);
10264         spin_unlock_bh(&tp->lock);
10265
10266         return 0;
10267 }
10268
10269 static u32 tg3_get_msglevel(struct net_device *dev)
10270 {
10271         struct tg3 *tp = netdev_priv(dev);
10272         return tp->msg_enable;
10273 }
10274
10275 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10276 {
10277         struct tg3 *tp = netdev_priv(dev);
10278         tp->msg_enable = value;
10279 }
10280
10281 static int tg3_nway_reset(struct net_device *dev)
10282 {
10283         struct tg3 *tp = netdev_priv(dev);
10284         int r;
10285
10286         if (!netif_running(dev))
10287                 return -EAGAIN;
10288
10289         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10290                 return -EINVAL;
10291
10292         if (tg3_flag(tp, USE_PHYLIB)) {
10293                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10294                         return -EAGAIN;
10295                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10296         } else {
10297                 u32 bmcr;
10298
10299                 spin_lock_bh(&tp->lock);
10300                 r = -EINVAL;
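                /* Note: BMCR is read twice and the first result is
                 * discarded; the double read appears intentional
                 * (guarding against a stale first value from the PHY)
                 * and is left unchanged here.
                 */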
10301                 tg3_readphy(tp, MII_BMCR, &bmcr);
10302                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10303                     ((bmcr & BMCR_ANENABLE) ||
10304                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10305                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10306                                                    BMCR_ANENABLE);
10307                         r = 0;
10308                 }
10309                 spin_unlock_bh(&tp->lock);
10310         }
10311
10312         return r;
10313 }
10314
10315 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10316 {
10317         struct tg3 *tp = netdev_priv(dev);
10318
10319         ering->rx_max_pending = tp->rx_std_ring_mask;
10320         ering->rx_mini_max_pending = 0;
10321         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10322                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10323         else
10324                 ering->rx_jumbo_max_pending = 0;
10325
10326         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10327
10328         ering->rx_pending = tp->rx_pending;
10329         ering->rx_mini_pending = 0;
10330         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10331                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10332         else
10333                 ering->rx_jumbo_pending = 0;
10334
10335         ering->tx_pending = tp->napi[0].tx_pending;
10336 }
10337
10338 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10339 {
10340         struct tg3 *tp = netdev_priv(dev);
10341         int i, irq_sync = 0, err = 0;
10342
10343         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10344             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10345             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10346             (ering->tx_pending <= MAX_SKB_FRAGS) ||
10347             (tg3_flag(tp, TSO_BUG) &&
10348              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10349                 return -EINVAL;
10350
10351         if (netif_running(dev)) {
10352                 tg3_phy_stop(tp);
10353                 tg3_netif_stop(tp);
10354                 irq_sync = 1;
10355         }
10356
10357         tg3_full_lock(tp, irq_sync);
10358
10359         tp->rx_pending = ering->rx_pending;
10360
10361         if (tg3_flag(tp, MAX_RXPEND_64) &&
10362             tp->rx_pending > 63)
10363                 tp->rx_pending = 63;
10364         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10365
10366         for (i = 0; i < tp->irq_max; i++)
10367                 tp->napi[i].tx_pending = ering->tx_pending;
10368
10369         if (netif_running(dev)) {
10370                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10371                 err = tg3_restart_hw(tp, 1);
10372                 if (!err)
10373                         tg3_netif_start(tp);
10374         }
10375
10376         tg3_full_unlock(tp);
10377
10378         if (irq_sync && !err)
10379                 tg3_phy_start(tp);
10380
10381         return err;
10382 }
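/* Editor's reading of the -EINVAL checks above: every skb fragment
 * consumes one descriptor, so a TX ring with tx_pending <= MAX_SKB_FRAGS
 * could never hold a maximally fragmented packet, and TSO_BUG devices
 * keep extra headroom for the driver's segmentation workaround.
 */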
10383
10384 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10385 {
10386         struct tg3 *tp = netdev_priv(dev);
10387
10388         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10389
10390         if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10391                 epause->rx_pause = 1;
10392         else
10393                 epause->rx_pause = 0;
10394
10395         if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10396                 epause->tx_pause = 1;
10397         else
10398                 epause->tx_pause = 0;
10399 }
10400
10401 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10402 {
10403         struct tg3 *tp = netdev_priv(dev);
10404         int err = 0;
10405
10406         if (tg3_flag(tp, USE_PHYLIB)) {
10407                 u32 newadv;
10408                 struct phy_device *phydev;
10409
10410                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10411
10412                 if (!(phydev->supported & SUPPORTED_Pause) ||
10413                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10414                      (epause->rx_pause != epause->tx_pause)))
10415                         return -EINVAL;
10416
10417                 tp->link_config.flowctrl = 0;
10418                 if (epause->rx_pause) {
10419                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10420
10421                         if (epause->tx_pause) {
10422                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10423                                 newadv = ADVERTISED_Pause;
10424                         } else
10425                                 newadv = ADVERTISED_Pause |
10426                                          ADVERTISED_Asym_Pause;
10427                 } else if (epause->tx_pause) {
10428                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10429                         newadv = ADVERTISED_Asym_Pause;
10430                 } else
10431                         newadv = 0;
10432
10433                 if (epause->autoneg)
10434                         tg3_flag_set(tp, PAUSE_AUTONEG);
10435                 else
10436                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10437
10438                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10439                         u32 oldadv = phydev->advertising &
10440                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10441                         if (oldadv != newadv) {
10442                                 phydev->advertising &=
10443                                         ~(ADVERTISED_Pause |
10444                                           ADVERTISED_Asym_Pause);
10445                                 phydev->advertising |= newadv;
10446                                 if (phydev->autoneg) {
10447                                         /*
10448                                          * Always renegotiate the link to
10449                                          * inform our link partner of our
10450                                          * flow control settings, even if the
10451                                          * flow control is forced.  Let
10452                                          * tg3_adjust_link() do the final
10453                                          * flow control setup.
10454                                          */
10455                                         return phy_start_aneg(phydev);
10456                                 }
10457                         }
10458
10459                         if (!epause->autoneg)
10460                                 tg3_setup_flow_control(tp, 0, 0);
10461                 } else {
10462                         tp->link_config.orig_advertising &=
10463                                         ~(ADVERTISED_Pause |
10464                                           ADVERTISED_Asym_Pause);
10465                         tp->link_config.orig_advertising |= newadv;
10466                 }
10467         } else {
10468                 int irq_sync = 0;
10469
10470                 if (netif_running(dev)) {
10471                         tg3_netif_stop(tp);
10472                         irq_sync = 1;
10473                 }
10474
10475                 tg3_full_lock(tp, irq_sync);
10476
10477                 if (epause->autoneg)
10478                         tg3_flag_set(tp, PAUSE_AUTONEG);
10479                 else
10480                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10481                 if (epause->rx_pause)
10482                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10483                 else
10484                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10485                 if (epause->tx_pause)
10486                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10487                 else
10488                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10489
10490                 if (netif_running(dev)) {
10491                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10492                         err = tg3_restart_hw(tp, 1);
10493                         if (!err)
10494                                 tg3_netif_start(tp);
10495                 }
10496
10497                 tg3_full_unlock(tp);
10498         }
10499
10500         return err;
10501 }
10502
10503 static int tg3_get_sset_count(struct net_device *dev, int sset)
10504 {
10505         switch (sset) {
10506         case ETH_SS_TEST:
10507                 return TG3_NUM_TEST;
10508         case ETH_SS_STATS:
10509                 return TG3_NUM_STATS;
10510         default:
10511                 return -EOPNOTSUPP;
10512         }
10513 }
10514
10515 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10516 {
10517         switch (stringset) {
10518         case ETH_SS_STATS:
10519                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10520                 break;
10521         case ETH_SS_TEST:
10522                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10523                 break;
10524         default:
10525                 WARN_ON(1);     /* we need a WARN() */
10526                 break;
10527         }
10528 }
10529
10530 static int tg3_set_phys_id(struct net_device *dev,
10531                             enum ethtool_phys_id_state state)
10532 {
10533         struct tg3 *tp = netdev_priv(dev);
10534
10535         if (!netif_running(tp->dev))
10536                 return -EAGAIN;
10537
10538         switch (state) {
10539         case ETHTOOL_ID_ACTIVE:
10540                 return 1;       /* cycle on/off once per second */
10541
10542         case ETHTOOL_ID_ON:
10543                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10544                      LED_CTRL_1000MBPS_ON |
10545                      LED_CTRL_100MBPS_ON |
10546                      LED_CTRL_10MBPS_ON |
10547                      LED_CTRL_TRAFFIC_OVERRIDE |
10548                      LED_CTRL_TRAFFIC_BLINK |
10549                      LED_CTRL_TRAFFIC_LED);
10550                 break;
10551
10552         case ETHTOOL_ID_OFF:
10553                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10554                      LED_CTRL_TRAFFIC_OVERRIDE);
10555                 break;
10556
10557         case ETHTOOL_ID_INACTIVE:
10558                 tw32(MAC_LED_CTRL, tp->led_ctrl);
10559                 break;
10560         }
10561
10562         return 0;
10563 }
10564
10565 static void tg3_get_ethtool_stats(struct net_device *dev,
10566                                    struct ethtool_stats *estats, u64 *tmp_stats)
10567 {
10568         struct tg3 *tp = netdev_priv(dev);
10569         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10570 }
10571
10572 static __be32 *tg3_vpd_readblock(struct tg3 *tp)
10573 {
10574         int i;
10575         __be32 *buf;
10576         u32 offset = 0, len = 0;
10577         u32 magic, val;
10578
10579         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10580                 return NULL;
10581
10582         if (magic == TG3_EEPROM_MAGIC) {
10583                 for (offset = TG3_NVM_DIR_START;
10584                      offset < TG3_NVM_DIR_END;
10585                      offset += TG3_NVM_DIRENT_SIZE) {
10586                         if (tg3_nvram_read(tp, offset, &val))
10587                                 return NULL;
10588
10589                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10590                             TG3_NVM_DIRTYPE_EXTVPD)
10591                                 break;
10592                 }
10593
10594                 if (offset != TG3_NVM_DIR_END) {
10595                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10596                         if (tg3_nvram_read(tp, offset + 4, &offset))
10597                                 return NULL;
10598
10599                         offset = tg3_nvram_logical_addr(tp, offset);
10600                 }
10601         }
10602
10603         if (!offset || !len) {
10604                 offset = TG3_NVM_VPD_OFF;
10605                 len = TG3_NVM_VPD_LEN;
10606         }
10607
10608         buf = kmalloc(len, GFP_KERNEL);
10609         if (buf == NULL)
10610                 return NULL;
10611
10612         if (magic == TG3_EEPROM_MAGIC) {
10613                 for (i = 0; i < len; i += 4) {
10614                         /* The data is in little-endian format in NVRAM.
10615                          * Use the big-endian read routines to preserve
10616                          * the byte order as it exists in NVRAM.
10617                          */
10618                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10619                                 goto error;
10620                 }
10621         } else {
10622                 u8 *ptr;
10623                 ssize_t cnt;
10624                 unsigned int pos = 0;
10625
10626                 ptr = (u8 *)&buf[0];
10627                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10628                         cnt = pci_read_vpd(tp->pdev, pos,
10629                                            len - pos, ptr);
10630                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
10631                                 cnt = 0;
10632                         else if (cnt < 0)
10633                                 goto error;
10634                 }
10635                 if (pos != len)
10636                         goto error;
10637         }
10638
10639         return buf;
10640
10641 error:
10642         kfree(buf);
10643         return NULL;
10644 }
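/* Editor's summary: VPD comes either from an extended-VPD entry in the
 * NVRAM directory, or from the fixed TG3_NVM_VPD_OFF/TG3_NVM_VPD_LEN
 * window; when NVRAM does not carry the TG3_EEPROM_MAGIC signature the
 * block is fetched from PCI config-space VPD via pci_read_vpd().
 */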
10645
10646 #define NVRAM_TEST_SIZE 0x100
10647 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
10648 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
10649 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
10650 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
10651 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
10652 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x4c
10653 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10654 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10655
10656 static int tg3_test_nvram(struct tg3 *tp)
10657 {
10658         u32 csum, magic;
10659         __be32 *buf;
10660         int i, j, k, err = 0, size;
10661
10662         if (tg3_flag(tp, NO_NVRAM))
10663                 return 0;
10664
10665         if (tg3_nvram_read(tp, 0, &magic) != 0)
10666                 return -EIO;
10667
10668         if (magic == TG3_EEPROM_MAGIC)
10669                 size = NVRAM_TEST_SIZE;
10670         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10671                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10672                     TG3_EEPROM_SB_FORMAT_1) {
10673                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10674                         case TG3_EEPROM_SB_REVISION_0:
10675                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10676                                 break;
10677                         case TG3_EEPROM_SB_REVISION_2:
10678                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10679                                 break;
10680                         case TG3_EEPROM_SB_REVISION_3:
10681                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10682                                 break;
10683                         case TG3_EEPROM_SB_REVISION_4:
10684                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
10685                                 break;
10686                         case TG3_EEPROM_SB_REVISION_5:
10687                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
10688                                 break;
10689                         case TG3_EEPROM_SB_REVISION_6:
10690                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
10691                                 break;
10692                         default:
10693                                 return -EIO;
10694                         }
10695                 } else
10696                         return 0;
10697         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10698                 size = NVRAM_SELFBOOT_HW_SIZE;
10699         else
10700                 return -EIO;
10701
10702         buf = kmalloc(size, GFP_KERNEL);
10703         if (buf == NULL)
10704                 return -ENOMEM;
10705
10706         err = -EIO;
10707         for (i = 0, j = 0; i < size; i += 4, j++) {
10708                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10709                 if (err)
10710                         break;
10711         }
10712         if (i < size)
10713                 goto out;
10714
10715         /* Selfboot format */
10716         magic = be32_to_cpu(buf[0]);
10717         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10718             TG3_EEPROM_MAGIC_FW) {
10719                 u8 *buf8 = (u8 *) buf, csum8 = 0;
10720
10721                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10722                     TG3_EEPROM_SB_REVISION_2) {
10723                         /* For rev 2, the csum doesn't include the MBA. */
10724                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10725                                 csum8 += buf8[i];
10726                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10727                                 csum8 += buf8[i];
10728                 } else {
10729                         for (i = 0; i < size; i++)
10730                                 csum8 += buf8[i];
10731                 }
10732
10733                 if (csum8 == 0) {
10734                         err = 0;
10735                         goto out;
10736                 }
10737
10738                 err = -EIO;
10739                 goto out;
10740         }
10741
10742         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10743             TG3_EEPROM_MAGIC_HW) {
10744                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10745                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10746                 u8 *buf8 = (u8 *) buf;
10747
10748                 /* Separate the parity bits and the data bytes.  */
10749                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10750                         if ((i == 0) || (i == 8)) {
10751                                 int l;
10752                                 u8 msk;
10753
10754                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10755                                         parity[k++] = buf8[i] & msk;
10756                                 i++;
10757                         } else if (i == 16) {
10758                                 int l;
10759                                 u8 msk;
10760
10761                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10762                                         parity[k++] = buf8[i] & msk;
10763                                 i++;
10764
10765                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10766                                         parity[k++] = buf8[i] & msk;
10767                                 i++;
10768                         }
10769                         data[j++] = buf8[i];
10770                 }
10771
10772                 err = -EIO;
10773                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10774                         u8 hw8 = hweight8(data[i]);
10775
10776                         if ((hw8 & 0x1) && parity[i])
10777                                 goto out;
10778                         else if (!(hw8 & 0x1) && !parity[i])
10779                                 goto out;
10780                 }
10781                 err = 0;
10782                 goto out;
10783         }
10784
10785         err = -EIO;
10786
10787         /* Bootstrap checksum at offset 0x10 */
10788         csum = calc_crc((unsigned char *) buf, 0x10);
10789         if (csum != le32_to_cpu(buf[0x10/4]))
10790                 goto out;
10791
10792         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10793         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10794         if (csum != le32_to_cpu(buf[0xfc/4]))
10795                 goto out;
10796
10797         kfree(buf);
10798
10799         buf = tg3_vpd_readblock(tp);
10800         if (!buf)
10801                 return -ENOMEM;
10802
10803         i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
10804                              PCI_VPD_LRDT_RO_DATA);
10805         if (i > 0) {
10806                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
10807                 if (j < 0)
10808                         goto out;
10809
10810                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN)
10811                         goto out;
10812
10813                 i += PCI_VPD_LRDT_TAG_SIZE;
10814                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
10815                                               PCI_VPD_RO_KEYWORD_CHKSUM);
10816                 if (j > 0) {
10817                         u8 csum8 = 0;
10818
10819                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
10820
10821                         for (i = 0; i <= j; i++)
10822                                 csum8 += ((u8 *)buf)[i];
10823
10824                         if (csum8)
10825                                 goto out;
10826                 }
10827         }
10828
10829         err = 0;
10830
10831 out:
10832         kfree(buf);
10833         return err;
10834 }
10835
10836 #define TG3_SERDES_TIMEOUT_SEC  2
10837 #define TG3_COPPER_TIMEOUT_SEC  6
10838
10839 static int tg3_test_link(struct tg3 *tp)
10840 {
10841         int i, max;
10842
10843         if (!netif_running(tp->dev))
10844                 return -ENODEV;
10845
10846         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10847                 max = TG3_SERDES_TIMEOUT_SEC;
10848         else
10849                 max = TG3_COPPER_TIMEOUT_SEC;
10850
10851         for (i = 0; i < max; i++) {
10852                 if (netif_carrier_ok(tp->dev))
10853                         return 0;
10854
10855                 if (msleep_interruptible(1000))
10856                         break;
10857         }
10858
10859         return -EIO;
10860 }
10861
10862 /* Only test the commonly used registers */
10863 static int tg3_test_registers(struct tg3 *tp)
10864 {
10865         int i, is_5705, is_5750;
10866         u32 offset, read_mask, write_mask, val, save_val, read_val;
10867         static struct {
10868                 u16 offset;
10869                 u16 flags;
10870 #define TG3_FL_5705     0x1
10871 #define TG3_FL_NOT_5705 0x2
10872 #define TG3_FL_NOT_5788 0x4
10873 #define TG3_FL_NOT_5750 0x8
10874                 u32 read_mask;
10875                 u32 write_mask;
10876         } reg_tbl[] = {
10877                 /* MAC Control Registers */
10878                 { MAC_MODE, TG3_FL_NOT_5705,
10879                         0x00000000, 0x00ef6f8c },
10880                 { MAC_MODE, TG3_FL_5705,
10881                         0x00000000, 0x01ef6b8c },
10882                 { MAC_STATUS, TG3_FL_NOT_5705,
10883                         0x03800107, 0x00000000 },
10884                 { MAC_STATUS, TG3_FL_5705,
10885                         0x03800100, 0x00000000 },
10886                 { MAC_ADDR_0_HIGH, 0x0000,
10887                         0x00000000, 0x0000ffff },
10888                 { MAC_ADDR_0_LOW, 0x0000,
10889                         0x00000000, 0xffffffff },
10890                 { MAC_RX_MTU_SIZE, 0x0000,
10891                         0x00000000, 0x0000ffff },
10892                 { MAC_TX_MODE, 0x0000,
10893                         0x00000000, 0x00000070 },
10894                 { MAC_TX_LENGTHS, 0x0000,
10895                         0x00000000, 0x00003fff },
10896                 { MAC_RX_MODE, TG3_FL_NOT_5705,
10897                         0x00000000, 0x000007fc },
10898                 { MAC_RX_MODE, TG3_FL_5705,
10899                         0x00000000, 0x000007dc },
10900                 { MAC_HASH_REG_0, 0x0000,
10901                         0x00000000, 0xffffffff },
10902                 { MAC_HASH_REG_1, 0x0000,
10903                         0x00000000, 0xffffffff },
10904                 { MAC_HASH_REG_2, 0x0000,
10905                         0x00000000, 0xffffffff },
10906                 { MAC_HASH_REG_3, 0x0000,
10907                         0x00000000, 0xffffffff },
10908
10909                 /* Receive Data and Receive BD Initiator Control Registers. */
10910                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10911                         0x00000000, 0xffffffff },
10912                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10913                         0x00000000, 0xffffffff },
10914                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10915                         0x00000000, 0x00000003 },
10916                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10917                         0x00000000, 0xffffffff },
10918                 { RCVDBDI_STD_BD+0, 0x0000,
10919                         0x00000000, 0xffffffff },
10920                 { RCVDBDI_STD_BD+4, 0x0000,
10921                         0x00000000, 0xffffffff },
10922                 { RCVDBDI_STD_BD+8, 0x0000,
10923                         0x00000000, 0xffff0002 },
10924                 { RCVDBDI_STD_BD+0xc, 0x0000,
10925                         0x00000000, 0xffffffff },
10926
10927                 /* Receive BD Initiator Control Registers. */
10928                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10929                         0x00000000, 0xffffffff },
10930                 { RCVBDI_STD_THRESH, TG3_FL_5705,
10931                         0x00000000, 0x000003ff },
10932                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10933                         0x00000000, 0xffffffff },
10934
10935                 /* Host Coalescing Control Registers. */
10936                 { HOSTCC_MODE, TG3_FL_NOT_5705,
10937                         0x00000000, 0x00000004 },
10938                 { HOSTCC_MODE, TG3_FL_5705,
10939                         0x00000000, 0x000000f6 },
10940                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10941                         0x00000000, 0xffffffff },
10942                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10943                         0x00000000, 0x000003ff },
10944                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10945                         0x00000000, 0xffffffff },
10946                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10947                         0x00000000, 0x000003ff },
10948                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10949                         0x00000000, 0xffffffff },
10950                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10951                         0x00000000, 0x000000ff },
10952                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10953                         0x00000000, 0xffffffff },
10954                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10955                         0x00000000, 0x000000ff },
10956                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10957                         0x00000000, 0xffffffff },
10958                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10959                         0x00000000, 0xffffffff },
10960                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10961                         0x00000000, 0xffffffff },
10962                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10963                         0x00000000, 0x000000ff },
10964                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10965                         0x00000000, 0xffffffff },
10966                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10967                         0x00000000, 0x000000ff },
10968                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10969                         0x00000000, 0xffffffff },
10970                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10971                         0x00000000, 0xffffffff },
10972                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10973                         0x00000000, 0xffffffff },
10974                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10975                         0x00000000, 0xffffffff },
10976                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10977                         0x00000000, 0xffffffff },
10978                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10979                         0xffffffff, 0x00000000 },
10980                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10981                         0xffffffff, 0x00000000 },
10982
10983                 /* Buffer Manager Control Registers. */
10984                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10985                         0x00000000, 0x007fff80 },
10986                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10987                         0x00000000, 0x007fffff },
10988                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10989                         0x00000000, 0x0000003f },
10990                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10991                         0x00000000, 0x000001ff },
10992                 { BUFMGR_MB_HIGH_WATER, 0x0000,
10993                         0x00000000, 0x000001ff },
10994                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10995                         0xffffffff, 0x00000000 },
10996                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10997                         0xffffffff, 0x00000000 },
10998
10999                 /* Mailbox Registers */
11000                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11001                         0x00000000, 0x000001ff },
11002                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11003                         0x00000000, 0x000001ff },
11004                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11005                         0x00000000, 0x000007ff },
11006                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11007                         0x00000000, 0x000001ff },
11008
11009                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11010         };
11011
11012         is_5705 = is_5750 = 0;
11013         if (tg3_flag(tp, 5705_PLUS)) {
11014                 is_5705 = 1;
11015                 if (tg3_flag(tp, 5750_PLUS))
11016                         is_5750 = 1;
11017         }
11018
11019         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11020                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11021                         continue;
11022
11023                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11024                         continue;
11025
11026                 if (tg3_flag(tp, IS_5788) &&
11027                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
11028                         continue;
11029
11030                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11031                         continue;
11032
11033                 offset = (u32) reg_tbl[i].offset;
11034                 read_mask = reg_tbl[i].read_mask;
11035                 write_mask = reg_tbl[i].write_mask;
11036
11037                 /* Save the original register content */
11038                 save_val = tr32(offset);
11039
11040                 /* Determine the read-only value. */
11041                 read_val = save_val & read_mask;
11042
11043                 /* Write zero to the register, then make sure the read-only bits
11044                  * are not changed and the read/write bits are all zeros.
11045                  */
11046                 tw32(offset, 0);
11047
11048                 val = tr32(offset);
11049
11050                 /* Test the read-only and read/write bits. */
11051                 if (((val & read_mask) != read_val) || (val & write_mask))
11052                         goto out;
11053
11054                 /* Write ones to all the bits defined by read_mask and write_mask, then
11055                  * make sure the read-only bits are not changed and the
11056                  * read/write bits are all ones.
11057                  */
11058                 tw32(offset, read_mask | write_mask);
11059
11060                 val = tr32(offset);
11061
11062                 /* Test the read-only bits. */
11063                 if ((val & read_mask) != read_val)
11064                         goto out;
11065
11066                 /* Test the read/write bits. */
11067                 if ((val & write_mask) != write_mask)
11068                         goto out;
11069
11070                 tw32(offset, save_val);
11071         }
11072
11073         return 0;
11074
11075 out:
11076         if (netif_msg_hw(tp))
11077                 netdev_err(tp->dev,
11078                            "Register test failed at offset %x\n", offset);
11079         tw32(offset, save_val);
11080         return -EIO;
11081 }
11082
11083 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11084 {
11085         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11086         int i;
11087         u32 j;
11088
11089         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11090                 for (j = 0; j < len; j += 4) {
11091                         u32 val;
11092
11093                         tg3_write_mem(tp, offset + j, test_pattern[i]);
11094                         tg3_read_mem(tp, offset + j, &val);
11095                         if (val != test_pattern[i])
11096                                 return -EIO;
11097                 }
11098         }
11099         return 0;
11100 }
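/* The three patterns (all zeros, all ones, and alternating
 * 0xaa55a55a) are meant to catch stuck-at-0, stuck-at-1, and
 * coupled-bit faults respectively (editor's note on the test vectors).
 */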
11101
11102 static int tg3_test_memory(struct tg3 *tp)
11103 {
11104         static struct mem_entry {
11105                 u32 offset;
11106                 u32 len;
11107         } mem_tbl_570x[] = {
11108                 { 0x00000000, 0x00b50},
11109                 { 0x00002000, 0x1c000},
11110                 { 0xffffffff, 0x00000}
11111         }, mem_tbl_5705[] = {
11112                 { 0x00000100, 0x0000c},
11113                 { 0x00000200, 0x00008},
11114                 { 0x00004000, 0x00800},
11115                 { 0x00006000, 0x01000},
11116                 { 0x00008000, 0x02000},
11117                 { 0x00010000, 0x0e000},
11118                 { 0xffffffff, 0x00000}
11119         }, mem_tbl_5755[] = {
11120                 { 0x00000200, 0x00008},
11121                 { 0x00004000, 0x00800},
11122                 { 0x00006000, 0x00800},
11123                 { 0x00008000, 0x02000},
11124                 { 0x00010000, 0x0c000},
11125                 { 0xffffffff, 0x00000}
11126         }, mem_tbl_5906[] = {
11127                 { 0x00000200, 0x00008},
11128                 { 0x00004000, 0x00400},
11129                 { 0x00006000, 0x00400},
11130                 { 0x00008000, 0x01000},
11131                 { 0x00010000, 0x01000},
11132                 { 0xffffffff, 0x00000}
11133         }, mem_tbl_5717[] = {
11134                 { 0x00000200, 0x00008},
11135                 { 0x00010000, 0x0a000},
11136                 { 0x00020000, 0x13c00},
11137                 { 0xffffffff, 0x00000}
11138         }, mem_tbl_57765[] = {
11139                 { 0x00000200, 0x00008},
11140                 { 0x00004000, 0x00800},
11141                 { 0x00006000, 0x09800},
11142                 { 0x00010000, 0x0a000},
11143                 { 0xffffffff, 0x00000}
11144         };
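        /* Each table above lists { offset, len } SRAM test windows for one
         * ASIC family, terminated by a sentinel offset of 0xffffffff; the
         * lengths are in bytes and always multiples of 4.
         */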
11145         struct mem_entry *mem_tbl;
11146         int err = 0;
11147         int i;
11148
11149         if (tg3_flag(tp, 5717_PLUS))
11150                 mem_tbl = mem_tbl_5717;
11151         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11152                 mem_tbl = mem_tbl_57765;
11153         else if (tg3_flag(tp, 5755_PLUS))
11154                 mem_tbl = mem_tbl_5755;
11155         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11156                 mem_tbl = mem_tbl_5906;
11157         else if (tg3_flag(tp, 5705_PLUS))
11158                 mem_tbl = mem_tbl_5705;
11159         else
11160                 mem_tbl = mem_tbl_570x;
11161
11162         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11163                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11164                 if (err)
11165                         break;
11166         }
11167
11168         return err;
11169 }
11170
11171 #define TG3_MAC_LOOPBACK        0
11172 #define TG3_PHY_LOOPBACK        1
11173 #define TG3_TSO_LOOPBACK        2
11174
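/* Loopback flavors exercised by tg3_run_loopback() below: MAC loopback
 * wraps frames inside the MAC via MAC_MODE_PORT_INT_LPBACK; PHY loopback
 * forces BMCR_LOOPBACK at the PHY so the full MAC/PHY path is covered;
 * TSO loopback is PHY loopback carrying one large payload so the TSO
 * engine's segmentation can be checked end to end.
 */
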
11175 #define TG3_TSO_MSS             500
11176
11177 #define TG3_TSO_IP_HDR_LEN      20
11178 #define TG3_TSO_TCP_HDR_LEN     20
11179 #define TG3_TSO_TCP_OPT_LEN     12
11180
11181 static const u8 tg3_tso_header[] = {
11182 0x08, 0x00,
11183 0x45, 0x00, 0x00, 0x00,
11184 0x00, 0x00, 0x40, 0x00,
11185 0x40, 0x06, 0x00, 0x00,
11186 0x0a, 0x00, 0x00, 0x01,
11187 0x0a, 0x00, 0x00, 0x02,
11188 0x0d, 0x00, 0xe0, 0x00,
11189 0x00, 0x00, 0x01, 0x00,
11190 0x00, 0x00, 0x02, 0x00,
11191 0x80, 0x10, 0x10, 0x00,
11192 0x14, 0x09, 0x00, 0x00,
11193 0x01, 0x01, 0x08, 0x0a,
11194 0x11, 0x11, 0x11, 0x11,
11195 0x11, 0x11, 0x11, 0x11,
11196 };
11197
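/* A rough decode of tg3_tso_header (it is copied in right after the two
 * MAC addresses, so it begins at the Ethertype): 0x0800 marks IPv4; next
 * comes an IPv4 header (IHL 5, DF set, TTL 64, protocol TCP, 10.0.0.1 ->
 * 10.0.0.2) with tot_len and checksum left zero to be filled in later;
 * then a TCP header with data offset 8 (20 bytes plus 12 bytes of
 * options), the ACK flag, window 0x1000, and two NOPs plus a timestamp
 * option filling out the option space.
 */
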
11198 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
11199 {
11200         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
11201         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11202         struct sk_buff *skb, *rx_skb;
11203         u8 *tx_data;
11204         dma_addr_t map;
11205         int num_pkts, tx_len, rx_len, i, err;
11206         struct tg3_rx_buffer_desc *desc;
11207         struct tg3_napi *tnapi, *rnapi;
11208         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11209
11210         tnapi = &tp->napi[0];
11211         rnapi = &tp->napi[0];
11212         if (tp->irq_cnt > 1) {
11213                 if (tg3_flag(tp, ENABLE_RSS))
11214                         rnapi = &tp->napi[1];
11215                 if (tg3_flag(tp, ENABLE_TSS))
11216                         tnapi = &tp->napi[1];
11217         }
11218         coal_now = tnapi->coal_now | rnapi->coal_now;
11219
11220         if (loopback_mode == TG3_MAC_LOOPBACK) {
11221                 /* HW errata - MAC loopback fails in some cases on the 5780.
11222                  * Normal traffic and PHY loopback are not affected by this
11223                  * erratum.  Also, the MAC loopback test is deprecated for
11224                  * all newer ASIC revisions.
11225                  */
11226                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11227                     tg3_flag(tp, CPMU_PRESENT))
11228                         return 0;
11229
11230                 mac_mode = tp->mac_mode &
11231                            ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11232                 mac_mode |= MAC_MODE_PORT_INT_LPBACK;
11233                 if (!tg3_flag(tp, 5705_PLUS))
11234                         mac_mode |= MAC_MODE_LINK_POLARITY;
11235                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
11236                         mac_mode |= MAC_MODE_PORT_MODE_MII;
11237                 else
11238                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
11239                 tw32(MAC_MODE, mac_mode);
11240         } else {
11241                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11242                         tg3_phy_fet_toggle_apd(tp, false);
11243                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
11244                 } else
11245                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
11246
11247                 tg3_phy_toggle_automdix(tp, 0);
11248
11249                 tg3_writephy(tp, MII_BMCR, val);
11250                 udelay(40);
11251
11252                 mac_mode = tp->mac_mode &
11253                            ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11254                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11255                         tg3_writephy(tp, MII_TG3_FET_PTEST,
11256                                      MII_TG3_FET_PTEST_FRC_TX_LINK |
11257                                      MII_TG3_FET_PTEST_FRC_TX_LOCK);
11258                         /* The write needs to be flushed for the AC131 */
11259                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11260                                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
11261                         mac_mode |= MAC_MODE_PORT_MODE_MII;
11262                 } else
11263                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
11264
11265                 /* Reset the RX MAC to avoid intermittently losing the 1st rx packet. */
11266                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
11267                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
11268                         udelay(10);
11269                         tw32_f(MAC_RX_MODE, tp->rx_mode);
11270                 }
11271                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
11272                         u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
11273                         if (masked_phy_id == TG3_PHY_ID_BCM5401)
11274                                 mac_mode &= ~MAC_MODE_LINK_POLARITY;
11275                         else if (masked_phy_id == TG3_PHY_ID_BCM5411)
11276                                 mac_mode |= MAC_MODE_LINK_POLARITY;
11277                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
11278                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
11279                 }
11280                 tw32(MAC_MODE, mac_mode);
11281
11282                 /* Wait up to 100 ms for the forced link to come up. */
11283                 for (i = 0; i < 100; i++) {
11284                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11285                                 break;
11286                         mdelay(1);
11287                 }
11288         }
11289
11290         err = -EIO;
11291
11292         tx_len = pktsz;
11293         skb = netdev_alloc_skb(tp->dev, tx_len);
11294         if (!skb)
11295                 return -ENOMEM;
11296
11297         tx_data = skb_put(skb, tx_len);
11298         memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
11299         memset(tx_data + ETH_ALEN, 0x0, 8);
11300
11301         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11302
11303         if (loopback_mode == TG3_TSO_LOOPBACK) {
11304                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11305
11306                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11307                               TG3_TSO_TCP_OPT_LEN;
11308
11309                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11310                        sizeof(tg3_tso_header));
11311                 mss = TG3_TSO_MSS;
11312
11313                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11314                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11315
11316                 /* Set the total length field in the IP header */
11317                 iph->tot_len = htons((u16)(mss + hdr_len));
11318
11319                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11320                               TXD_FLAG_CPU_POST_DMA);
11321
11322                 if (tg3_flag(tp, HW_TSO_1) ||
11323                     tg3_flag(tp, HW_TSO_2) ||
11324                     tg3_flag(tp, HW_TSO_3)) {
11325                         struct tcphdr *th;
11326                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11327                         th = (struct tcphdr *)&tx_data[val];
11328                         th->check = 0;
11329                 } else
11330                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
11331
11332                 if (tg3_flag(tp, HW_TSO_3)) {
11333                         mss |= (hdr_len & 0xc) << 12;
11334                         if (hdr_len & 0x10)
11335                                 base_flags |= 0x00000010;
11336                         base_flags |= (hdr_len & 0x3e0) << 5;
11337                 } else if (tg3_flag(tp, HW_TSO_2))
11338                         mss |= hdr_len << 9;
11339                 else if (tg3_flag(tp, HW_TSO_1) ||
11340                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11341                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11342                 } else {
11343                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11344                 }
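                /* Worked example of the encoding above: hdr_len here is
                 * 20 + 20 + 12 = 52 = 0x34, so HW_TSO_3 parts get
                 * mss |= (0x34 & 0xc) << 12 (= 0x4000), base_flags |= 0x10,
                 * and base_flags |= (0x34 & 0x3e0) << 5 (= 0x400), while
                 * HW_TSO_2 parts simply get mss |= 0x34 << 9.
                 */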
11345
11346                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11347         } else {
11348                 num_pkts = 1;
11349                 data_off = ETH_HLEN;
11350         }
11351
11352         for (i = data_off; i < tx_len; i++)
11353                 tx_data[i] = (u8) (i & 0xff);
11354
11355         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11356         if (pci_dma_mapping_error(tp->pdev, map)) {
11357                 dev_kfree_skb(skb);
11358                 return -EIO;
11359         }
11360
11361         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11362                rnapi->coal_now);
11363
11364         udelay(10);
11365
11366         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11367
11368         tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len,
11369                     base_flags, (mss << 1) | 1);
11370
11371         tnapi->tx_prod++;
11372
11373         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11374         tr32_mailbox(tnapi->prodmbox);
11375
11376         udelay(10);
11377
11378         /* Poll for up to 350 usec (35 x 10 usec) to allow enough time on some 10/100 Mbps devices.  */
11379         for (i = 0; i < 35; i++) {
11380                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11381                        coal_now);
11382
11383                 udelay(10);
11384
11385                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11386                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11387                 if ((tx_idx == tnapi->tx_prod) &&
11388                     (rx_idx == (rx_start_idx + num_pkts)))
11389                         break;
11390         }
11391
11392         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
11393         dev_kfree_skb(skb);
11394
11395         if (tx_idx != tnapi->tx_prod)
11396                 goto out;
11397
11398         if (rx_idx != rx_start_idx + num_pkts)
11399                 goto out;
11400
11401         val = data_off;
11402         while (rx_idx != rx_start_idx) {
11403                 desc = &rnapi->rx_rcb[rx_start_idx++];
11404                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11405                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11406
11407                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11408                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11409                         goto out;
11410
11411                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11412                          - ETH_FCS_LEN;
11413
11414                 if (loopback_mode != TG3_TSO_LOOPBACK) {
11415                         if (rx_len != tx_len)
11416                                 goto out;
11417
11418                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11419                                 if (opaque_key != RXD_OPAQUE_RING_STD)
11420                                         goto out;
11421                         } else {
11422                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11423                                         goto out;
11424                         }
11425                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11426                            ((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11427                             >> RXD_TCPCSUM_SHIFT) != 0xffff) {
11428                         goto out;
11429                 }
11430
11431                 if (opaque_key == RXD_OPAQUE_RING_STD) {
11432                         rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11433                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11434                                              mapping);
11435                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11436                         rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11437                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11438                                              mapping);
11439                 } else
11440                         goto out;
11441
11442                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11443                                             PCI_DMA_FROMDEVICE);
11444
11445                 for (i = data_off; i < rx_len; i++, val++) {
11446                         if (*(rx_skb->data + i) != (u8) (val & 0xff))
11447                                 goto out;
11448                 }
11449         }
11450
11451         err = 0;
11452
11453         /* tg3_free_rings will unmap and free the rx_skb */
11454 out:
11455         return err;
11456 }
11457
11458 #define TG3_STD_LOOPBACK_FAILED         1
11459 #define TG3_JMB_LOOPBACK_FAILED         2
11460 #define TG3_TSO_LOOPBACK_FAILED         4
11461
11462 #define TG3_MAC_LOOPBACK_SHIFT          0
11463 #define TG3_PHY_LOOPBACK_SHIFT          4
11464 #define TG3_LOOPBACK_FAILED             0x00000077
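
/* TG3_LOOPBACK_FAILED ORs the three per-test bits (std = 1, jumbo = 2,
 * TSO = 4, i.e. 0x7) at both shifts: 0x7 << TG3_MAC_LOOPBACK_SHIFT and
 * 0x7 << TG3_PHY_LOOPBACK_SHIFT, giving 0x77.
 */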
11465
11466 static int tg3_test_loopback(struct tg3 *tp)
11467 {
11468         int err = 0;
11469         u32 eee_cap, cpmuctrl = 0;
11470
11471         if (!netif_running(tp->dev))
11472                 return TG3_LOOPBACK_FAILED;
11473
11474         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11475         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11476
11477         err = tg3_reset_hw(tp, 1);
11478         if (err) {
11479                 err = TG3_LOOPBACK_FAILED;
11480                 goto done;
11481         }
11482
11483         if (tg3_flag(tp, ENABLE_RSS)) {
11484                 int i;
11485
11486                 /* Reroute all rx packets to the 1st queue */
11487                 for (i = MAC_RSS_INDIR_TBL_0;
11488                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11489                         tw32(i, 0x0);
11490         }
11491
11492         /* Turn off gphy autopowerdown. */
11493         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11494                 tg3_phy_toggle_apd(tp, false);
11495
11496         if (tg3_flag(tp, CPMU_PRESENT)) {
11497                 int i;
11498                 u32 status;
11499
11500                 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
11501
11502                 /* Wait for up to 40 microseconds to acquire lock. */
11503                 for (i = 0; i < 4; i++) {
11504                         status = tr32(TG3_CPMU_MUTEX_GNT);
11505                         if (status == CPMU_MUTEX_GNT_DRIVER)
11506                                 break;
11507                         udelay(10);
11508                 }
11509
11510                 if (status != CPMU_MUTEX_GNT_DRIVER) {
11511                         err = TG3_LOOPBACK_FAILED;
11512                         goto done;
11513                 }
11514
11515                 /* Turn off link-based power management. */
11516                 cpmuctrl = tr32(TG3_CPMU_CTRL);
11517                 tw32(TG3_CPMU_CTRL,
11518                      cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
11519                                   CPMU_CTRL_LINK_AWARE_MODE));
11520         }
11521
11522         if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
11523                 err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11524
11525         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11526             tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
11527                 err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11528
11529         if (tg3_flag(tp, CPMU_PRESENT)) {
11530                 tw32(TG3_CPMU_CTRL, cpmuctrl);
11531
11532                 /* Release the mutex */
11533                 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
11534         }
11535
11536         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11537             !tg3_flag(tp, USE_PHYLIB)) {
11538                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
11539                         err |= TG3_STD_LOOPBACK_FAILED <<
11540                                TG3_PHY_LOOPBACK_SHIFT;
11541                 if (tg3_flag(tp, TSO_CAPABLE) &&
11542                     tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
11543                         err |= TG3_TSO_LOOPBACK_FAILED <<
11544                                TG3_PHY_LOOPBACK_SHIFT;
11545                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11546                     tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
11547                         err |= TG3_JMB_LOOPBACK_FAILED <<
11548                                TG3_PHY_LOOPBACK_SHIFT;
11549         }
11550
11551         /* Re-enable gphy autopowerdown. */
11552         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11553                 tg3_phy_toggle_apd(tp, true);
11554
11555 done:
11556         tp->phy_flags |= eee_cap;
11557
11558         return err;
11559 }
11560
11561 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11562                           u64 *data)
11563 {
11564         struct tg3 *tp = netdev_priv(dev);
11565
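        /* data[] slots are filled in below in this order: 0 = nvram,
         * 1 = link, 2 = registers, 3 = memory, 4 = loopback, 5 = interrupt.
         */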
11566         if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
11567             tg3_power_up(tp)) {
11568                 etest->flags |= ETH_TEST_FL_FAILED;
11569                 memset(data, 1, sizeof(u64) * TG3_NUM_TEST); /* mark all tests failed; each u64 becomes nonzero */
11570                 return;
11571         }
11572
11573         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11574
11575         if (tg3_test_nvram(tp) != 0) {
11576                 etest->flags |= ETH_TEST_FL_FAILED;
11577                 data[0] = 1;
11578         }
11579         if (tg3_test_link(tp) != 0) {
11580                 etest->flags |= ETH_TEST_FL_FAILED;
11581                 data[1] = 1;
11582         }
11583         if (etest->flags & ETH_TEST_FL_OFFLINE) {
11584                 int err, err2 = 0, irq_sync = 0;
11585
11586                 if (netif_running(dev)) {
11587                         tg3_phy_stop(tp);
11588                         tg3_netif_stop(tp);
11589                         irq_sync = 1;
11590                 }
11591
11592                 tg3_full_lock(tp, irq_sync);
11593
11594                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11595                 err = tg3_nvram_lock(tp);
11596                 tg3_halt_cpu(tp, RX_CPU_BASE);
11597                 if (!tg3_flag(tp, 5705_PLUS))
11598                         tg3_halt_cpu(tp, TX_CPU_BASE);
11599                 if (!err)
11600                         tg3_nvram_unlock(tp);
11601
11602                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11603                         tg3_phy_reset(tp);
11604
11605                 if (tg3_test_registers(tp) != 0) {
11606                         etest->flags |= ETH_TEST_FL_FAILED;
11607                         data[2] = 1;
11608                 }
11609                 if (tg3_test_memory(tp) != 0) {
11610                         etest->flags |= ETH_TEST_FL_FAILED;
11611                         data[3] = 1;
11612                 }
11613                 if ((data[4] = tg3_test_loopback(tp)) != 0)
11614                         etest->flags |= ETH_TEST_FL_FAILED;
11615
11616                 tg3_full_unlock(tp);
11617
11618                 if (tg3_test_interrupt(tp) != 0) {
11619                         etest->flags |= ETH_TEST_FL_FAILED;
11620                         data[5] = 1;
11621                 }
11622
11623                 tg3_full_lock(tp, 0);
11624
11625                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11626                 if (netif_running(dev)) {
11627                         tg3_flag_set(tp, INIT_COMPLETE);
11628                         err2 = tg3_restart_hw(tp, 1);
11629                         if (!err2)
11630                                 tg3_netif_start(tp);
11631                 }
11632
11633                 tg3_full_unlock(tp);
11634
11635                 if (irq_sync && !err2)
11636                         tg3_phy_start(tp);
11637         }
11638         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11639                 tg3_power_down(tp);
11641 }
11642
11643 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11644 {
11645         struct mii_ioctl_data *data = if_mii(ifr);
11646         struct tg3 *tp = netdev_priv(dev);
11647         int err;
11648
11649         if (tg3_flag(tp, USE_PHYLIB)) {
11650                 struct phy_device *phydev;
11651                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11652                         return -EAGAIN;
11653                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11654                 return phy_mii_ioctl(phydev, ifr, cmd);
11655         }
11656
11657         switch (cmd) {
11658         case SIOCGMIIPHY:
11659                 data->phy_id = tp->phy_addr;
11660
11661                 /* fallthru */
11662         case SIOCGMIIREG: {
11663                 u32 mii_regval;
11664
11665                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11666                         break;                  /* We have no PHY */
11667
11668                 if (!netif_running(dev))
11669                         return -EAGAIN;
11670
11671                 spin_lock_bh(&tp->lock);
11672                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11673                 spin_unlock_bh(&tp->lock);
11674
11675                 data->val_out = mii_regval;
11676
11677                 return err;
11678         }
11679
11680         case SIOCSMIIREG:
11681                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11682                         break;                  /* We have no PHY */
11683
11684                 if (!netif_running(dev))
11685                         return -EAGAIN;
11686
11687                 spin_lock_bh(&tp->lock);
11688                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11689                 spin_unlock_bh(&tp->lock);
11690
11691                 return err;
11692
11693         default:
11694                 /* do nothing */
11695                 break;
11696         }
11697         return -EOPNOTSUPP;
11698 }
11699
11700 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11701 {
11702         struct tg3 *tp = netdev_priv(dev);
11703
11704         memcpy(ec, &tp->coal, sizeof(*ec));
11705         return 0;
11706 }
11707
11708 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11709 {
11710         struct tg3 *tp = netdev_priv(dev);
11711         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11712         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11713
11714         if (!tg3_flag(tp, 5705_PLUS)) {
11715                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11716                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11717                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11718                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11719         }
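        /* On 5705 and newer parts the per-interrupt and statistics
         * coalescing timers do not exist, so the limits above stay 0 and
         * any nonzero *_irq or stats_block request is rejected below.
         */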
11720
11721         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11722             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11723             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11724             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11725             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11726             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11727             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11728             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11729             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11730             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11731                 return -EINVAL;
11732
11733         /* No rx interrupts will be generated if both are zero */
11734         if ((ec->rx_coalesce_usecs == 0) &&
11735             (ec->rx_max_coalesced_frames == 0))
11736                 return -EINVAL;
11737
11738         /* No tx interrupts will be generated if both are zero */
11739         if ((ec->tx_coalesce_usecs == 0) &&
11740             (ec->tx_max_coalesced_frames == 0))
11741                 return -EINVAL;
11742
11743         /* Only copy relevant parameters, ignore all others. */
11744         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11745         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11746         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11747         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11748         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11749         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11750         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11751         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11752         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11753
11754         if (netif_running(dev)) {
11755                 tg3_full_lock(tp, 0);
11756                 __tg3_set_coalesce(tp, &tp->coal);
11757                 tg3_full_unlock(tp);
11758         }
11759         return 0;
11760 }
11761
11762 static const struct ethtool_ops tg3_ethtool_ops = {
11763         .get_settings           = tg3_get_settings,
11764         .set_settings           = tg3_set_settings,
11765         .get_drvinfo            = tg3_get_drvinfo,
11766         .get_regs_len           = tg3_get_regs_len,
11767         .get_regs               = tg3_get_regs,
11768         .get_wol                = tg3_get_wol,
11769         .set_wol                = tg3_set_wol,
11770         .get_msglevel           = tg3_get_msglevel,
11771         .set_msglevel           = tg3_set_msglevel,
11772         .nway_reset             = tg3_nway_reset,
11773         .get_link               = ethtool_op_get_link,
11774         .get_eeprom_len         = tg3_get_eeprom_len,
11775         .get_eeprom             = tg3_get_eeprom,
11776         .set_eeprom             = tg3_set_eeprom,
11777         .get_ringparam          = tg3_get_ringparam,
11778         .set_ringparam          = tg3_set_ringparam,
11779         .get_pauseparam         = tg3_get_pauseparam,
11780         .set_pauseparam         = tg3_set_pauseparam,
11781         .self_test              = tg3_self_test,
11782         .get_strings            = tg3_get_strings,
11783         .set_phys_id            = tg3_set_phys_id,
11784         .get_ethtool_stats      = tg3_get_ethtool_stats,
11785         .get_coalesce           = tg3_get_coalesce,
11786         .set_coalesce           = tg3_set_coalesce,
11787         .get_sset_count         = tg3_get_sset_count,
11788 };
11789
11790 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11791 {
11792         u32 cursize, val, magic;
11793
11794         tp->nvram_size = EEPROM_CHIP_SIZE;
11795
11796         if (tg3_nvram_read(tp, 0, &magic) != 0)
11797                 return;
11798
11799         if ((magic != TG3_EEPROM_MAGIC) &&
11800             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11801             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11802                 return;
11803
11804         /*
11805          * Size the chip by reading offsets at increasing powers of two.
11806          * When we encounter our validation signature, we know the addressing
11807          * has wrapped around, and thus have our chip size.
11808          */
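        /* For instance, on a hypothetical 512-byte part: reads at 0x10
         * through 0x100 return ordinary data, but the read at 0x200 wraps
         * back to offset 0 and returns the magic value again, leaving
         * nvram_size at 0x200.
         */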
11809         cursize = 0x10;
11810
11811         while (cursize < tp->nvram_size) {
11812                 if (tg3_nvram_read(tp, cursize, &val) != 0)
11813                         return;
11814
11815                 if (val == magic)
11816                         break;
11817
11818                 cursize <<= 1;
11819         }
11820
11821         tp->nvram_size = cursize;
11822 }
11823
11824 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11825 {
11826         u32 val;
11827
11828         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11829                 return;
11830
11831         /* Selfboot format */
11832         if (val != TG3_EEPROM_MAGIC) {
11833                 tg3_get_eeprom_size(tp);
11834                 return;
11835         }
11836
11837         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11838                 if (val != 0) {
11839                         /* This is confusing.  We want to operate on the
11840                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
11841                          * call will read from NVRAM and byteswap the data
11842                          * according to the byteswapping settings for all
11843                          * other register accesses.  This ensures the data we
11844                          * want will always reside in the lower 16-bits.
11845                          * However, the data in NVRAM is in LE format, which
11846                          * means the data from the NVRAM read will always be
11847                          * opposite the endianness of the CPU.  The 16-bit
11848                          * byteswap then brings the data to CPU endianness.
11849                          */
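                        /* Example (assuming the size is stored in 1KB
                         * units, as the multiply suggests): a stored value
                         * of 512 surfaces here as 0x0002; swab16()
                         * recovers 0x0200 = 512, so nvram_size becomes
                         * 512 * 1024 bytes.
                         */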
11850                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11851                         return;
11852                 }
11853         }
11854         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11855 }
11856
11857 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11858 {
11859         u32 nvcfg1;
11860
11861         nvcfg1 = tr32(NVRAM_CFG1);
11862         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11863                 tg3_flag_set(tp, FLASH);
11864         } else {
11865                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11866                 tw32(NVRAM_CFG1, nvcfg1);
11867         }
11868
11869         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11870             tg3_flag(tp, 5780_CLASS)) {
11871                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11872                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11873                         tp->nvram_jedecnum = JEDEC_ATMEL;
11874                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11875                         tg3_flag_set(tp, NVRAM_BUFFERED);
11876                         break;
11877                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11878                         tp->nvram_jedecnum = JEDEC_ATMEL;
11879                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11880                         break;
11881                 case FLASH_VENDOR_ATMEL_EEPROM:
11882                         tp->nvram_jedecnum = JEDEC_ATMEL;
11883                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11884                         tg3_flag_set(tp, NVRAM_BUFFERED);
11885                         break;
11886                 case FLASH_VENDOR_ST:
11887                         tp->nvram_jedecnum = JEDEC_ST;
11888                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11889                         tg3_flag_set(tp, NVRAM_BUFFERED);
11890                         break;
11891                 case FLASH_VENDOR_SAIFUN:
11892                         tp->nvram_jedecnum = JEDEC_SAIFUN;
11893                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11894                         break;
11895                 case FLASH_VENDOR_SST_SMALL:
11896                 case FLASH_VENDOR_SST_LARGE:
11897                         tp->nvram_jedecnum = JEDEC_SST;
11898                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11899                         break;
11900                 }
11901         } else {
11902                 tp->nvram_jedecnum = JEDEC_ATMEL;
11903                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11904                 tg3_flag_set(tp, NVRAM_BUFFERED);
11905         }
11906 }
11907
11908 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11909 {
11910         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11911         case FLASH_5752PAGE_SIZE_256:
11912                 tp->nvram_pagesize = 256;
11913                 break;
11914         case FLASH_5752PAGE_SIZE_512:
11915                 tp->nvram_pagesize = 512;
11916                 break;
11917         case FLASH_5752PAGE_SIZE_1K:
11918                 tp->nvram_pagesize = 1024;
11919                 break;
11920         case FLASH_5752PAGE_SIZE_2K:
11921                 tp->nvram_pagesize = 2048;
11922                 break;
11923         case FLASH_5752PAGE_SIZE_4K:
11924                 tp->nvram_pagesize = 4096;
11925                 break;
11926         case FLASH_5752PAGE_SIZE_264:
11927                 tp->nvram_pagesize = 264;
11928                 break;
11929         case FLASH_5752PAGE_SIZE_528:
11930                 tp->nvram_pagesize = 528;
11931                 break;
11932         }
11933 }
11934
11935 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11936 {
11937         u32 nvcfg1;
11938
11939         nvcfg1 = tr32(NVRAM_CFG1);
11940
11941         /* NVRAM protection for TPM */
11942         if (nvcfg1 & (1 << 27))
11943                 tg3_flag_set(tp, PROTECTED_NVRAM);
11944
11945         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11946         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11947         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11948                 tp->nvram_jedecnum = JEDEC_ATMEL;
11949                 tg3_flag_set(tp, NVRAM_BUFFERED);
11950                 break;
11951         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11952                 tp->nvram_jedecnum = JEDEC_ATMEL;
11953                 tg3_flag_set(tp, NVRAM_BUFFERED);
11954                 tg3_flag_set(tp, FLASH);
11955                 break;
11956         case FLASH_5752VENDOR_ST_M45PE10:
11957         case FLASH_5752VENDOR_ST_M45PE20:
11958         case FLASH_5752VENDOR_ST_M45PE40:
11959                 tp->nvram_jedecnum = JEDEC_ST;
11960                 tg3_flag_set(tp, NVRAM_BUFFERED);
11961                 tg3_flag_set(tp, FLASH);
11962                 break;
11963         }
11964
11965         if (tg3_flag(tp, FLASH)) {
11966                 tg3_nvram_get_pagesize(tp, nvcfg1);
11967         } else {
11968                 /* For EEPROMs, set the pagesize to the maximum EEPROM size. */
11969                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11970
11971                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11972                 tw32(NVRAM_CFG1, nvcfg1);
11973         }
11974 }
11975
11976 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11977 {
11978         u32 nvcfg1, protect = 0;
11979
11980         nvcfg1 = tr32(NVRAM_CFG1);
11981
11982         /* NVRAM protection for TPM */
11983         if (nvcfg1 & (1 << 27)) {
11984                 tg3_flag_set(tp, PROTECTED_NVRAM);
11985                 protect = 1;
11986         }
11987
11988         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11989         switch (nvcfg1) {
11990         case FLASH_5755VENDOR_ATMEL_FLASH_1:
11991         case FLASH_5755VENDOR_ATMEL_FLASH_2:
11992         case FLASH_5755VENDOR_ATMEL_FLASH_3:
11993         case FLASH_5755VENDOR_ATMEL_FLASH_5:
11994                 tp->nvram_jedecnum = JEDEC_ATMEL;
11995                 tg3_flag_set(tp, NVRAM_BUFFERED);
11996                 tg3_flag_set(tp, FLASH);
11997                 tp->nvram_pagesize = 264;
11998                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11999                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12000                         tp->nvram_size = (protect ? 0x3e200 :
12001                                           TG3_NVRAM_SIZE_512KB);
12002                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12003                         tp->nvram_size = (protect ? 0x1f200 :
12004                                           TG3_NVRAM_SIZE_256KB);
12005                 else
12006                         tp->nvram_size = (protect ? 0x1f200 :
12007                                           TG3_NVRAM_SIZE_128KB);
12008                 break;
12009         case FLASH_5752VENDOR_ST_M45PE10:
12010         case FLASH_5752VENDOR_ST_M45PE20:
12011         case FLASH_5752VENDOR_ST_M45PE40:
12012                 tp->nvram_jedecnum = JEDEC_ST;
12013                 tg3_flag_set(tp, NVRAM_BUFFERED);
12014                 tg3_flag_set(tp, FLASH);
12015                 tp->nvram_pagesize = 256;
12016                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12017                         tp->nvram_size = (protect ?
12018                                           TG3_NVRAM_SIZE_64KB :
12019                                           TG3_NVRAM_SIZE_128KB);
12020                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12021                         tp->nvram_size = (protect ?
12022                                           TG3_NVRAM_SIZE_64KB :
12023                                           TG3_NVRAM_SIZE_256KB);
12024                 else
12025                         tp->nvram_size = (protect ?
12026                                           TG3_NVRAM_SIZE_128KB :
12027                                           TG3_NVRAM_SIZE_512KB);
12028                 break;
12029         }
12030 }
12031
12032 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12033 {
12034         u32 nvcfg1;
12035
12036         nvcfg1 = tr32(NVRAM_CFG1);
12037
12038         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12039         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12040         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12041         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12042         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12043                 tp->nvram_jedecnum = JEDEC_ATMEL;
12044                 tg3_flag_set(tp, NVRAM_BUFFERED);
12045                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12046
12047                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12048                 tw32(NVRAM_CFG1, nvcfg1);
12049                 break;
12050         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12051         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12052         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12053         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12054                 tp->nvram_jedecnum = JEDEC_ATMEL;
12055                 tg3_flag_set(tp, NVRAM_BUFFERED);
12056                 tg3_flag_set(tp, FLASH);
12057                 tp->nvram_pagesize = 264;
12058                 break;
12059         case FLASH_5752VENDOR_ST_M45PE10:
12060         case FLASH_5752VENDOR_ST_M45PE20:
12061         case FLASH_5752VENDOR_ST_M45PE40:
12062                 tp->nvram_jedecnum = JEDEC_ST;
12063                 tg3_flag_set(tp, NVRAM_BUFFERED);
12064                 tg3_flag_set(tp, FLASH);
12065                 tp->nvram_pagesize = 256;
12066                 break;
12067         }
12068 }
12069
12070 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12071 {
12072         u32 nvcfg1, protect = 0;
12073
12074         nvcfg1 = tr32(NVRAM_CFG1);
12075
12076         /* NVRAM protection for TPM */
12077         if (nvcfg1 & (1 << 27)) {
12078                 tg3_flag_set(tp, PROTECTED_NVRAM);
12079                 protect = 1;
12080         }
12081
12082         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12083         switch (nvcfg1) {
12084         case FLASH_5761VENDOR_ATMEL_ADB021D:
12085         case FLASH_5761VENDOR_ATMEL_ADB041D:
12086         case FLASH_5761VENDOR_ATMEL_ADB081D:
12087         case FLASH_5761VENDOR_ATMEL_ADB161D:
12088         case FLASH_5761VENDOR_ATMEL_MDB021D:
12089         case FLASH_5761VENDOR_ATMEL_MDB041D:
12090         case FLASH_5761VENDOR_ATMEL_MDB081D:
12091         case FLASH_5761VENDOR_ATMEL_MDB161D:
12092                 tp->nvram_jedecnum = JEDEC_ATMEL;
12093                 tg3_flag_set(tp, NVRAM_BUFFERED);
12094                 tg3_flag_set(tp, FLASH);
12095                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12096                 tp->nvram_pagesize = 256;
12097                 break;
12098         case FLASH_5761VENDOR_ST_A_M45PE20:
12099         case FLASH_5761VENDOR_ST_A_M45PE40:
12100         case FLASH_5761VENDOR_ST_A_M45PE80:
12101         case FLASH_5761VENDOR_ST_A_M45PE16:
12102         case FLASH_5761VENDOR_ST_M_M45PE20:
12103         case FLASH_5761VENDOR_ST_M_M45PE40:
12104         case FLASH_5761VENDOR_ST_M_M45PE80:
12105         case FLASH_5761VENDOR_ST_M_M45PE16:
12106                 tp->nvram_jedecnum = JEDEC_ST;
12107                 tg3_flag_set(tp, NVRAM_BUFFERED);
12108                 tg3_flag_set(tp, FLASH);
12109                 tp->nvram_pagesize = 256;
12110                 break;
12111         }
12112
12113         if (protect) {
12114                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12115         } else {
12116                 switch (nvcfg1) {
12117                 case FLASH_5761VENDOR_ATMEL_ADB161D:
12118                 case FLASH_5761VENDOR_ATMEL_MDB161D:
12119                 case FLASH_5761VENDOR_ST_A_M45PE16:
12120                 case FLASH_5761VENDOR_ST_M_M45PE16:
12121                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12122                         break;
12123                 case FLASH_5761VENDOR_ATMEL_ADB081D:
12124                 case FLASH_5761VENDOR_ATMEL_MDB081D:
12125                 case FLASH_5761VENDOR_ST_A_M45PE80:
12126                 case FLASH_5761VENDOR_ST_M_M45PE80:
12127                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12128                         break;
12129                 case FLASH_5761VENDOR_ATMEL_ADB041D:
12130                 case FLASH_5761VENDOR_ATMEL_MDB041D:
12131                 case FLASH_5761VENDOR_ST_A_M45PE40:
12132                 case FLASH_5761VENDOR_ST_M_M45PE40:
12133                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12134                         break;
12135                 case FLASH_5761VENDOR_ATMEL_ADB021D:
12136                 case FLASH_5761VENDOR_ATMEL_MDB021D:
12137                 case FLASH_5761VENDOR_ST_A_M45PE20:
12138                 case FLASH_5761VENDOR_ST_M_M45PE20:
12139                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12140                         break;
12141                 }
12142         }
12143 }
12144
12145 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12146 {
12147         tp->nvram_jedecnum = JEDEC_ATMEL;
12148         tg3_flag_set(tp, NVRAM_BUFFERED);
12149         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12150 }
12151
12152 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12153 {
12154         u32 nvcfg1;
12155
12156         nvcfg1 = tr32(NVRAM_CFG1);
12157
12158         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12159         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12160         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12161                 tp->nvram_jedecnum = JEDEC_ATMEL;
12162                 tg3_flag_set(tp, NVRAM_BUFFERED);
12163                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12164
12165                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12166                 tw32(NVRAM_CFG1, nvcfg1);
12167                 return;
12168         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12169         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12170         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12171         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12172         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12173         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12174         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12175                 tp->nvram_jedecnum = JEDEC_ATMEL;
12176                 tg3_flag_set(tp, NVRAM_BUFFERED);
12177                 tg3_flag_set(tp, FLASH);
12178
12179                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12180                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12181                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12182                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12183                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12184                         break;
12185                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12186                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12187                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12188                         break;
12189                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12190                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12191                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12192                         break;
12193                 }
12194                 break;
12195         case FLASH_5752VENDOR_ST_M45PE10:
12196         case FLASH_5752VENDOR_ST_M45PE20:
12197         case FLASH_5752VENDOR_ST_M45PE40:
12198                 tp->nvram_jedecnum = JEDEC_ST;
12199                 tg3_flag_set(tp, NVRAM_BUFFERED);
12200                 tg3_flag_set(tp, FLASH);
12201
12202                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12203                 case FLASH_5752VENDOR_ST_M45PE10:
12204                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12205                         break;
12206                 case FLASH_5752VENDOR_ST_M45PE20:
12207                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12208                         break;
12209                 case FLASH_5752VENDOR_ST_M45PE40:
12210                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12211                         break;
12212                 }
12213                 break;
12214         default:
12215                 tg3_flag_set(tp, NO_NVRAM);
12216                 return;
12217         }
12218
12219         tg3_nvram_get_pagesize(tp, nvcfg1);
12220         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12221                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12222 }
12223
12225 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12226 {
12227         u32 nvcfg1;
12228
12229         nvcfg1 = tr32(NVRAM_CFG1);
12230
12231         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12232         case FLASH_5717VENDOR_ATMEL_EEPROM:
12233         case FLASH_5717VENDOR_MICRO_EEPROM:
12234                 tp->nvram_jedecnum = JEDEC_ATMEL;
12235                 tg3_flag_set(tp, NVRAM_BUFFERED);
12236                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12237
12238                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12239                 tw32(NVRAM_CFG1, nvcfg1);
12240                 return;
12241         case FLASH_5717VENDOR_ATMEL_MDB011D:
12242         case FLASH_5717VENDOR_ATMEL_ADB011B:
12243         case FLASH_5717VENDOR_ATMEL_ADB011D:
12244         case FLASH_5717VENDOR_ATMEL_MDB021D:
12245         case FLASH_5717VENDOR_ATMEL_ADB021B:
12246         case FLASH_5717VENDOR_ATMEL_ADB021D:
12247         case FLASH_5717VENDOR_ATMEL_45USPT:
12248                 tp->nvram_jedecnum = JEDEC_ATMEL;
12249                 tg3_flag_set(tp, NVRAM_BUFFERED);
12250                 tg3_flag_set(tp, FLASH);
12251
12252                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12253                 case FLASH_5717VENDOR_ATMEL_MDB021D:
12254                         /* Detect size with tg3_get_nvram_size() */
12255                         break;
12256                 case FLASH_5717VENDOR_ATMEL_ADB021B:
12257                 case FLASH_5717VENDOR_ATMEL_ADB021D:
12258                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12259                         break;
12260                 default:
12261                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12262                         break;
12263                 }
12264                 break;
12265         case FLASH_5717VENDOR_ST_M_M25PE10:
12266         case FLASH_5717VENDOR_ST_A_M25PE10:
12267         case FLASH_5717VENDOR_ST_M_M45PE10:
12268         case FLASH_5717VENDOR_ST_A_M45PE10:
12269         case FLASH_5717VENDOR_ST_M_M25PE20:
12270         case FLASH_5717VENDOR_ST_A_M25PE20:
12271         case FLASH_5717VENDOR_ST_M_M45PE20:
12272         case FLASH_5717VENDOR_ST_A_M45PE20:
12273         case FLASH_5717VENDOR_ST_25USPT:
12274         case FLASH_5717VENDOR_ST_45USPT:
12275                 tp->nvram_jedecnum = JEDEC_ST;
12276                 tg3_flag_set(tp, NVRAM_BUFFERED);
12277                 tg3_flag_set(tp, FLASH);
12278
12279                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12280                 case FLASH_5717VENDOR_ST_M_M25PE20:
12281                 case FLASH_5717VENDOR_ST_M_M45PE20:
12282                         /* Detect size with tg3_get_nvram_size() */
12283                         break;
12284                 case FLASH_5717VENDOR_ST_A_M25PE20:
12285                 case FLASH_5717VENDOR_ST_A_M45PE20:
12286                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12287                         break;
12288                 default:
12289                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12290                         break;
12291                 }
12292                 break;
12293         default:
12294                 tg3_flag_set(tp, NO_NVRAM);
12295                 return;
12296         }
12297
12298         tg3_nvram_get_pagesize(tp, nvcfg1);
12299         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12300                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12301 }
12302
12303 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12304 {
12305         u32 nvcfg1, nvmpinstrp;
12306
12307         nvcfg1 = tr32(NVRAM_CFG1);
12308         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12309
12310         switch (nvmpinstrp) {
12311         case FLASH_5720_EEPROM_HD:
12312         case FLASH_5720_EEPROM_LD:
12313                 tp->nvram_jedecnum = JEDEC_ATMEL;
12314                 tg3_flag_set(tp, NVRAM_BUFFERED);
12315
12316                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12317                 tw32(NVRAM_CFG1, nvcfg1);
12318                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12319                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12320                 else
12321                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12322                 return;
12323         case FLASH_5720VENDOR_M_ATMEL_DB011D:
12324         case FLASH_5720VENDOR_A_ATMEL_DB011B:
12325         case FLASH_5720VENDOR_A_ATMEL_DB011D:
12326         case FLASH_5720VENDOR_M_ATMEL_DB021D:
12327         case FLASH_5720VENDOR_A_ATMEL_DB021B:
12328         case FLASH_5720VENDOR_A_ATMEL_DB021D:
12329         case FLASH_5720VENDOR_M_ATMEL_DB041D:
12330         case FLASH_5720VENDOR_A_ATMEL_DB041B:
12331         case FLASH_5720VENDOR_A_ATMEL_DB041D:
12332         case FLASH_5720VENDOR_M_ATMEL_DB081D:
12333         case FLASH_5720VENDOR_A_ATMEL_DB081D:
12334         case FLASH_5720VENDOR_ATMEL_45USPT:
12335                 tp->nvram_jedecnum = JEDEC_ATMEL;
12336                 tg3_flag_set(tp, NVRAM_BUFFERED);
12337                 tg3_flag_set(tp, FLASH);
12338
12339                 switch (nvmpinstrp) {
12340                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12341                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12342                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12343                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12344                         break;
12345                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12346                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12347                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12348                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12349                         break;
12350                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12351                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12352                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12353                         break;
12354                 default:
12355                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12356                         break;
12357                 }
12358                 break;
12359         case FLASH_5720VENDOR_M_ST_M25PE10:
12360         case FLASH_5720VENDOR_M_ST_M45PE10:
12361         case FLASH_5720VENDOR_A_ST_M25PE10:
12362         case FLASH_5720VENDOR_A_ST_M45PE10:
12363         case FLASH_5720VENDOR_M_ST_M25PE20:
12364         case FLASH_5720VENDOR_M_ST_M45PE20:
12365         case FLASH_5720VENDOR_A_ST_M25PE20:
12366         case FLASH_5720VENDOR_A_ST_M45PE20:
12367         case FLASH_5720VENDOR_M_ST_M25PE40:
12368         case FLASH_5720VENDOR_M_ST_M45PE40:
12369         case FLASH_5720VENDOR_A_ST_M25PE40:
12370         case FLASH_5720VENDOR_A_ST_M45PE40:
12371         case FLASH_5720VENDOR_M_ST_M25PE80:
12372         case FLASH_5720VENDOR_M_ST_M45PE80:
12373         case FLASH_5720VENDOR_A_ST_M25PE80:
12374         case FLASH_5720VENDOR_A_ST_M45PE80:
12375         case FLASH_5720VENDOR_ST_25USPT:
12376         case FLASH_5720VENDOR_ST_45USPT:
12377                 tp->nvram_jedecnum = JEDEC_ST;
12378                 tg3_flag_set(tp, NVRAM_BUFFERED);
12379                 tg3_flag_set(tp, FLASH);
12380
12381                 switch (nvmpinstrp) {
12382                 case FLASH_5720VENDOR_M_ST_M25PE20:
12383                 case FLASH_5720VENDOR_M_ST_M45PE20:
12384                 case FLASH_5720VENDOR_A_ST_M25PE20:
12385                 case FLASH_5720VENDOR_A_ST_M45PE20:
12386                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12387                         break;
12388                 case FLASH_5720VENDOR_M_ST_M25PE40:
12389                 case FLASH_5720VENDOR_M_ST_M45PE40:
12390                 case FLASH_5720VENDOR_A_ST_M25PE40:
12391                 case FLASH_5720VENDOR_A_ST_M45PE40:
12392                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12393                         break;
12394                 case FLASH_5720VENDOR_M_ST_M25PE80:
12395                 case FLASH_5720VENDOR_M_ST_M45PE80:
12396                 case FLASH_5720VENDOR_A_ST_M25PE80:
12397                 case FLASH_5720VENDOR_A_ST_M45PE80:
12398                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12399                         break;
12400                 default:
12401                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12402                         break;
12403                 }
12404                 break;
12405         default:
12406                 tg3_flag_set(tp, NO_NVRAM);
12407                 return;
12408         }
12409
12410         tg3_nvram_get_pagesize(tp, nvcfg1);
12411         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12412                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12413 }
12414
12415 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12416 static void __devinit tg3_nvram_init(struct tg3 *tp)
12417 {
12418         tw32_f(GRC_EEPROM_ADDR,
12419              (EEPROM_ADDR_FSM_RESET |
12420               (EEPROM_DEFAULT_CLOCK_PERIOD <<
12421                EEPROM_ADDR_CLKPERD_SHIFT)));
12422
12423         msleep(1);
12424
12425         /* Enable seeprom accesses. */
12426         tw32_f(GRC_LOCAL_CTRL,
12427              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12428         udelay(100);
12429
12430         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12431             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12432                 tg3_flag_set(tp, NVRAM);
12433
12434                 if (tg3_nvram_lock(tp)) {
12435                         netdev_warn(tp->dev,
12436                                     "Cannot get nvram lock, %s failed\n",
12437                                     __func__);
12438                         return;
12439                 }
12440                 tg3_enable_nvram_access(tp);
12441
12442                 tp->nvram_size = 0;
12443
12444                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12445                         tg3_get_5752_nvram_info(tp);
12446                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12447                         tg3_get_5755_nvram_info(tp);
12448                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12449                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12450                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12451                         tg3_get_5787_nvram_info(tp);
12452                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12453                         tg3_get_5761_nvram_info(tp);
12454                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12455                         tg3_get_5906_nvram_info(tp);
12456                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12457                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12458                         tg3_get_57780_nvram_info(tp);
12459                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12460                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12461                         tg3_get_5717_nvram_info(tp);
12462                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12463                         tg3_get_5720_nvram_info(tp);
12464                 else
12465                         tg3_get_nvram_info(tp);
12466
12467                 if (tp->nvram_size == 0)
12468                         tg3_get_nvram_size(tp);
12469
12470                 tg3_disable_nvram_access(tp);
12471                 tg3_nvram_unlock(tp);
12472
12473         } else {
12474                 tg3_flag_clear(tp, NVRAM);
12475                 tg3_flag_clear(tp, NVRAM_BUFFERED);
12476
12477                 tg3_get_eeprom_size(tp);
12478         }
12479 }
12480
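/* Write 'len' bytes at 'offset' through the legacy SEEPROM interface,
 * one 32-bit word per transaction, polling EEPROM_ADDR_COMPLETE after
 * each word.  Offset and length are dword aligned.
 */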
12481 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12482                                     u32 offset, u32 len, u8 *buf)
12483 {
12484         int i, j, rc = 0;
12485         u32 val;
12486
12487         for (i = 0; i < len; i += 4) {
12488                 u32 addr;
12489                 __be32 data;
12490
12491                 addr = offset + i;
12492
12493                 memcpy(&data, buf + i, 4);
12494
12495                 /*
12496                  * The SEEPROM interface expects the data in the byte
12497                  * order opposite to the native endianness.  We achieve
12498                  * this by reversing every transform that a call to
12499                  * tg3_nvram_read_be32() would have applied to the data.
12500                  */
12501                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12502
12503                 val = tr32(GRC_EEPROM_ADDR);
12504                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12505
12506                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12507                         EEPROM_ADDR_READ);
12508                 tw32(GRC_EEPROM_ADDR, val |
12509                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
12510                         (addr & EEPROM_ADDR_ADDR_MASK) |
12511                         EEPROM_ADDR_START |
12512                         EEPROM_ADDR_WRITE);
12513
12514                 for (j = 0; j < 1000; j++) {
12515                         val = tr32(GRC_EEPROM_ADDR);
12516
12517                         if (val & EEPROM_ADDR_COMPLETE)
12518                                 break;
12519                         msleep(1);
12520                 }
12521                 if (!(val & EEPROM_ADDR_COMPLETE)) {
12522                         rc = -EBUSY;
12523                         break;
12524                 }
12525         }
12526
12527         return rc;
12528 }
12529
12530 /* offset and length are dword aligned */
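/* Unbuffered flash parts must be erased a full page at a time, so each
 * pass below reads the target page into a bounce buffer, merges in the
 * caller's data, erases the page, and rewrites it word by word with
 * FIRST/LAST framing.
 */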
12531 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12532                 u8 *buf)
12533 {
12534         int ret = 0;
12535         u32 pagesize = tp->nvram_pagesize;
12536         u32 pagemask = pagesize - 1;
12537         u32 nvram_cmd;
12538         u8 *tmp;
12539
12540         tmp = kmalloc(pagesize, GFP_KERNEL);
12541         if (tmp == NULL)
12542                 return -ENOMEM;
12543
12544         while (len) {
12545                 int j;
12546                 u32 phy_addr, page_off, size;
12547
12548                 phy_addr = offset & ~pagemask;
12549
12550                 for (j = 0; j < pagesize; j += 4) {
12551                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
12552                                                   (__be32 *) (tmp + j));
12553                         if (ret)
12554                                 break;
12555                 }
12556                 if (ret)
12557                         break;
12558
12559                 page_off = offset & pagemask;
12560                 size = pagesize;
12561                 if (len < size)
12562                         size = len;
12563
12564                 len -= size;
12565
12566                 memcpy(tmp + page_off, buf, size);
12567
                /* Advance the source pointer too; without this, every
                 * page after the first would rewrite the first page's
                 * data.
                 */
                buf += size;
12568                 offset = offset + (pagesize - page_off);
12569
12570                 tg3_enable_nvram_access(tp);
12571
12572                 /*
12573                  * Before we can erase the flash page, we need
12574                  * to issue a special "write enable" command.
12575                  */
12576                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12577
12578                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12579                         break;
12580
12581                 /* Erase the target page */
12582                 tw32(NVRAM_ADDR, phy_addr);
12583
12584                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12585                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12586
12587                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12588                         break;
12589
12590                 /* Issue another write enable to start the write. */
12591                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12592
12593                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12594                         break;
12595
12596                 for (j = 0; j < pagesize; j += 4) {
12597                         __be32 data;
12598
12599                         data = *((__be32 *) (tmp + j));
12600
12601                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
12602
12603                         tw32(NVRAM_ADDR, phy_addr + j);
12604
12605                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12606                                 NVRAM_CMD_WR;
12607
12608                         if (j == 0)
12609                                 nvram_cmd |= NVRAM_CMD_FIRST;
12610                         else if (j == (pagesize - 4))
12611                                 nvram_cmd |= NVRAM_CMD_LAST;
12612
12613                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12614                                 break;
12615                 }
12616                 if (ret)
12617                         break;
12618         }
12619
12620         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12621         tg3_nvram_exec_cmd(tp, nvram_cmd);
12622
12623         kfree(tmp);
12624
12625         return ret;
12626 }
12627
12628 /* offset and length are dword aligned */
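/* Buffered (self-erasing) parts and plain eeproms take word writes
 * directly; the controller only needs FIRST/LAST framing at page and
 * transfer boundaries, plus a WREN for ST flash on chips older than the
 * 5752/5755 generation.
 */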
12629 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12630                 u8 *buf)
12631 {
12632         int i, ret = 0;
12633
12634         for (i = 0; i < len; i += 4, offset += 4) {
12635                 u32 page_off, phy_addr, nvram_cmd;
12636                 __be32 data;
12637
12638                 memcpy(&data, buf + i, 4);
12639                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12640
12641                 page_off = offset % tp->nvram_pagesize;
12642
12643                 phy_addr = tg3_nvram_phys_addr(tp, offset);
12644
12645                 tw32(NVRAM_ADDR, phy_addr);
12646
12647                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12648
12649                 if (page_off == 0 || i == 0)
12650                         nvram_cmd |= NVRAM_CMD_FIRST;
12651                 if (page_off == (tp->nvram_pagesize - 4))
12652                         nvram_cmd |= NVRAM_CMD_LAST;
12653
12654                 if (i == (len - 4))
12655                         nvram_cmd |= NVRAM_CMD_LAST;
12656
12657                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12658                     !tg3_flag(tp, 5755_PLUS) &&
12659                     (tp->nvram_jedecnum == JEDEC_ST) &&
12660                     (nvram_cmd & NVRAM_CMD_FIRST)) {
12661
12662                         if ((ret = tg3_nvram_exec_cmd(tp,
12663                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12664                                 NVRAM_CMD_DONE)))
12666                                 break;
12667                 }
12668                 if (!tg3_flag(tp, FLASH)) {
12669                         /* We always do complete word writes to eeprom. */
12670                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12671                 }
12672
12673                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12674                         break;
12675         }
12676         return ret;
12677 }
12678
12679 /* offset and length are dword aligned */
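/* Top-level write dispatcher: drop GPIO1 for the duration of the write
 * on boards with EEPROM_WRITE_PROT (the pin is assumed to gate the
 * part's write protect), then hand off to the legacy-SEEPROM, buffered,
 * or unbuffered writer under the NVRAM lock.
 */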
12680 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12681 {
12682         int ret;
12683
12684         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12685                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12686                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
12687                 udelay(40);
12688         }
12689
12690         if (!tg3_flag(tp, NVRAM)) {
12691                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12692         } else {
12693                 u32 grc_mode;
12694
12695                 ret = tg3_nvram_lock(tp);
12696                 if (ret)
12697                         return ret;
12698
12699                 tg3_enable_nvram_access(tp);
12700                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12701                         tw32(NVRAM_WRITE1, 0x406);
12702
12703                 grc_mode = tr32(GRC_MODE);
12704                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12705
12706                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12707                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
12708                                 buf);
12709                 } else {
12710                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12711                                 buf);
12712                 }
12713
12714                 grc_mode = tr32(GRC_MODE);
12715                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12716
12717                 tg3_disable_nvram_access(tp);
12718                 tg3_nvram_unlock(tp);
12719         }
12720
12721         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12722                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12723                 udelay(40);
12724         }
12725
12726         return ret;
12727 }
12728
12729 struct subsys_tbl_ent {
12730         u16 subsys_vendor, subsys_devid;
12731         u32 phy_id;
12732 };
12733
12734 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12735         /* Broadcom boards. */
12736         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12737           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12738         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12739           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12740         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12741           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12742         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12743           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12744         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12745           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12746         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12747           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12748         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12749           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12750         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12751           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12752         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12753           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12754         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12755           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12756         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12757           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12758
12759         /* 3com boards. */
12760         { TG3PCI_SUBVENDOR_ID_3COM,
12761           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12762         { TG3PCI_SUBVENDOR_ID_3COM,
12763           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12764         { TG3PCI_SUBVENDOR_ID_3COM,
12765           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12766         { TG3PCI_SUBVENDOR_ID_3COM,
12767           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12768         { TG3PCI_SUBVENDOR_ID_3COM,
12769           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12770
12771         /* DELL boards. */
12772         { TG3PCI_SUBVENDOR_ID_DELL,
12773           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12774         { TG3PCI_SUBVENDOR_ID_DELL,
12775           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12776         { TG3PCI_SUBVENDOR_ID_DELL,
12777           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12778         { TG3PCI_SUBVENDOR_ID_DELL,
12779           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12780
12781         /* Compaq boards. */
12782         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12783           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12784         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12785           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12786         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12787           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12788         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12789           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12790         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12791           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12792
12793         /* IBM boards. */
12794         { TG3PCI_SUBVENDOR_ID_IBM,
12795           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12796 };
12797
12798 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12799 {
12800         int i;
12801
12802         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12803                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12804                      tp->pdev->subsystem_vendor) &&
12805                     (subsys_id_to_phy_id[i].subsys_devid ==
12806                      tp->pdev->subsystem_device))
12807                         return &subsys_id_to_phy_id[i];
12808         }
12809         return NULL;
12810 }
12811
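/* Read the configuration the bootcode left behind (NIC SRAM on most
 * chips, the VCPU shadow register on 5906) and translate it into the
 * driver's PHY, LED, WOL, and ASF/APE flags.
 */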
12812 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12813 {
12814         u32 val;
12815
12816         tp->phy_id = TG3_PHY_ID_INVALID;
12817         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12818
12819         /* Assume an onboard, WOL-capable device by default.  */
12820         tg3_flag_set(tp, EEPROM_WRITE_PROT);
12821         tg3_flag_set(tp, WOL_CAP);
12822
12823         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12824                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12825                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12826                         tg3_flag_set(tp, IS_NIC);
12827                 }
12828                 val = tr32(VCPU_CFGSHDW);
12829                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12830                         tg3_flag_set(tp, ASPM_WORKAROUND);
12831                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12832                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
12833                         tg3_flag_set(tp, WOL_ENABLE);
12834                         device_set_wakeup_enable(&tp->pdev->dev, true);
12835                 }
12836                 goto done;
12837         }
12838
12839         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12840         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12841                 u32 nic_cfg, led_cfg;
12842                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12843                 int eeprom_phy_serdes = 0;
12844
12845                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12846                 tp->nic_sram_data_cfg = nic_cfg;
12847
12848                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12849                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12850                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12851                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12852                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
12853                     (ver > 0) && (ver < 0x100))
12854                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12855
12856                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12857                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12858
12859                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12860                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12861                         eeprom_phy_serdes = 1;
12862
12863                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12864                 if (nic_phy_id != 0) {
12865                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12866                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
12867
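                        /* Fold the two NVRAM ID words into the driver's
                         * phy_id layout; tg3_phy_probe() packs the live
                         * MII_PHYSID1/2 registers the same way.
                         */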
12868                         eeprom_phy_id  = (id1 >> 16) << 10;
12869                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
12870                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
12871                 } else
12872                         eeprom_phy_id = 0;
12873
12874                 tp->phy_id = eeprom_phy_id;
12875                 if (eeprom_phy_serdes) {
12876                         if (!tg3_flag(tp, 5705_PLUS))
12877                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12878                         else
12879                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
12880                 }
12881
12882                 if (tg3_flag(tp, 5750_PLUS))
12883                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12884                                     SHASTA_EXT_LED_MODE_MASK);
12885                 else
12886                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12887
12888                 switch (led_cfg) {
12889                 default:
12890                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12891                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12892                         break;
12893
12894                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12895                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12896                         break;
12897
12898                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12899                         tp->led_ctrl = LED_CTRL_MODE_MAC;
12900
12901                         /* Default to PHY_1 mode if 0 (MAC_MODE) is
12902                          * read, as with some older 5700/5701 bootcode.
12903                          */
12904                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12905                             ASIC_REV_5700 ||
12906                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
12907                             ASIC_REV_5701)
12908                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12909
12910                         break;
12911
12912                 case SHASTA_EXT_LED_SHARED:
12913                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
12914                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12915                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12916                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12917                                                  LED_CTRL_MODE_PHY_2);
12918                         break;
12919
12920                 case SHASTA_EXT_LED_MAC:
12921                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12922                         break;
12923
12924                 case SHASTA_EXT_LED_COMBO:
12925                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
12926                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12927                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12928                                                  LED_CTRL_MODE_PHY_2);
12929                         break;
12930
12931                 }
12932
12933                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12934                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12935                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12936                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12937
12938                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12939                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12940
12941                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12942                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
12943                         if ((tp->pdev->subsystem_vendor ==
12944                              PCI_VENDOR_ID_ARIMA) &&
12945                             (tp->pdev->subsystem_device == 0x205a ||
12946                              tp->pdev->subsystem_device == 0x2063))
12947                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12948                 } else {
12949                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12950                         tg3_flag_set(tp, IS_NIC);
12951                 }
12952
12953                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12954                         tg3_flag_set(tp, ENABLE_ASF);
12955                         if (tg3_flag(tp, 5750_PLUS))
12956                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
12957                 }
12958
12959                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12960                     tg3_flag(tp, 5750_PLUS))
12961                         tg3_flag_set(tp, ENABLE_APE);
12962
12963                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
12964                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12965                         tg3_flag_clear(tp, WOL_CAP);
12966
12967                 if (tg3_flag(tp, WOL_CAP) &&
12968                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
12969                         tg3_flag_set(tp, WOL_ENABLE);
12970                         device_set_wakeup_enable(&tp->pdev->dev, true);
12971                 }
12972
12973                 if (cfg2 & (1 << 17))
12974                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
12975
12976                 /* SerDes signal pre-emphasis in register 0x590 is set
12977                  * by the bootcode if bit 18 is set. */
12978                 if (cfg2 & (1 << 18))
12979                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
12980
12981                 if ((tg3_flag(tp, 57765_PLUS) ||
12982                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12983                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
12984                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12985                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
12986
12987                 if (tg3_flag(tp, PCI_EXPRESS) &&
12988                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12989                     !tg3_flag(tp, 57765_PLUS)) {
12990                         u32 cfg3;
12991
12992                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
12993                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
12994                                 tg3_flag_set(tp, ASPM_WORKAROUND);
12995                 }
12996
12997                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
12998                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
12999                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13000                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13001                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13002                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13003         }
13004 done:
13005         if (tg3_flag(tp, WOL_CAP))
13006                 device_set_wakeup_enable(&tp->pdev->dev,
13007                                          tg3_flag(tp, WOL_ENABLE));
13008         else
13009                 device_set_wakeup_capable(&tp->pdev->dev, false);
13010 }
13011
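/* Kick a single OTP controller command and busy-wait for its DONE
 * status bit.
 */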
13012 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13013 {
13014         int i;
13015         u32 val;
13016
13017         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13018         tw32(OTP_CTRL, cmd);
13019
13020         /* Wait for up to 1 ms for command to execute. */
13021         for (i = 0; i < 100; i++) {
13022                 val = tr32(OTP_STATUS);
13023                 if (val & OTP_STATUS_CMD_DONE)
13024                         break;
13025                 udelay(10);
13026         }
13027
13028         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13029 }
13030
13031 /* Read the gphy configuration from the OTP region of the chip.  The gphy
13032  * configuration is a 32-bit value that straddles the alignment boundary.
13033  * We do two 32-bit reads and then shift and merge the results.
13034  */
13035 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13036 {
13037         u32 bhalf_otp, thalf_otp;
13038
13039         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13040
13041         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13042                 return 0;
13043
13044         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13045
13046         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13047                 return 0;
13048
13049         thalf_otp = tr32(OTP_READ_DATA);
13050
13051         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13052
13053         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13054                 return 0;
13055
13056         bhalf_otp = tr32(OTP_READ_DATA);
13057
13058         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13059 }
13060
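/* Seed link_config for autonegotiation with every mode this PHY can
 * legitimately advertise: gigabit unless it is 10/100-only, and the
 * 10/100 twisted-pair modes unless it is a SerDes device.
 */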
13061 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13062 {
13063         u32 adv = ADVERTISED_Autoneg |
13064                   ADVERTISED_Pause;
13065
13066         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13067                 adv |= ADVERTISED_1000baseT_Half |
13068                        ADVERTISED_1000baseT_Full;
13069
13070         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13071                 adv |= ADVERTISED_100baseT_Half |
13072                        ADVERTISED_100baseT_Full |
13073                        ADVERTISED_10baseT_Half |
13074                        ADVERTISED_10baseT_Full |
13075                        ADVERTISED_TP;
13076         else
13077                 adv |= ADVERTISED_FIBRE;
13078
13079         tp->link_config.advertising = adv;
13080         tp->link_config.speed = SPEED_INVALID;
13081         tp->link_config.duplex = DUPLEX_INVALID;
13082         tp->link_config.autoneg = AUTONEG_ENABLE;
13083         tp->link_config.active_speed = SPEED_INVALID;
13084         tp->link_config.active_duplex = DUPLEX_INVALID;
13085         tp->link_config.orig_speed = SPEED_INVALID;
13086         tp->link_config.orig_duplex = DUPLEX_INVALID;
13087         tp->link_config.orig_autoneg = AUTONEG_INVALID;
13088 }
13089
13090 static int __devinit tg3_phy_probe(struct tg3 *tp)
13091 {
13092         u32 hw_phy_id_1, hw_phy_id_2;
13093         u32 hw_phy_id, hw_phy_id_masked;
13094         int err;
13095
13096         /* flow control autonegotiation is default behavior */
13097         tg3_flag_set(tp, PAUSE_AUTONEG);
13098         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13099
13100         if (tg3_flag(tp, USE_PHYLIB))
13101                 return tg3_phy_init(tp);
13102
13103         /* Reading the PHY ID register can conflict with ASF
13104          * firmware access to the PHY hardware.
13105          */
13106         err = 0;
13107         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13108                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13109         } else {
13110                 /* Now read the physical PHY_ID from the chip and verify
13111                  * that it is sane.  If it doesn't look good, fall back
13112                  * to the PHY_ID found in the eeprom area and, failing
13113                  * that, to the hard-coded subsystem-ID table.
13114                  */
13115                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13116                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13117
13118                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
13119                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13120                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
13121
13122                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13123         }
13124
13125         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13126                 tp->phy_id = hw_phy_id;
13127                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13128                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13129                 else
13130                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13131         } else {
13132                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13133                         /* Do nothing, phy ID already set up in
13134                          * tg3_get_eeprom_hw_cfg().
13135                          */
13136                 } else {
13137                         struct subsys_tbl_ent *p;
13138
13139                         /* No eeprom signature?  Try the hardcoded
13140                          * subsys device table.
13141                          */
13142                         p = tg3_lookup_by_subsys(tp);
13143                         if (!p)
13144                                 return -ENODEV;
13145
13146                         tp->phy_id = p->phy_id;
13147                         if (!tp->phy_id ||
13148                             tp->phy_id == TG3_PHY_ID_BCM8002)
13149                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13150                 }
13151         }
13152
13153         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13154             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13155              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13156              (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13157               tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13158              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13159               tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13160                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13161
13162         tg3_phy_init_link_config(tp);
13163
13164         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13165             !tg3_flag(tp, ENABLE_APE) &&
13166             !tg3_flag(tp, ENABLE_ASF)) {
13167                 u32 bmsr, mask;
13168
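                /* BMSR latches link-down events, so read it twice; the
                 * second read reflects the current link state.
                 */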
13169                 tg3_readphy(tp, MII_BMSR, &bmsr);
13170                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13171                     (bmsr & BMSR_LSTATUS))
13172                         goto skip_phy_reset;
13173
13174                 err = tg3_phy_reset(tp);
13175                 if (err)
13176                         return err;
13177
13178                 tg3_phy_set_wirespeed(tp);
13179
13180                 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13181                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13182                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
13183                 if (!tg3_copper_is_advertising_all(tp, mask)) {
13184                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13185                                             tp->link_config.flowctrl);
13186
13187                         tg3_writephy(tp, MII_BMCR,
13188                                      BMCR_ANENABLE | BMCR_ANRESTART);
13189                 }
13190         }
13191
13192 skip_phy_reset:
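        /* The DSP setup is deliberately issued twice below; a single
         * pass is assumed (from the shape of this code) not to be
         * reliable on the 5401 PHY.
         */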
13193         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13194                 err = tg3_init_5401phy_dsp(tp);
13195                 if (err)
13196                         return err;
13197
13198                 err = tg3_init_5401phy_dsp(tp);
13199         }
13200
13201         return err;
13202 }
13203
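/* Parse the VPD image read from NVRAM: the read-only section may carry
 * a Dell ("1028") vendor-specific firmware string and supplies the
 * board part number.
 */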
13204 static void __devinit tg3_read_vpd(struct tg3 *tp)
13205 {
13206         u8 *vpd_data;
13207         unsigned int block_end, rosize, len;
13208         int j, i = 0;
13209
13210         vpd_data = (u8 *)tg3_vpd_readblock(tp);
13211         if (!vpd_data)
13212                 goto out_no_vpd;
13213
13214         i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
13215                              PCI_VPD_LRDT_RO_DATA);
13216         if (i < 0)
13217                 goto out_not_found;
13218
13219         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13220         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13221         i += PCI_VPD_LRDT_TAG_SIZE;
13222
13223         if (block_end > TG3_NVM_VPD_LEN)
13224                 goto out_not_found;
13225
13226         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13227                                       PCI_VPD_RO_KEYWORD_MFR_ID);
13228         if (j > 0) {
13229                 len = pci_vpd_info_field_size(&vpd_data[j]);
13230
13231                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13232                 if (j + len > block_end || len != 4 ||
13233                     memcmp(&vpd_data[j], "1028", 4))
13234                         goto partno;
13235
13236                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13237                                               PCI_VPD_RO_KEYWORD_VENDOR0);
13238                 if (j < 0)
13239                         goto partno;
13240
13241                 len = pci_vpd_info_field_size(&vpd_data[j]);
13242
13243                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13244                 if (j + len > block_end)
13245                         goto partno;
13246
13247                 memcpy(tp->fw_ver, &vpd_data[j], len);
13248                 strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
13249         }
13250
13251 partno:
13252         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13253                                       PCI_VPD_RO_KEYWORD_PARTNO);
13254         if (i < 0)
13255                 goto out_not_found;
13256
13257         len = pci_vpd_info_field_size(&vpd_data[i]);
13258
13259         i += PCI_VPD_INFO_FLD_HDR_SIZE;
13260         if (len > TG3_BPN_SIZE ||
13261             (len + i) > TG3_NVM_VPD_LEN)
13262                 goto out_not_found;
13263
13264         memcpy(tp->board_part_number, &vpd_data[i], len);
13265
13266 out_not_found:
13267         kfree(vpd_data);
13268         if (tp->board_part_number[0])
13269                 return;
13270
13271 out_no_vpd:
13272         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13273                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13274                         strcpy(tp->board_part_number, "BCM5717");
13275                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13276                         strcpy(tp->board_part_number, "BCM5718");
13277                 else
13278                         goto nomatch;
13279         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13280                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13281                         strcpy(tp->board_part_number, "BCM57780");
13282                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13283                         strcpy(tp->board_part_number, "BCM57760");
13284                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13285                         strcpy(tp->board_part_number, "BCM57790");
13286                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13287                         strcpy(tp->board_part_number, "BCM57788");
13288                 else
13289                         goto nomatch;
13290         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13291                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13292                         strcpy(tp->board_part_number, "BCM57761");
13293                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13294                         strcpy(tp->board_part_number, "BCM57765");
13295                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13296                         strcpy(tp->board_part_number, "BCM57781");
13297                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13298                         strcpy(tp->board_part_number, "BCM57785");
13299                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13300                         strcpy(tp->board_part_number, "BCM57791");
13301                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13302                         strcpy(tp->board_part_number, "BCM57795");
13303                 else
13304                         goto nomatch;
13305         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13306                 strcpy(tp->board_part_number, "BCM95906");
13307         } else {
13308 nomatch:
13309                 strcpy(tp->board_part_number, "none");
13310         }
13311 }
13312
13313 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13314 {
13315         u32 val;
13316
13317         if (tg3_nvram_read(tp, offset, &val) ||
13318             (val & 0xfc000000) != 0x0c000000 ||
13319             tg3_nvram_read(tp, offset + 4, &val) ||
13320             val != 0)
13321                 return 0;
13322
13323         return 1;
13324 }
13325
13326 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13327 {
13328         u32 val, offset, start, ver_offset;
13329         int i, dst_off;
13330         bool newver = false;
13331
13332         if (tg3_nvram_read(tp, 0xc, &offset) ||
13333             tg3_nvram_read(tp, 0x4, &start))
13334                 return;
13335
13336         offset = tg3_nvram_logical_addr(tp, offset);
13337
13338         if (tg3_nvram_read(tp, offset, &val))
13339                 return;
13340
13341         if ((val & 0xfc000000) == 0x0c000000) {
13342                 if (tg3_nvram_read(tp, offset + 4, &val))
13343                         return;
13344
13345                 if (val == 0)
13346                         newver = true;
13347         }
13348
13349         dst_off = strlen(tp->fw_ver);
13350
13351         if (newver) {
13352                 if (TG3_VER_SIZE - dst_off < 16 ||
13353                     tg3_nvram_read(tp, offset + 8, &ver_offset))
13354                         return;
13355
13356                 offset = offset + ver_offset - start;
13357                 for (i = 0; i < 16; i += 4) {
13358                         __be32 v;
13359                         if (tg3_nvram_read_be32(tp, offset + i, &v))
13360                                 return;
13361
13362                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13363                 }
13364         } else {
13365                 u32 major, minor;
13366
13367                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13368                         return;
13369
13370                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13371                         TG3_NVM_BCVER_MAJSFT;
13372                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13373                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13374                          "v%d.%02d", major, minor);
13375         }
13376 }
13377
13378 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13379 {
13380         u32 val, major, minor;
13381
13382         /* Use native endian representation */
13383         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13384                 return;
13385
13386         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13387                 TG3_NVM_HWSB_CFG1_MAJSFT;
13388         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13389                 TG3_NVM_HWSB_CFG1_MINSFT;
13390
13391         snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
13392 }
13393
13394 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13395 {
13396         u32 offset, major, minor, build;
13397
13398         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13399
13400         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13401                 return;
13402
13403         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13404         case TG3_EEPROM_SB_REVISION_0:
13405                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13406                 break;
13407         case TG3_EEPROM_SB_REVISION_2:
13408                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13409                 break;
13410         case TG3_EEPROM_SB_REVISION_3:
13411                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13412                 break;
13413         case TG3_EEPROM_SB_REVISION_4:
13414                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13415                 break;
13416         case TG3_EEPROM_SB_REVISION_5:
13417                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13418                 break;
13419         case TG3_EEPROM_SB_REVISION_6:
13420                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13421                 break;
13422         default:
13423                 return;
13424         }
13425
13426         if (tg3_nvram_read(tp, offset, &val))
13427                 return;
13428
13429         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13430                 TG3_EEPROM_SB_EDH_BLD_SHFT;
13431         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13432                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13433         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
13434
13435         if (minor > 99 || build > 26)
13436                 return;
13437
13438         offset = strlen(tp->fw_ver);
13439         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13440                  " v%d.%02d", major, minor);
13441
13442         if (build > 0) {
13443                 offset = strlen(tp->fw_ver);
13444                 if (offset < TG3_VER_SIZE - 1)
13445                         tp->fw_ver[offset] = 'a' + build - 1;
13446         }
13447 }
13448
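/* Walk the NVRAM directory for the ASF INI image, validate it, and
 * append its four-word version string to fw_ver.
 */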
13449 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13450 {
13451         u32 val, offset, start;
13452         int i, vlen;
13453
13454         for (offset = TG3_NVM_DIR_START;
13455              offset < TG3_NVM_DIR_END;
13456              offset += TG3_NVM_DIRENT_SIZE) {
13457                 if (tg3_nvram_read(tp, offset, &val))
13458                         return;
13459
13460                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13461                         break;
13462         }
13463
13464         if (offset == TG3_NVM_DIR_END)
13465                 return;
13466
13467         if (!tg3_flag(tp, 5705_PLUS))
13468                 start = 0x08000000;
13469         else if (tg3_nvram_read(tp, offset - 4, &start))
13470                 return;
13471
13472         if (tg3_nvram_read(tp, offset + 4, &offset) ||
13473             !tg3_fw_img_is_valid(tp, offset) ||
13474             tg3_nvram_read(tp, offset + 8, &val))
13475                 return;
13476
13477         offset += val - start;
13478
13479         vlen = strlen(tp->fw_ver);
13480         if (vlen > TG3_VER_SIZE - 2)
                return; /* no room left for the ", " separator */
13481         tp->fw_ver[vlen++] = ',';
13482         tp->fw_ver[vlen++] = ' ';
13483
13484         for (i = 0; i < 4; i++) {
13485                 __be32 v;
13486                 if (tg3_nvram_read_be32(tp, offset, &v))
13487                         return;
13488
13489                 offset += sizeof(v);
13490
13491                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13492                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13493                         break;
13494                 }
13495
13496                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13497                 vlen += sizeof(v);
13498         }
13499 }
13500
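/* Append the management firmware (NCSI or DASH) version, read from APE
 * shared memory once the APE firmware reports itself ready.
 */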
13501 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13502 {
13503         int vlen;
13504         u32 apedata;
13505         char *fwtype;
13506
13507         if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13508                 return;
13509
13510         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13511         if (apedata != APE_SEG_SIG_MAGIC)
13512                 return;
13513
13514         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13515         if (!(apedata & APE_FW_STATUS_READY))
13516                 return;
13517
13518         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13519
13520         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13521                 tg3_flag_set(tp, APE_HAS_NCSI);
13522                 fwtype = "NCSI";
13523         } else {
13524                 fwtype = "DASH";
13525         }
13526
13527         vlen = strlen(tp->fw_ver);
13528
13529         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13530                  fwtype,
13531                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13532                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13533                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13534                  (apedata & APE_FW_VERSION_BLDMSK));
13535 }
13536
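/* Build the final fw_ver string: dispatch on the NVRAM signature word
 * to the bootcode, self-boot, or hardware self-boot parser, then append
 * any ASF/APE management firmware version.
 */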
13537 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13538 {
13539         u32 val;
13540         bool vpd_vers = false;
13541
13542         if (tp->fw_ver[0] != 0)
13543                 vpd_vers = true;
13544
13545         if (tg3_flag(tp, NO_NVRAM)) {
13546                 strcat(tp->fw_ver, "sb");
13547                 return;
13548         }
13549
13550         if (tg3_nvram_read(tp, 0, &val))
13551                 return;
13552
13553         if (val == TG3_EEPROM_MAGIC)
13554                 tg3_read_bc_ver(tp);
13555         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13556                 tg3_read_sb_ver(tp, val);
13557         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13558                 tg3_read_hwsb_ver(tp);
13559         else
13560                 return;
13561
13562         if (vpd_vers)
13563                 goto done;
13564
13565         if (tg3_flag(tp, ENABLE_APE)) {
13566                 if (tg3_flag(tp, ENABLE_ASF))
13567                         tg3_read_dash_ver(tp);
13568         } else if (tg3_flag(tp, ENABLE_ASF)) {
13569                 tg3_read_mgmtfw_ver(tp);
13570         }
13571
13572 done:
13573         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13574 }
13575
13576 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13577
13578 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13579 {
13580         if (tg3_flag(tp, LRG_PROD_RING_CAP))
13581                 return TG3_RX_RET_MAX_SIZE_5717;
13582         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13583                 return TG3_RX_RET_MAX_SIZE_5700;
13584         else
13585                 return TG3_RX_RET_MAX_SIZE_5705;
13586 }
13587
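/* Host bridges known to reorder posted PCI writes; finding one of these
 * in the system is assumed (from the table's name) to trigger the
 * mailbox write-reorder workaround elsewhere in the driver.
 */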
13588 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13589         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13590         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13591         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13592         { },
13593 };
13594
13595 static int __devinit tg3_get_invariants(struct tg3 *tp)
13596 {
13597         u32 misc_ctrl_reg;
13598         u32 pci_state_reg, grc_misc_cfg;
13599         u32 val;
13600         u16 pci_cmd;
13601         int err;
13602
13603         /* Force memory write invalidate off.  If we leave it on,
13604          * then on 5700_BX chips we have to enable a workaround.
13605          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13606          * to match the cacheline size.  The Broadcom driver has this
13607          * workaround but turns MWI off at all times, so the workaround
13608          * never gets used.  This suggests that it is insufficient.
13609          */
13610         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13611         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13612         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13613
13614         /* Important! -- Make sure register accesses are byteswapped
13615          * correctly.  Also, for those chips that require it, make
13616          * sure that indirect register accesses are enabled before
13617          * the first operation.
13618          */
13619         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13620                               &misc_ctrl_reg);
13621         tp->misc_host_ctrl |= (misc_ctrl_reg &
13622                                MISC_HOST_CTRL_CHIPREV);
13623         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13624                                tp->misc_host_ctrl);
13625
13626         tp->pci_chip_rev_id = (misc_ctrl_reg >>
13627                                MISC_HOST_CTRL_CHIPREV_SHIFT);
13628         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13629                 u32 prod_id_asic_rev;
13630
13631                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13632                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13633                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13634                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13635                         pci_read_config_dword(tp->pdev,
13636                                               TG3PCI_GEN2_PRODID_ASICREV,
13637                                               &prod_id_asic_rev);
13638                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13639                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13640                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13641                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13642                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13643                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13644                         pci_read_config_dword(tp->pdev,
13645                                               TG3PCI_GEN15_PRODID_ASICREV,
13646                                               &prod_id_asic_rev);
13647                 else
13648                         pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13649                                               &prod_id_asic_rev);
13650
13651                 tp->pci_chip_rev_id = prod_id_asic_rev;
13652         }
13653
13654         /* Wrong chip ID in 5752 A0. This code can be removed later
13655          * as A0 is not in production.
13656          */
13657         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13658                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13659
13660         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13661          * we need to disable memory and use config. cycles
13662          * only to access all registers. The 5702/03 chips
13663          * can mistakenly decode the special cycles from the
13664          * ICH chipsets as memory write cycles, causing corruption
13665          * of register and memory space. Only certain ICH bridges
13666          * will drive special cycles with non-zero data during the
13667          * address phase which can fall within the 5703's address
13668          * range. This is not an ICH bug as the PCI spec allows
13669          * non-zero address during special cycles. However, only
13670          * these ICH bridges are known to drive non-zero addresses
13671          * during special cycles.
13672          *
13673          * Since special cycles do not cross PCI bridges, we only
13674          * enable this workaround if the 5703 is on the secondary
13675          * bus of these ICH bridges.
13676          */
13677         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13678             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13679                 static struct tg3_dev_id {
13680                         u32     vendor;
13681                         u32     device;
13682                         u32     rev;
13683                 } ich_chipsets[] = {
13684                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13685                           PCI_ANY_ID },
13686                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13687                           PCI_ANY_ID },
13688                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13689                           0xa },
13690                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13691                           PCI_ANY_ID },
13692                         { },
13693                 };
13694                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13695                 struct pci_dev *bridge = NULL;
13696
13697                 while (pci_id->vendor != 0) {
13698                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
13699                                                 bridge);
13700                         if (!bridge) {
13701                                 pci_id++;
13702                                 continue;
13703                         }
13704                         if (pci_id->rev != PCI_ANY_ID) {
13705                                 if (bridge->revision > pci_id->rev)
13706                                         continue;
13707                         }
13708                         if (bridge->subordinate &&
13709                             (bridge->subordinate->number ==
13710                              tp->pdev->bus->number)) {
13711                                 tg3_flag_set(tp, ICH_WORKAROUND);
13712                                 pci_dev_put(bridge);
13713                                 break;
13714                         }
13715                 }
13716         }
13717
13718         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13719                 static struct tg3_dev_id {
13720                         u32     vendor;
13721                         u32     device;
13722                 } bridge_chipsets[] = {
13723                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13724                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13725                         { },
13726                 };
13727                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13728                 struct pci_dev *bridge = NULL;
13729
13730                 while (pci_id->vendor != 0) {
13731                         bridge = pci_get_device(pci_id->vendor,
13732                                                 pci_id->device,
13733                                                 bridge);
13734                         if (!bridge) {
13735                                 pci_id++;
13736                                 continue;
13737                         }
13738                         if (bridge->subordinate &&
13739                             (bridge->subordinate->number <=
13740                              tp->pdev->bus->number) &&
13741                             (bridge->subordinate->subordinate >=
13742                              tp->pdev->bus->number)) {
13743                                 tg3_flag_set(tp, 5701_DMA_BUG);
13744                                 pci_dev_put(bridge);
13745                                 break;
13746                         }
13747                 }
13748         }
13749
13750         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13751          * DMA addresses > 40-bit. This bridge may have additional
13752          * 57xx devices behind it in some 4-port NIC designs, for example.
13753          * Any tg3 device found behind the bridge will also need the 40-bit
13754          * DMA workaround.
13755          */
13756         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13757             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13758                 tg3_flag_set(tp, 5780_CLASS);
13759                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13760                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13761         } else {
13762                 struct pci_dev *bridge = NULL;
13763
13764                 do {
13765                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13766                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
13767                                                 bridge);
13768                         if (bridge && bridge->subordinate &&
13769                             (bridge->subordinate->number <=
13770                              tp->pdev->bus->number) &&
13771                             (bridge->subordinate->subordinate >=
13772                              tp->pdev->bus->number)) {
13773                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13774                                 pci_dev_put(bridge);
13775                                 break;
13776                         }
13777                 } while (bridge);
13778         }
13779
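              /* Dual-function parts: record the peer device so checks
               * below (e.g. the 5714 MSI erratum) can consult it.
               */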
13780         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13781             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
13782                 tp->pdev_peer = tg3_find_peer(tp);
13783
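              /* Build the chip-family capability chain.  Each flag set
               * here implies the ones derived after it: 5717_PLUS =>
               * 57765_PLUS => 5755_PLUS => 5750_PLUS => 5705_PLUS.
               */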
13784         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13785             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13786             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13787                 tg3_flag_set(tp, 5717_PLUS);
13788
13789         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13790             tg3_flag(tp, 5717_PLUS))
13791                 tg3_flag_set(tp, 57765_PLUS);
13792
13793         /* Intentionally exclude ASIC_REV_5906 */
13794         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13795             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13796             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13797             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13798             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13799             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13800             tg3_flag(tp, 57765_PLUS))
13801                 tg3_flag_set(tp, 5755_PLUS);
13802
13803         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13804             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13805             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13806             tg3_flag(tp, 5755_PLUS) ||
13807             tg3_flag(tp, 5780_CLASS))
13808                 tg3_flag_set(tp, 5750_PLUS);
13809
13810         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13811             tg3_flag(tp, 5750_PLUS))
13812                 tg3_flag_set(tp, 5705_PLUS);
13813
13814         /* Determine TSO capabilities */
13815         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13816                 ; /* Do nothing. HW bug. */
13817         else if (tg3_flag(tp, 57765_PLUS))
13818                 tg3_flag_set(tp, HW_TSO_3);
13819         else if (tg3_flag(tp, 5755_PLUS) ||
13820                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13821                 tg3_flag_set(tp, HW_TSO_2);
13822         else if (tg3_flag(tp, 5750_PLUS)) {
13823                 tg3_flag_set(tp, HW_TSO_1);
13824                 tg3_flag_set(tp, TSO_BUG);
13825                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13826                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13827                         tg3_flag_clear(tp, TSO_BUG);
13828         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13829                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13830                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13831                 tg3_flag_set(tp, TSO_BUG);
13832                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13833                         tp->fw_needed = FIRMWARE_TG3TSO5;
13834                 else
13835                         tp->fw_needed = FIRMWARE_TG3TSO;
13836         }
13837
13838         /* Selectively allow TSO based on operating conditions */
13839         if (tg3_flag(tp, HW_TSO_1) ||
13840             tg3_flag(tp, HW_TSO_2) ||
13841             tg3_flag(tp, HW_TSO_3) ||
13842             (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
13843                 tg3_flag_set(tp, TSO_CAPABLE);
13844         else {
13845                 tg3_flag_clear(tp, TSO_CAPABLE);
13846                 tg3_flag_clear(tp, TSO_BUG);
13847                 tp->fw_needed = NULL;
13848         }
13849
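              /* 5701 A0 always needs firmware, regardless of the TSO
               * decision above.
               */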
13850         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
13851                 tp->fw_needed = FIRMWARE_TG3;
13852
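              /* Default to a single interrupt vector; MSI-X capable
               * 57765_PLUS parts raise this below.
               */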
13853         tp->irq_max = 1;
13854
13855         if (tg3_flag(tp, 5750_PLUS)) {
13856                 tg3_flag_set(tp, SUPPORT_MSI);
13857                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13858                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13859                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13860                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13861                      tp->pdev_peer == tp->pdev))
13862                         tg3_flag_clear(tp, SUPPORT_MSI);
13863
13864                 if (tg3_flag(tp, 5755_PLUS) ||
13865                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13866                         tg3_flag_set(tp, 1SHOT_MSI);
13867                 }
13868
13869                 if (tg3_flag(tp, 57765_PLUS)) {
13870                         tg3_flag_set(tp, SUPPORT_MSIX);
13871                         tp->irq_max = TG3_IRQ_MAX_VECS;
13872                 }
13873         }
13874
13875         if (tg3_flag(tp, 5755_PLUS))
13876                 tg3_flag_set(tp, SHORT_DMA_BUG);
13877
13878         if (tg3_flag(tp, 5717_PLUS))
13879                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
13880
13881         if (tg3_flag(tp, 57765_PLUS) &&
13882             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13883                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
13884
13885         if (!tg3_flag(tp, 5705_PLUS) ||
13886             tg3_flag(tp, 5780_CLASS) ||
13887             tg3_flag(tp, USE_JUMBO_BDFLAG))
13888                 tg3_flag_set(tp, JUMBO_CAPABLE);
13889
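              /* Snapshot TG3PCI_PCISTATE; it is consulted below to tell
               * PCI-X from conventional PCI and to note bus speed/width.
               */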
13890         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13891                               &pci_state_reg);
13892
13893         if (pci_is_pcie(tp->pdev)) {
13894                 u16 lnkctl;
13895
13896                 tg3_flag_set(tp, PCI_EXPRESS);
13897
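                      /* Cap the PCIe maximum read request size; 5719/5720
                       * use a 2048-byte limit, all other chips 4096.
                       */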
13898                 tp->pcie_readrq = 4096;
13899                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13900                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13901                         tp->pcie_readrq = 2048;
13902
13903                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13904
13905                 pci_read_config_word(tp->pdev,
13906                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
13907                                      &lnkctl);
13908                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13909                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13910                             ASIC_REV_5906) {
13911                                 tg3_flag_clear(tp, HW_TSO_2);
13912                                 tg3_flag_clear(tp, TSO_CAPABLE);
13913                         }
13914                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13915                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13916                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13917                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13918                                 tg3_flag_set(tp, CLKREQ_BUG);
13919                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13920                         tg3_flag_set(tp, L1PLLPD_EN);
13921                 }
13922         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13923                 /* BCM5785 devices are effectively PCIe devices, and should
13924                  * follow PCIe codepaths, but do not have a PCIe capabilities
13925                  * section.
13926                  */
13927                 tg3_flag_set(tp, PCI_EXPRESS);
13928         } else if (!tg3_flag(tp, 5705_PLUS) ||
13929                    tg3_flag(tp, 5780_CLASS)) {
13930                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13931                 if (!tp->pcix_cap) {
13932                         dev_err(&tp->pdev->dev,
13933                                 "Cannot find PCI-X capability, aborting\n");
13934                         return -EIO;
13935                 }
13936
13937                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13938                         tg3_flag_set(tp, PCIX_MODE);
13939         }
13940
13941         /* If we have an AMD 762 or VIA K8T800 chipset, write
13942          * reordering of mailbox register writes by the host
13943          * controller can cause serious problems.  We read back from
13944          * every mailbox register write to force the writes to be
13945          * posted to the chip in order.
13946          */
13947         if (pci_dev_present(tg3_write_reorder_chipsets) &&
13948             !tg3_flag(tp, PCI_EXPRESS))
13949                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
13950
13951         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13952                              &tp->pci_cacheline_sz);
13953         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13954                              &tp->pci_lat_timer);
13955         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13956             tp->pci_lat_timer < 64) {
13957                 tp->pci_lat_timer = 64;
13958                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13959                                       tp->pci_lat_timer);
13960         }
13961
13962         /* Important! -- It is critical that the PCI-X hw workaround
13963          * situation is decided before the first MMIO register access.
13964          */
13965         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13966                 /* 5700 BX chips need to have their TX producer index
13967                  * mailboxes written twice to workaround a bug.
13968                  */
13969                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
13970
13971                 /* If we are in PCI-X mode, enable register write workaround.
13972                  *
13973                  * The workaround is to use indirect register accesses
13974                  * for all chip writes not to mailbox registers.
13975                  */
13976                 if (tg3_flag(tp, PCIX_MODE)) {
13977                         u32 pm_reg;
13978
13979                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
13980
13981                         /* The chip can have its power management PCI config
13982                          * space registers clobbered due to this bug.
13983                          * So explicitly force the chip into D0 here.
13984                          */
13985                         pci_read_config_dword(tp->pdev,
13986                                               tp->pm_cap + PCI_PM_CTRL,
13987                                               &pm_reg);
13988                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13989                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13990                         pci_write_config_dword(tp->pdev,
13991                                                tp->pm_cap + PCI_PM_CTRL,
13992                                                pm_reg);
13993
13994                         /* Also, force SERR#/PERR# in PCI command. */
13995                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13996                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13997                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13998                 }
13999         }
14000
14001         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14002                 tg3_flag_set(tp, PCI_HIGH_SPEED);
14003         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14004                 tg3_flag_set(tp, PCI_32BIT);
14005
14006         /* Chip-specific fixup from Broadcom driver */
14007         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14008             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14009                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14010                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14011         }
14012
14013         /* Default fast path register access methods */
14014         tp->read32 = tg3_read32;
14015         tp->write32 = tg3_write32;
14016         tp->read32_mbox = tg3_read32;
14017         tp->write32_mbox = tg3_write32;
14018         tp->write32_tx_mbox = tg3_write32;
14019         tp->write32_rx_mbox = tg3_write32;
14020
14021         /* Various workaround register access methods */
14022         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14023                 tp->write32 = tg3_write_indirect_reg32;
14024         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14025                  (tg3_flag(tp, PCI_EXPRESS) &&
14026                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14027                 /*
14028                  * Back-to-back register writes can cause problems on these
14029                  * chips; the workaround is to read back all register writes
14030                  * except those to mailbox registers.
14031                  *
14032                  * See tg3_write_indirect_reg32().
14033                  */
14034                 tp->write32 = tg3_write_flush_reg32;
14035         }
14036
14037         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14038                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14039                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14040                         tp->write32_rx_mbox = tg3_write_flush_reg32;
14041         }
14042
14043         if (tg3_flag(tp, ICH_WORKAROUND)) {
14044                 tp->read32 = tg3_read_indirect_reg32;
14045                 tp->write32 = tg3_write_indirect_reg32;
14046                 tp->read32_mbox = tg3_read_indirect_mbox;
14047                 tp->write32_mbox = tg3_write_indirect_mbox;
14048                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14049                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14050
14051                 iounmap(tp->regs);
14052                 tp->regs = NULL;
14053
14054                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14055                 pci_cmd &= ~PCI_COMMAND_MEMORY;
14056                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14057         }
14058         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14059                 tp->read32_mbox = tg3_read32_mbox_5906;
14060                 tp->write32_mbox = tg3_write32_mbox_5906;
14061                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14062                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14063         }
14064
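              /* If registers already go through indirect (config space)
               * accesses, or on 5700/5701 in PCI-X mode, SRAM has to be
               * reached through config cycles as well.
               */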
14065         if (tp->write32 == tg3_write_indirect_reg32 ||
14066             (tg3_flag(tp, PCIX_MODE) &&
14067              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14068               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14069                 tg3_flag_set(tp, SRAM_USE_CONFIG);
14070
14071         /* The memory arbiter has to be enabled in order for SRAM accesses
14072          * to succeed.  Normally on powerup the tg3 chip firmware will make
14073          * sure it is enabled, but other entities such as system netboot
14074          * code might disable it.
14075          */
14076         val = tr32(MEMARB_MODE);
14077         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14078
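              /* Determine which PCI function we are: PCI-X exposes it in
               * the PCI-X status register, otherwise derive it from devfn.
               */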
14079         if (tg3_flag(tp, PCIX_MODE)) {
14080                 pci_read_config_dword(tp->pdev,
14081                                       tp->pcix_cap + PCI_X_STATUS, &val);
14082                 tp->pci_fn = val & 0x7;
14083         } else {
14084                 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14085         }
14086
14087         /* Get eeprom hw config before calling tg3_set_power_state().
14088          * In particular, the TG3_FLAG_IS_NIC flag must be
14089          * determined before calling tg3_set_power_state() so that
14090          * we know whether or not to switch out of Vaux power.
14091          * When the flag is set, it means that GPIO1 is used for eeprom
14092          * write protect and also implies that it is a LOM where GPIOs
14093          * are not used to switch power.
14094          */
14095         tg3_get_eeprom_hw_cfg(tp);
14096
14097         if (tg3_flag(tp, ENABLE_APE)) {
14098                 /* Allow reads and writes to the
14099                  * APE register and memory space.
14100                  */
14101                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14102                                  PCISTATE_ALLOW_APE_SHMEM_WR |
14103                                  PCISTATE_ALLOW_APE_PSPACE_WR;
14104                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14105                                        pci_state_reg);
14106
14107                 tg3_ape_lock_init(tp);
14108         }
14109
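              /* These chips have a CPMU block; the MDIO clocking below
               * uses its constant 500KHz mode.
               */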
14110         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14111             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14112             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14113             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14114             tg3_flag(tp, 57765_PLUS))
14115                 tg3_flag_set(tp, CPMU_PRESENT);
14116
14117         /* Set up tp->grc_local_ctrl before calling
14118          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14119          * will bring 5700's external PHY out of reset.
14120          * It is also used as eeprom write protect on LOMs.
14121          */
14122         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14123         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14124             tg3_flag(tp, EEPROM_WRITE_PROT))
14125                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14126                                        GRC_LCLCTRL_GPIO_OUTPUT1);
14127         /* Unused GPIO3 must be driven as output on 5752 because there
14128          * are no pull-up resistors on unused GPIO pins.
14129          */
14130         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14131                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14132
14133         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14134             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14135             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
14136                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14137
14138         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14139             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14140                 /* Turn off the debug UART. */
14141                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14142                 if (tg3_flag(tp, IS_NIC))
14143                         /* Keep VMain power. */
14144                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14145                                               GRC_LCLCTRL_GPIO_OUTPUT0;
14146         }
14147
14148         /* Switch out of Vaux if it is a NIC */
14149         tg3_pwrsrc_switch_to_vmain(tp);
14150
14151         /* Derive initial jumbo mode from MTU assigned in
14152          * ether_setup() via the alloc_etherdev() call
14153          */
14154         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14155                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14156
14157         /* Determine WakeOnLan speed to use. */
14158         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14159             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14160             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14161             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14162                 tg3_flag_clear(tp, WOL_SPEED_100MB);
14163         } else {
14164                 tg3_flag_set(tp, WOL_SPEED_100MB);
14165         }
14166
14167         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14168                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14169
14170         /* A few boards don't want Ethernet@WireSpeed phy feature */
14171         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14172             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14173              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14174              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14175             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14176             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14177                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14178
14179         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14180             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14181                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14182         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14183                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14184
14185         if (tg3_flag(tp, 5705_PLUS) &&
14186             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14187             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14188             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14189             !tg3_flag(tp, 57765_PLUS)) {
14190                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14191                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14192                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14193                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14194                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14195                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14196                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14197                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14198                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14199                 } else
14200                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14201         }
14202
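              /* Non-AX 5784 parts keep PHY tuning values in OTP; fall
               * back to the defaults if the OTP word reads blank.
               */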
14203         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14204             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14205                 tp->phy_otp = tg3_read_otp_phycfg(tp);
14206                 if (tp->phy_otp == 0)
14207                         tp->phy_otp = TG3_OTP_DEFAULT;
14208         }
14209
14210         if (tg3_flag(tp, CPMU_PRESENT))
14211                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14212         else
14213                 tp->mi_mode = MAC_MI_MODE_BASE;
14214
14215         tp->coalesce_mode = 0;
14216         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14217             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14218                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14219
14220         /* Set these bits to enable statistics workaround. */
14221         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14222             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14223             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14224                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14225                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14226         }
14227
14228         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14229             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14230                 tg3_flag_set(tp, USE_PHYLIB);
14231
14232         err = tg3_mdio_init(tp);
14233         if (err)
14234                 return err;
14235
14236         /* Initialize data/descriptor byte/word swapping. */
14237         val = tr32(GRC_MODE);
14238         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14239                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14240                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14241                         GRC_MODE_B2HRX_ENABLE |
14242                         GRC_MODE_HTX2B_ENABLE |
14243                         GRC_MODE_HOST_STACKUP);
14244         else
14245                 val &= GRC_MODE_HOST_STACKUP;
14246
14247         tw32(GRC_MODE, val | tp->grc_mode);
14248
14249         tg3_switch_clocks(tp);
14250
14251         /* Clear this out for sanity. */
14252         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14253
14254         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14255                               &pci_state_reg);
14256         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14257             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14258                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14259
14260                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14261                     chiprevid == CHIPREV_ID_5701_B0 ||
14262                     chiprevid == CHIPREV_ID_5701_B2 ||
14263                     chiprevid == CHIPREV_ID_5701_B5) {
14264                         void __iomem *sram_base;
14265
14266                         /* Write some dummy words into the SRAM status block
14267                          * area and see if they read back correctly.  If the
14268                          * readback is bad, force-enable the PCIX workaround.
14269                          */
14270                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14271
14272                         writel(0x00000000, sram_base);
14273                         writel(0x00000000, sram_base + 4);
14274                         writel(0xffffffff, sram_base + 4);
14275                         if (readl(sram_base) != 0x00000000)
14276                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14277                 }
14278         }
14279
14280         udelay(50);
14281         tg3_nvram_init(tp);
14282
14283         grc_misc_cfg = tr32(GRC_MISC_CFG);
14284         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14285
14286         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14287             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14288              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14289                 tg3_flag_set(tp, IS_5788);
14290
14291         if (!tg3_flag(tp, IS_5788) &&
14292             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14293                 tg3_flag_set(tp, TAGGED_STATUS);
14294         if (tg3_flag(tp, TAGGED_STATUS)) {
14295                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14296                                       HOSTCC_MODE_CLRTICK_TXBD);
14297
14298                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14299                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14300                                        tp->misc_host_ctrl);
14301         }
14302
14303         /* Preserve the APE MAC_MODE bits */
14304         if (tg3_flag(tp, ENABLE_APE))
14305                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14306         else
14307                 tp->mac_mode = TG3_DEF_MAC_MODE;
14308
14309         /* these are limited to 10/100 only */
14310         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14311              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14312             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14313              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14314              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14315               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14316               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14317             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14318              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14319               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14320               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14321             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14322             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14323             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14324             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14325                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14326
14327         err = tg3_phy_probe(tp);
14328         if (err) {
14329                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14330                 /* ... but do not return immediately ... */
14331                 tg3_mdio_fini(tp);
14332         }
14333
14334         tg3_read_vpd(tp);
14335         tg3_read_fw_ver(tp);
14336
14337         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14338                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14339         } else {
14340                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14341                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14342                 else
14343                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14344         }
14345
14346         /* 5700 {AX,BX} chips have a broken status block link
14347          * change bit implementation, so we must use the
14348          * status register in those cases.
14349          */
14350         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14351                 tg3_flag_set(tp, USE_LINKCHG_REG);
14352         else
14353                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14354
14355         /* The led_ctrl is set during tg3_phy_probe; here we might
14356          * have to force the link status polling mechanism based
14357          * upon subsystem IDs.
14358          */
14359         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14360             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14361             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14362                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14363                 tg3_flag_set(tp, USE_LINKCHG_REG);
14364         }
14365
14366         /* For all SERDES we poll the MAC status register. */
14367         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14368                 tg3_flag_set(tp, POLL_SERDES);
14369         else
14370                 tg3_flag_clear(tp, POLL_SERDES);
14371
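
              /* Default RX alignment and copybreak.  The 5701 in PCI-X
               * mode cannot DMA to 2-byte-offset buffers, so drop the IP
               * alignment pad there and, on hosts without efficient
               * unaligned access, copy every packet instead.
               */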
14372         tp->rx_offset = NET_IP_ALIGN;
14373         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14374         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14375             tg3_flag(tp, PCIX_MODE)) {
14376                 tp->rx_offset = 0;
14377 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14378                 tp->rx_copy_thresh = ~(u16)0;
14379 #endif
14380         }
14381
14382         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14383         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14384         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14385
14386         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14387
14388         /* Increment the rx prod index on the rx std ring by at most
14389          * 8 for these chips to work around hw errata.
14390          */
14391         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14392             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14393             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14394                 tp->rx_std_max_post = 8;
14395
14396         if (tg3_flag(tp, ASPM_WORKAROUND))
14397                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14398                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14399
14400         return err;
14401 }
14402
14403 #ifdef CONFIG_SPARC
14404 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14405 {
14406         struct net_device *dev = tp->dev;
14407         struct pci_dev *pdev = tp->pdev;
14408         struct device_node *dp = pci_device_to_OF_node(pdev);
14409         const unsigned char *addr;
14410         int len;
14411
14412         addr = of_get_property(dp, "local-mac-address", &len);
14413         if (addr && len == 6) {
14414                 memcpy(dev->dev_addr, addr, 6);
14415                 memcpy(dev->perm_addr, dev->dev_addr, 6);
14416                 return 0;
14417         }
14418         return -ENODEV;
14419 }
14420
14421 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14422 {
14423         struct net_device *dev = tp->dev;
14424
14425         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14426         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14427         return 0;
14428 }
14429 #endif
14430
14431 static int __devinit tg3_get_device_address(struct tg3 *tp)
14432 {
14433         struct net_device *dev = tp->dev;
14434         u32 hi, lo, mac_offset;
14435         int addr_ok = 0;
14436
14437 #ifdef CONFIG_SPARC
14438         if (!tg3_get_macaddr_sparc(tp))
14439                 return 0;
14440 #endif
14441
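              /* The MAC address lives at NVRAM offset 0x7c by default;
               * dual-MAC and multi-function parts use the alternate
               * offsets selected below.
               */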
14442         mac_offset = 0x7c;
14443         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14444             tg3_flag(tp, 5780_CLASS)) {
14445                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14446                         mac_offset = 0xcc;
14447                 if (tg3_nvram_lock(tp))
14448                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14449                 else
14450                         tg3_nvram_unlock(tp);
14451         } else if (tg3_flag(tp, 5717_PLUS)) {
14452                 if (tp->pci_fn & 1)
14453                         mac_offset = 0xcc;
14454                 if (tp->pci_fn > 1)
14455                         mac_offset += 0x18c;
14456         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14457                 mac_offset = 0x10;
14458
14459         /* First try to get it from MAC address mailbox. */
14460         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
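              /* Bootcode stores ASCII "HK" (0x484b) in the upper half
               * when a valid address is present.
               */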
14461         if ((hi >> 16) == 0x484b) {
14462                 dev->dev_addr[0] = (hi >>  8) & 0xff;
14463                 dev->dev_addr[1] = (hi >>  0) & 0xff;
14464
14465                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14466                 dev->dev_addr[2] = (lo >> 24) & 0xff;
14467                 dev->dev_addr[3] = (lo >> 16) & 0xff;
14468                 dev->dev_addr[4] = (lo >>  8) & 0xff;
14469                 dev->dev_addr[5] = (lo >>  0) & 0xff;
14470
14471                 /* Some old bootcode may report a 0 MAC address in SRAM */
14472                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14473         }
14474         if (!addr_ok) {
14475                 /* Next, try NVRAM. */
14476                 if (!tg3_flag(tp, NO_NVRAM) &&
14477                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14478                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14479                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14480                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14481                 }
14482                 /* Finally just fetch it out of the MAC control regs. */
14483                 else {
14484                         hi = tr32(MAC_ADDR_0_HIGH);
14485                         lo = tr32(MAC_ADDR_0_LOW);
14486
14487                         dev->dev_addr[5] = lo & 0xff;
14488                         dev->dev_addr[4] = (lo >> 8) & 0xff;
14489                         dev->dev_addr[3] = (lo >> 16) & 0xff;
14490                         dev->dev_addr[2] = (lo >> 24) & 0xff;
14491                         dev->dev_addr[1] = hi & 0xff;
14492                         dev->dev_addr[0] = (hi >> 8) & 0xff;
14493                 }
14494         }
14495
14496         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14497 #ifdef CONFIG_SPARC
14498                 if (!tg3_get_default_macaddr_sparc(tp))
14499                         return 0;
14500 #endif
14501                 return -EINVAL;
14502         }
14503         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14504         return 0;
14505 }
14506
14507 #define BOUNDARY_SINGLE_CACHELINE       1
14508 #define BOUNDARY_MULTI_CACHELINE        2
14509
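      /* Fold the desired DMA burst boundary for this host into the
       * DMA_RW_CTRL value.  RISC hosts prefer that devices not burst
       * across cache lines, so the boundary bits are chosen from the
       * PCI cache line size and the bus type.
       */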
14510 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14511 {
14512         int cacheline_size;
14513         u8 byte;
14514         int goal;
14515
14516         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14517         if (byte == 0)
14518                 cacheline_size = 1024;
14519         else
14520                 cacheline_size = (int) byte * 4;
14521
14522         /* On 5703 and later chips, the boundary bits have no
14523          * effect.
14524          */
14525         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14526             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14527             !tg3_flag(tp, PCI_EXPRESS))
14528                 goto out;
14529
14530 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14531         goal = BOUNDARY_MULTI_CACHELINE;
14532 #else
14533 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14534         goal = BOUNDARY_SINGLE_CACHELINE;
14535 #else
14536         goal = 0;
14537 #endif
14538 #endif
14539
14540         if (tg3_flag(tp, 57765_PLUS)) {
14541                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14542                 goto out;
14543         }
14544
14545         if (!goal)
14546                 goto out;
14547
14548         /* PCI controllers on most RISC systems tend to disconnect
14549          * when a device tries to burst across a cache-line boundary.
14550          * Therefore, letting tg3 do so just wastes PCI bandwidth.
14551          *
14552          * Unfortunately, for PCI-E there are only limited
14553          * write-side controls for this, and thus for reads
14554          * we will still get the disconnects.  We'll also waste
14555          * these PCI cycles for both read and write on chips
14556          * other than the 5700 and 5701, which are the only ones
14557          * that implement the boundary bits.
14558          */
14559         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14560                 switch (cacheline_size) {
14561                 case 16:
14562                 case 32:
14563                 case 64:
14564                 case 128:
14565                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14566                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14567                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14568                         } else {
14569                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14570                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14571                         }
14572                         break;
14573
14574                 case 256:
14575                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14576                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14577                         break;
14578
14579                 default:
14580                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14581                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14582                         break;
14583                 }
14584         } else if (tg3_flag(tp, PCI_EXPRESS)) {
14585                 switch (cacheline_size) {
14586                 case 16:
14587                 case 32:
14588                 case 64:
14589                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14590                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14591                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14592                                 break;
14593                         }
14594                         /* fallthrough */
14595                 case 128:
14596                 default:
14597                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14598                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14599                         break;
14600                 }
14601         } else {
14602                 switch (cacheline_size) {
14603                 case 16:
14604                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14605                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14606                                         DMA_RWCTRL_WRITE_BNDRY_16);
14607                                 break;
14608                         }
14609                         /* fallthrough */
14610                 case 32:
14611                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14612                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14613                                         DMA_RWCTRL_WRITE_BNDRY_32);
14614                                 break;
14615                         }
14616                         /* fallthrough */
14617                 case 64:
14618                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14619                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14620                                         DMA_RWCTRL_WRITE_BNDRY_64);
14621                                 break;
14622                         }
14623                         /* fallthrough */
14624                 case 128:
14625                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14626                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14627                                         DMA_RWCTRL_WRITE_BNDRY_128);
14628                                 break;
14629                         }
14630                         /* fallthrough */
14631                 case 256:
14632                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
14633                                 DMA_RWCTRL_WRITE_BNDRY_256);
14634                         break;
14635                 case 512:
14636                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
14637                                 DMA_RWCTRL_WRITE_BNDRY_512);
14638                         break;
14639                 case 1024:
14640                 default:
14641                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14642                                 DMA_RWCTRL_WRITE_BNDRY_1024);
14643                         break;
14644                 }
14645         }
14646
14647 out:
14648         return val;
14649 }
14650
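      /* Push one descriptor through the chip's internal DMA engine to
       * move size bytes between the host buffer at buf_dma and NIC SRAM.
       * to_device selects read DMA (host to NIC) versus write DMA; the
       * matching completion FIFO is then polled for the descriptor
       * address.
       */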
14651 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14652 {
14653         struct tg3_internal_buffer_desc test_desc;
14654         u32 sram_dma_descs;
14655         int i, ret;
14656
14657         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14658
14659         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14660         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14661         tw32(RDMAC_STATUS, 0);
14662         tw32(WDMAC_STATUS, 0);
14663
14664         tw32(BUFMGR_MODE, 0);
14665         tw32(FTQ_RESET, 0);
14666
14667         test_desc.addr_hi = ((u64) buf_dma) >> 32;
14668         test_desc.addr_lo = buf_dma & 0xffffffff;
14669         test_desc.nic_mbuf = 0x00002100;
14670         test_desc.len = size;
14671
14672         /*
14673          * HP ZX1 systems saw test failures with 5701 cards running at 33MHz
14674          * the *second* time the tg3 driver was loaded after an
14675          * initial scan.
14676          *
14677          * Broadcom tells me:
14678          *   ...the DMA engine is connected to the GRC block and a DMA
14679          *   reset may affect the GRC block in some unpredictable way...
14680          *   The behavior of resets to individual blocks has not been tested.
14681          *
14682          * Broadcom noted the GRC reset will also reset all sub-components.
14683          */
14684         if (to_device) {
14685                 test_desc.cqid_sqid = (13 << 8) | 2;
14686
14687                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14688                 udelay(40);
14689         } else {
14690                 test_desc.cqid_sqid = (16 << 8) | 7;
14691
14692                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14693                 udelay(40);
14694         }
14695         test_desc.flags = 0x00000005;
14696
14697         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14698                 u32 val;
14699
14700                 val = *(((u32 *)&test_desc) + i);
14701                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14702                                        sram_dma_descs + (i * sizeof(u32)));
14703                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14704         }
14705         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
14706
14707         if (to_device)
14708                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14709         else
14710                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14711
14712         ret = -ENODEV;
14713         for (i = 0; i < 40; i++) {
14714                 u32 val;
14715
14716                 if (to_device)
14717                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14718                 else
14719                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14720                 if ((val & 0xffff) == sram_dma_descs) {
14721                         ret = 0;
14722                         break;
14723                 }
14724
14725                 udelay(100);
14726         }
14727
14728         return ret;
14729 }
14730
14731 #define TEST_BUFFER_SIZE        0x2000
14732
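      /* Host bridges known to need the 16-byte write boundary even
       * though they pass the DMA test below.
       */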
14733 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14734         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14735         { },
14736 };
14737
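      /* Size DMA_RW_CTRL for this host/bus combination and, on 5700/5701,
       * run a loopback DMA pattern test to catch the write DMA bug,
       * tightening the write boundary to 16 bytes if corruption is seen.
       */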
14738 static int __devinit tg3_test_dma(struct tg3 *tp)
14739 {
14740         dma_addr_t buf_dma;
14741         u32 *buf, saved_dma_rwctrl;
14742         int ret = 0;
14743
14744         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14745                                  &buf_dma, GFP_KERNEL);
14746         if (!buf) {
14747                 ret = -ENOMEM;
14748                 goto out_nofree;
14749         }
14750
14751         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14752                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14753
14754         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14755
14756         if (tg3_flag(tp, 57765_PLUS))
14757                 goto out;
14758
14759         if (tg3_flag(tp, PCI_EXPRESS)) {
14760                 /* DMA read watermark not used on PCIE */
14761                 tp->dma_rwctrl |= 0x00180000;
14762         } else if (!tg3_flag(tp, PCIX_MODE)) {
14763                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14764                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14765                         tp->dma_rwctrl |= 0x003f0000;
14766                 else
14767                         tp->dma_rwctrl |= 0x003f000f;
14768         } else {
14769                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14770                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14771                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14772                         u32 read_water = 0x7;
14773
14774                         /* If the 5704 is behind the EPB bridge, we can
14775                          * do the less restrictive ONE_DMA workaround for
14776                          * better performance.
14777                          */
14778                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14779                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14780                                 tp->dma_rwctrl |= 0x8000;
14781                         else if (ccval == 0x6 || ccval == 0x7)
14782                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14783
14784                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14785                                 read_water = 4;
14786                         /* Set bit 23 to enable PCIX hw bug fix */
14787                         tp->dma_rwctrl |=
14788                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14789                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14790                                 (1 << 23);
14791                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14792                         /* 5780 always in PCIX mode */
14793                         tp->dma_rwctrl |= 0x00144000;
14794                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14795                         /* 5714 always in PCIX mode */
14796                         tp->dma_rwctrl |= 0x00148000;
14797                 } else {
14798                         tp->dma_rwctrl |= 0x001b000f;
14799                 }
14800         }
14801
14802         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14803             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14804                 tp->dma_rwctrl &= 0xfffffff0;
14805
14806         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14807             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14808                 /* Remove this if it causes problems for some boards. */
14809                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14810
14811                 /* On 5700/5701 chips, we need to set this bit.
14812                  * Otherwise the chip will issue cacheline transactions
14813                  * to streamable DMA memory without all of the byte
14814                  * enables asserted.  This is an error on several
14815                  * RISC PCI controllers, in particular sparc64.
14816                  *
14817                  * On 5703/5704 chips, this bit has been reassigned
14818                  * a different meaning.  In particular, it is used
14819                  * on those chips to enable a PCI-X workaround.
14820                  */
14821                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14822         }
14823
14824         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14825
14826 #if 0
14827         /* Unneeded, already done by tg3_get_invariants.  */
14828         tg3_switch_clocks(tp);
14829 #endif
14830
14831         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14832             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14833                 goto out;
14834
14835         /* It is best to perform the DMA test with the maximum write
14836          * burst size to expose the 5700/5701 write DMA bug.
14837          */
14838         saved_dma_rwctrl = tp->dma_rwctrl;
14839         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14840         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14841
14842         while (1) {
14843                 u32 *p = buf, i;
14844
14845                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14846                         p[i] = i;
14847
14848                 /* Send the buffer to the chip. */
14849                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14850                 if (ret) {
14851                         dev_err(&tp->pdev->dev,
14852                                 "%s: Buffer write failed. err = %d\n",
14853                                 __func__, ret);
14854                         break;
14855                 }
14856
14857 #if 0
14858                 /* validate data reached card RAM correctly. */
14859                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14860                         u32 val;
14861                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
14862                         if (le32_to_cpu(val) != p[i]) {
14863                                 dev_err(&tp->pdev->dev,
14864                                         "%s: Buffer corrupted on device! "
14865                                         "(%d != %d)\n", __func__, le32_to_cpu(val), i);
14866                                 /* ret = -ENODEV here? */
14867                         }
14868                         p[i] = 0;
14869                 }
14870 #endif
14871                 /* Now read it back. */
14872                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14873                 if (ret) {
14874                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14875                                 "err = %d\n", __func__, ret);
14876                         break;
14877                 }
14878
14879                 /* Verify it. */
14880                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14881                         if (p[i] == i)
14882                                 continue;
14883
14884                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14885                             DMA_RWCTRL_WRITE_BNDRY_16) {
14886                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14887                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14888                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14889                                 break;
14890                         } else {
14891                                 dev_err(&tp->pdev->dev,
14892                                         "%s: Buffer corrupted on read back! "
14893                                         "(%d != %d)\n", __func__, p[i], i);
14894                                 ret = -ENODEV;
14895                                 goto out;
14896                         }
14897                 }
14898
14899                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14900                         /* Success. */
14901                         ret = 0;
14902                         break;
14903                 }
14904         }
14905         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14906             DMA_RWCTRL_WRITE_BNDRY_16) {
14907                 /* DMA test passed without adjusting the DMA boundary;
14908                  * now look for chipsets that are known to expose the
14909                  * DMA bug without failing the test.
14910                  */
14911                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
14912                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14913                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14914                 } else {
14915                         /* Safe to use the calculated DMA boundary. */
14916                         tp->dma_rwctrl = saved_dma_rwctrl;
14917                 }
14918
14919                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14920         }
14921
14922 out:
14923         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
14924 out_nofree:
14925         return ret;
14926 }
14927
14928 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14929 {
14930         if (tg3_flag(tp, 57765_PLUS)) {
14931                 tp->bufmgr_config.mbuf_read_dma_low_water =
14932                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14933                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14934                         DEFAULT_MB_MACRX_LOW_WATER_57765;
14935                 tp->bufmgr_config.mbuf_high_water =
14936                         DEFAULT_MB_HIGH_WATER_57765;
14937
14938                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14939                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14940                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14941                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14942                 tp->bufmgr_config.mbuf_high_water_jumbo =
14943                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14944         } else if (tg3_flag(tp, 5705_PLUS)) {
14945                 tp->bufmgr_config.mbuf_read_dma_low_water =
14946                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14947                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14948                         DEFAULT_MB_MACRX_LOW_WATER_5705;
14949                 tp->bufmgr_config.mbuf_high_water =
14950                         DEFAULT_MB_HIGH_WATER_5705;
14951                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14952                         tp->bufmgr_config.mbuf_mac_rx_low_water =
14953                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
14954                         tp->bufmgr_config.mbuf_high_water =
14955                                 DEFAULT_MB_HIGH_WATER_5906;
14956                 }
14957
14958                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14959                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14960                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14961                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14962                 tp->bufmgr_config.mbuf_high_water_jumbo =
14963                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
14964         } else {
14965                 tp->bufmgr_config.mbuf_read_dma_low_water =
14966                         DEFAULT_MB_RDMA_LOW_WATER;
14967                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14968                         DEFAULT_MB_MACRX_LOW_WATER;
14969                 tp->bufmgr_config.mbuf_high_water =
14970                         DEFAULT_MB_HIGH_WATER;
14971
14972                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14973                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14974                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14975                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14976                 tp->bufmgr_config.mbuf_high_water_jumbo =
14977                         DEFAULT_MB_HIGH_WATER_JUMBO;
14978         }
14979
14980         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14981         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
14982 }
14983
14984 static char * __devinit tg3_phy_string(struct tg3 *tp)
14985 {
14986         switch (tp->phy_id & TG3_PHY_ID_MASK) {
14987         case TG3_PHY_ID_BCM5400:        return "5400";
14988         case TG3_PHY_ID_BCM5401:        return "5401";
14989         case TG3_PHY_ID_BCM5411:        return "5411";
14990         case TG3_PHY_ID_BCM5701:        return "5701";
14991         case TG3_PHY_ID_BCM5703:        return "5703";
14992         case TG3_PHY_ID_BCM5704:        return "5704";
14993         case TG3_PHY_ID_BCM5705:        return "5705";
14994         case TG3_PHY_ID_BCM5750:        return "5750";
14995         case TG3_PHY_ID_BCM5752:        return "5752";
14996         case TG3_PHY_ID_BCM5714:        return "5714";
14997         case TG3_PHY_ID_BCM5780:        return "5780";
14998         case TG3_PHY_ID_BCM5755:        return "5755";
14999         case TG3_PHY_ID_BCM5787:        return "5787";
15000         case TG3_PHY_ID_BCM5784:        return "5784";
15001         case TG3_PHY_ID_BCM5756:        return "5722/5756";
15002         case TG3_PHY_ID_BCM5906:        return "5906";
15003         case TG3_PHY_ID_BCM5761:        return "5761";
15004         case TG3_PHY_ID_BCM5718C:       return "5718C";
15005         case TG3_PHY_ID_BCM5718S:       return "5718S";
15006         case TG3_PHY_ID_BCM57765:       return "57765";
15007         case TG3_PHY_ID_BCM5719C:       return "5719C";
15008         case TG3_PHY_ID_BCM5720C:       return "5720C";
15009         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
15010         case 0:                 return "serdes";
15011         default:                return "unknown";
15012         }
15013 }
15014
15015 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15016 {
15017         if (tg3_flag(tp, PCI_EXPRESS)) {
15018                 strcpy(str, "PCI Express");
15019                 return str;
15020         } else if (tg3_flag(tp, PCIX_MODE)) {
15021                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15022
15023                 strcpy(str, "PCIX:");
15024
15025                 if ((clock_ctrl == 7) ||
15026                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15027                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15028                         strcat(str, "133MHz");
15029                 else if (clock_ctrl == 0)
15030                         strcat(str, "33MHz");
15031                 else if (clock_ctrl == 2)
15032                         strcat(str, "50MHz");
15033                 else if (clock_ctrl == 4)
15034                         strcat(str, "66MHz");
15035                 else if (clock_ctrl == 6)
15036                         strcat(str, "100MHz");
15037         } else {
15038                 strcpy(str, "PCI:");
15039                 if (tg3_flag(tp, PCI_HIGH_SPEED))
15040                         strcat(str, "66MHz");
15041                 else
15042                         strcat(str, "33MHz");
15043         }
15044         if (tg3_flag(tp, PCI_32BIT))
15045                 strcat(str, ":32-bit");
15046         else
15047                 strcat(str, ":64-bit");
15048         return str;
15049 }
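
/* Usage sketch (illustrative, not compiled): the caller supplies the
 * buffer that tg3_bus_string() fills and returns; the probe path below
 * passes a char str[40].  Note the PCI Express case returns before the
 * ":32-bit"/":64-bit" suffix is appended.
 */
#if 0
	{
		char str[40];

		netdev_info(dev, "bus: %s\n", tg3_bus_string(tp, str));
	}
#endif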
15050
15051 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
15052 {
15053         struct pci_dev *peer;
15054         unsigned int func, devnr = tp->pdev->devfn & ~7;
15055
15056         for (func = 0; func < 8; func++) {
15057                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15058                 if (peer && peer != tp->pdev)
15059                         break;
15060                 pci_dev_put(peer);
15061         }
15062         /* The 5704 can be configured in single-port mode; in that
15063          * case, set peer to tp->pdev.
15064          */
15065         if (!peer) {
15066                 peer = tp->pdev;
15067                 return peer;
15068         }
15069
15070         /*
15071          * We don't need to keep the refcount elevated; there's no way
15072          * to remove one half of this device without removing the other.
15073          */
15074         pci_dev_put(peer);
15075
15076         return peer;
15077 }
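
/*
 * For reference: devfn packs the PCI device number in bits 7:3 and the
 * function number in bits 2:0 (see PCI_SLOT()/PCI_FUNC() in
 * <linux/pci.h>), so (devfn & ~7) | func above walks all eight
 * functions of the same device.
 */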
15078
15079 static void __devinit tg3_init_coal(struct tg3 *tp)
15080 {
15081         struct ethtool_coalesce *ec = &tp->coal;
15082
15083         memset(ec, 0, sizeof(*ec));
15084         ec->cmd = ETHTOOL_GCOALESCE;
15085         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15086         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15087         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15088         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15089         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15090         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15091         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15092         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15093         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15094
15095         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15096                                  HOSTCC_MODE_CLRTICK_TXBD)) {
15097                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15098                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15099                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15100                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15101         }
15102
15103         if (tg3_flag(tp, 5705_PLUS)) {
15104                 ec->rx_coalesce_usecs_irq = 0;
15105                 ec->tx_coalesce_usecs_irq = 0;
15106                 ec->stats_block_coalesce_usecs = 0;
15107         }
15108 }
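
/*
 * These defaults are what ethtool reports before any user tuning; e.g.
 * "ethtool -c eth0" displays them, and "ethtool -C eth0 rx-usecs 50"
 * would adjust rx_coalesce_usecs (interface name and value are
 * illustrative only).
 */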
15109
15110 static const struct net_device_ops tg3_netdev_ops = {
15111         .ndo_open               = tg3_open,
15112         .ndo_stop               = tg3_close,
15113         .ndo_start_xmit         = tg3_start_xmit,
15114         .ndo_get_stats64        = tg3_get_stats64,
15115         .ndo_validate_addr      = eth_validate_addr,
15116         .ndo_set_multicast_list = tg3_set_rx_mode,
15117         .ndo_set_mac_address    = tg3_set_mac_addr,
15118         .ndo_do_ioctl           = tg3_ioctl,
15119         .ndo_tx_timeout         = tg3_tx_timeout,
15120         .ndo_change_mtu         = tg3_change_mtu,
15121         .ndo_fix_features       = tg3_fix_features,
15122         .ndo_set_features       = tg3_set_features,
15123 #ifdef CONFIG_NET_POLL_CONTROLLER
15124         .ndo_poll_controller    = tg3_poll_controller,
15125 #endif
15126 };
15127
15128 static int __devinit tg3_init_one(struct pci_dev *pdev,
15129                                   const struct pci_device_id *ent)
15130 {
15131         struct net_device *dev;
15132         struct tg3 *tp;
15133         int i, err, pm_cap;
15134         u32 sndmbx, rcvmbx, intmbx;
15135         char str[40];
15136         u64 dma_mask, persist_dma_mask;
15137         u32 features = 0;
15138
15139         printk_once(KERN_INFO "%s\n", version);
15140
15141         err = pci_enable_device(pdev);
15142         if (err) {
15143                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15144                 return err;
15145         }
15146
15147         err = pci_request_regions(pdev, DRV_MODULE_NAME);
15148         if (err) {
15149                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15150                 goto err_out_disable_pdev;
15151         }
15152
15153         pci_set_master(pdev);
15154
15155         /* Find power-management capability. */
15156         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15157         if (pm_cap == 0) {
15158                 dev_err(&pdev->dev,
15159                         "Cannot find Power Management capability, aborting\n");
15160                 err = -EIO;
15161                 goto err_out_free_res;
15162         }
15163
15164         err = pci_set_power_state(pdev, PCI_D0);
15165         if (err) {
15166                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15167                 goto err_out_free_res;
15168         }
15169
15170         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15171         if (!dev) {
15172                 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
15173                 err = -ENOMEM;
15174                 goto err_out_power_down;
15175         }
15176
15177         SET_NETDEV_DEV(dev, &pdev->dev);
15178
15179         tp = netdev_priv(dev);
15180         tp->pdev = pdev;
15181         tp->dev = dev;
15182         tp->pm_cap = pm_cap;
15183         tp->rx_mode = TG3_DEF_RX_MODE;
15184         tp->tx_mode = TG3_DEF_TX_MODE;
15185
15186         if (tg3_debug > 0)
15187                 tp->msg_enable = tg3_debug;
15188         else
15189                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15190
15191         /* The word/byte swap controls set here govern register access
15192          * byte swapping only.  DMA data byte swapping is controlled by
15193          * the GRC_MODE setting below.
15194          */
15195         tp->misc_host_ctrl =
15196                 MISC_HOST_CTRL_MASK_PCI_INT |
15197                 MISC_HOST_CTRL_WORD_SWAP |
15198                 MISC_HOST_CTRL_INDIR_ACCESS |
15199                 MISC_HOST_CTRL_PCISTATE_RW;
15200
15201         /* The NONFRM (non-frame) byte/word swap controls take effect
15202          * on descriptor entries, i.e. anything that isn't packet data.
15203          *
15204          * The StrongARM chips on the board (one for tx, one for rx)
15205          * are running in big-endian mode.
15206          */
15207         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15208                         GRC_MODE_WSWAP_NONFRM_DATA);
15209 #ifdef __BIG_ENDIAN
15210         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15211 #endif
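
        /*
         * Illustrative sketch (not compiled): swab32() from
         * <linux/swab.h> shows what a byte swap does to a 32-bit
         * quantity; the GRC_MODE bits above select where the hardware
         * applies such swaps (frame vs. non-frame data), which is why
         * BSWAP_NONFRM_DATA is only needed on big-endian hosts.
         */
#if 0
        {
                u32 word = 0x11223344;

                /* swab32() reverses byte order: 0x11223344 -> 0x44332211 */
                WARN_ON(swab32(word) != 0x44332211);
        }
#endif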
15212         spin_lock_init(&tp->lock);
15213         spin_lock_init(&tp->indirect_lock);
15214         INIT_WORK(&tp->reset_task, tg3_reset_task);
15215
15216         tp->regs = pci_ioremap_bar(pdev, BAR_0);
15217         if (!tp->regs) {
15218                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15219                 err = -ENOMEM;
15220                 goto err_out_free_dev;
15221         }
15222
15223         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15224             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15225             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15226             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15227             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15228             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15229             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15230             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15231                 tg3_flag_set(tp, ENABLE_APE);
15232                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15233                 if (!tp->aperegs) {
15234                         dev_err(&pdev->dev,
15235                                 "Cannot map APE registers, aborting\n");
15236                         err = -ENOMEM;
15237                         goto err_out_iounmap;
15238                 }
15239         }
15240
15241         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15242         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15243
15244         dev->ethtool_ops = &tg3_ethtool_ops;
15245         dev->watchdog_timeo = TG3_TX_TIMEOUT;
15246         dev->netdev_ops = &tg3_netdev_ops;
15247         dev->irq = pdev->irq;
15248
15249         err = tg3_get_invariants(tp);
15250         if (err) {
15251                 dev_err(&pdev->dev,
15252                         "Problem fetching invariants of chip, aborting\n");
15253                 goto err_out_apeunmap;
15254         }
15255
15256         /* The EPB bridge inside 5714, 5715, and 5780 and any
15257          * device behind the EPB cannot support DMA addresses > 40-bit.
15258          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15259          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15260          * do DMA address check in tg3_start_xmit().
15261          */
15262         if (tg3_flag(tp, IS_5788))
15263                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15264         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15265                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15266 #ifdef CONFIG_HIGHMEM
15267                 dma_mask = DMA_BIT_MASK(64);
15268 #endif
15269         } else
15270                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
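
        /*
         * For reference: DMA_BIT_MASK(n) from <linux/dma-mapping.h>
         * expands to ((1ULL << n) - 1) for n < 64, so the 40-bit mask
         * chosen above for EPB-limited parts is 0x000000ffffffffffULL.
         */
#if 0
        BUILD_BUG_ON(DMA_BIT_MASK(40) != 0x000000ffffffffffULL);
#endif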
15271
15272         /* Configure DMA attributes. */
15273         if (dma_mask > DMA_BIT_MASK(32)) {
15274                 err = pci_set_dma_mask(pdev, dma_mask);
15275                 if (!err) {
15276                         features |= NETIF_F_HIGHDMA;
15277                         err = pci_set_consistent_dma_mask(pdev,
15278                                                           persist_dma_mask);
15279                         if (err < 0) {
15280                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15281                                         "DMA for consistent allocations\n");
15282                                 goto err_out_apeunmap;
15283                         }
15284                 }
15285         }
15286         if (err || dma_mask == DMA_BIT_MASK(32)) {
15287                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15288                 if (err) {
15289                         dev_err(&pdev->dev,
15290                                 "No usable DMA configuration, aborting\n");
15291                         goto err_out_apeunmap;
15292                 }
15293         }
15294
15295         tg3_init_bufmgr_config(tp);
15296
15297         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15298
15299         /* 5700 B0 chips do not support checksumming correctly due
15300          * to hardware bugs.
15301          */
15302         if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15303                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15304
15305                 if (tg3_flag(tp, 5755_PLUS))
15306                         features |= NETIF_F_IPV6_CSUM;
15307         }
15308
15309         /* TSO is on by default on chips that support hardware TSO.
15310          * Firmware TSO on older chips gives lower performance, so it
15311          * is off by default, but can be enabled using ethtool.
15312          */
15313         if ((tg3_flag(tp, HW_TSO_1) ||
15314              tg3_flag(tp, HW_TSO_2) ||
15315              tg3_flag(tp, HW_TSO_3)) &&
15316             (features & NETIF_F_IP_CSUM))
15317                 features |= NETIF_F_TSO;
15318         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15319                 if (features & NETIF_F_IPV6_CSUM)
15320                         features |= NETIF_F_TSO6;
15321                 if (tg3_flag(tp, HW_TSO_3) ||
15322                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15323                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15324                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15325                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15326                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15327                         features |= NETIF_F_TSO_ECN;
15328         }
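
        /*
         * Summary of the selection above: any hardware TSO generation
         * (HW_TSO_1/2/3) enables NETIF_F_TSO when IP checksumming is
         * available; HW_TSO_2/3 additionally allow TSO6, and TSO_ECN is
         * limited to HW_TSO_3 parts plus the listed 5761, 5784
         * (non-AX), 5785 and 57780 ASICs.
         */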
15329
15330         dev->features |= features;
15331         dev->vlan_features |= features;
15332
15333         /*
15334          * Add loopback capability only for a subset of devices that support
15335          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15336          * loopback for the remaining devices.
15337          */
15338         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15339             !tg3_flag(tp, CPMU_PRESENT))
15340                 /* Add the loopback capability */
15341                 features |= NETIF_F_LOOPBACK;
15342
15343         dev->hw_features |= features;
15344
15345         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15346             !tg3_flag(tp, TSO_CAPABLE) &&
15347             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15348                 tg3_flag_set(tp, MAX_RXPEND_64);
15349                 tp->rx_pending = 63;
15350         }
15351
15352         err = tg3_get_device_address(tp);
15353         if (err) {
15354                 dev_err(&pdev->dev,
15355                         "Could not obtain valid ethernet address, aborting\n");
15356                 goto err_out_apeunmap;
15357         }
15358
15359         /*
15360          * Reset the chip in case the UNDI or EFI driver did not shut down
15361          * DMA.  Otherwise the DMA self test below will enable the WDMAC
15362          * and we'll see (spurious) pending DMA on the PCI bus at that point.
15363          */
15364         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15365             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15366                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15367                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15368         }
15369
15370         err = tg3_test_dma(tp);
15371         if (err) {
15372                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15373                 goto err_out_apeunmap;
15374         }
15375
15376         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15377         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15378         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15379         for (i = 0; i < tp->irq_max; i++) {
15380                 struct tg3_napi *tnapi = &tp->napi[i];
15381
15382                 tnapi->tp = tp;
15383                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15384
15385                 tnapi->int_mbox = intmbx;
15386                 if (i < 4)
15387                         intmbx += 0x8;
15388                 else
15389                         intmbx += 0x4;
15390
15391                 tnapi->consmbox = rcvmbx;
15392                 tnapi->prodmbox = sndmbx;
15393
15394                 if (i)
15395                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15396                 else
15397                         tnapi->coal_now = HOSTCC_MODE_NOW;
15398
15399                 if (!tg3_flag(tp, SUPPORT_MSIX))
15400                         break;
15401
15402                 /*
15403                  * If we support MSIX, we'll be using RSS.  If we're using
15404                  * RSS, the first vector only handles link interrupts and the
15405                  * remaining vectors handle rx and tx interrupts.  Reuse the
15406                  * mailbox values for the next iteration.  The values we setup
15407          * mailbox values for the next iteration.  The values we set up
15408          * above are still useful for single-vector mode.
15409                 if (!i)
15410                         continue;
15411
15412                 rcvmbx += 0x8;
15413
15414                 if (sndmbx & 0x4)
15415                         sndmbx -= 0x4;
15416                 else
15417                         sndmbx += 0xc;
15418         }
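
        /*
         * Worked example of the layout the loop above produces with
         * MSI-X enabled, where B and R are the intmbx/rcvmbx starting
         * values (offsets follow directly from the increments above):
         *
         *   vector:    0    1      2       3       4
         *   int_mbox:  B    B+0x8  B+0x10  B+0x18  B+0x20
         *   consmbox:  R    R      R+0x8   R+0x10  R+0x18
         *
         * Vectors 0 and 1 share rx mailbox values because with MSI-X
         * vector 0 handles only link interrupts.  The -0x4/+0xc dance
         * on sndmbx pairs successive tx vectors into the two 32-bit
         * halves of consecutive 64-bit producer mailboxes (assuming the
         * base is the low half of a 64-bit register, as the
         * +TG3_64BIT_REG_LOW offset suggests).
         */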
15419
15420         tg3_init_coal(tp);
15421
15422         pci_set_drvdata(pdev, dev);
15423
15424         if (tg3_flag(tp, 5717_PLUS)) {
15425                 /* Resume a low-power mode */
15426                 tg3_frob_aux_power(tp, false);
15427         }
15428
15429         err = register_netdev(dev);
15430         if (err) {
15431                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15432                 goto err_out_apeunmap;
15433         }
15434
15435         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15436                     tp->board_part_number,
15437                     tp->pci_chip_rev_id,
15438                     tg3_bus_string(tp, str),
15439                     dev->dev_addr);
15440
15441         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15442                 struct phy_device *phydev;
15443                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15444                 netdev_info(dev,
15445                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15446                             phydev->drv->name, dev_name(&phydev->dev));
15447         } else {
15448                 char *ethtype;
15449
15450                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15451                         ethtype = "10/100Base-TX";
15452                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15453                         ethtype = "1000Base-SX";
15454                 else
15455                         ethtype = "10/100/1000Base-T";
15456
15457                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15458                             "(WireSpeed[%d], EEE[%d])\n",
15459                             tg3_phy_string(tp), ethtype,
15460                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15461                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15462         }
15463
15464         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15465                     (dev->features & NETIF_F_RXCSUM) != 0,
15466                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
15467                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15468                     tg3_flag(tp, ENABLE_ASF) != 0,
15469                     tg3_flag(tp, TSO_CAPABLE) != 0);
15470         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15471                     tp->dma_rwctrl,
15472                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15473                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15474
15475         pci_save_state(pdev);
15476
15477         return 0;
15478
15479 err_out_apeunmap:
15480         if (tp->aperegs) {
15481                 iounmap(tp->aperegs);
15482                 tp->aperegs = NULL;
15483         }
15484
15485 err_out_iounmap:
15486         if (tp->regs) {
15487                 iounmap(tp->regs);
15488                 tp->regs = NULL;
15489         }
15490
15491 err_out_free_dev:
15492         free_netdev(dev);
15493
15494 err_out_power_down:
15495         pci_set_power_state(pdev, PCI_D3hot);
15496
15497 err_out_free_res:
15498         pci_release_regions(pdev);
15499
15500 err_out_disable_pdev:
15501         pci_disable_device(pdev);
15502         pci_set_drvdata(pdev, NULL);
15503         return err;
15504 }
15505
15506 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15507 {
15508         struct net_device *dev = pci_get_drvdata(pdev);
15509
15510         if (dev) {
15511                 struct tg3 *tp = netdev_priv(dev);
15512
15513                 if (tp->fw)
15514                         release_firmware(tp->fw);
15515
15516                 cancel_work_sync(&tp->reset_task);
15517
15518                 if (!tg3_flag(tp, USE_PHYLIB)) {
15519                         tg3_phy_fini(tp);
15520                         tg3_mdio_fini(tp);
15521                 }
15522
15523                 unregister_netdev(dev);
15524                 if (tp->aperegs) {
15525                         iounmap(tp->aperegs);
15526                         tp->aperegs = NULL;
15527                 }
15528                 if (tp->regs) {
15529                         iounmap(tp->regs);
15530                         tp->regs = NULL;
15531                 }
15532                 free_netdev(dev);
15533                 pci_release_regions(pdev);
15534                 pci_disable_device(pdev);
15535                 pci_set_drvdata(pdev, NULL);
15536         }
15537 }
15538
15539 #ifdef CONFIG_PM_SLEEP
15540 static int tg3_suspend(struct device *device)
15541 {
15542         struct pci_dev *pdev = to_pci_dev(device);
15543         struct net_device *dev = pci_get_drvdata(pdev);
15544         struct tg3 *tp = netdev_priv(dev);
15545         int err;
15546
15547         if (!netif_running(dev))
15548                 return 0;
15549
15550         flush_work_sync(&tp->reset_task);
15551         tg3_phy_stop(tp);
15552         tg3_netif_stop(tp);
15553
15554         del_timer_sync(&tp->timer);
15555
15556         tg3_full_lock(tp, 1);
15557         tg3_disable_ints(tp);
15558         tg3_full_unlock(tp);
15559
15560         netif_device_detach(dev);
15561
15562         tg3_full_lock(tp, 0);
15563         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15564         tg3_flag_clear(tp, INIT_COMPLETE);
15565         tg3_full_unlock(tp);
15566
15567         err = tg3_power_down_prepare(tp);
15568         if (err) {
15569                 int err2;
15570
15571                 tg3_full_lock(tp, 0);
15572
15573                 tg3_flag_set(tp, INIT_COMPLETE);
15574                 err2 = tg3_restart_hw(tp, 1);
15575                 if (err2)
15576                         goto out;
15577
15578                 tp->timer.expires = jiffies + tp->timer_offset;
15579                 add_timer(&tp->timer);
15580
15581                 netif_device_attach(dev);
15582                 tg3_netif_start(tp);
15583
15584 out:
15585                 tg3_full_unlock(tp);
15586
15587                 if (!err2)
15588                         tg3_phy_start(tp);
15589         }
15590
15591         return err;
15592 }
15593
15594 static int tg3_resume(struct device *device)
15595 {
15596         struct pci_dev *pdev = to_pci_dev(device);
15597         struct net_device *dev = pci_get_drvdata(pdev);
15598         struct tg3 *tp = netdev_priv(dev);
15599         int err;
15600
15601         if (!netif_running(dev))
15602                 return 0;
15603
15604         netif_device_attach(dev);
15605
15606         tg3_full_lock(tp, 0);
15607
15608         tg3_flag_set(tp, INIT_COMPLETE);
15609         err = tg3_restart_hw(tp, 1);
15610         if (err)
15611                 goto out;
15612
15613         tp->timer.expires = jiffies + tp->timer_offset;
15614         add_timer(&tp->timer);
15615
15616         tg3_netif_start(tp);
15617
15618 out:
15619         tg3_full_unlock(tp);
15620
15621         if (!err)
15622                 tg3_phy_start(tp);
15623
15624         return err;
15625 }
15626
15627 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15628 #define TG3_PM_OPS (&tg3_pm_ops)
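
/*
 * Rough sketch of what SIMPLE_DEV_PM_OPS() above generates: both system
 * sleep flavors (suspend-to-RAM and hibernation) funnel into the same
 * pair of callbacks.
 */
#if 0
static const struct dev_pm_ops tg3_pm_ops = {
        .suspend  = tg3_suspend, .resume  = tg3_resume,
        .freeze   = tg3_suspend, .thaw    = tg3_resume,
        .poweroff = tg3_suspend, .restore = tg3_resume,
};
#endif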
15629
15630 #else
15631
15632 #define TG3_PM_OPS NULL
15633
15634 #endif /* CONFIG_PM_SLEEP */
15635
15636 /**
15637  * tg3_io_error_detected - called when PCI error is detected
15638  * @pdev: Pointer to PCI device
15639  * @state: The current pci connection state
15640  *
15641  * This function is called after a PCI bus error affecting
15642  * this device has been detected.
15643  */
15644 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15645                                               pci_channel_state_t state)
15646 {
15647         struct net_device *netdev = pci_get_drvdata(pdev);
15648         struct tg3 *tp = netdev_priv(netdev);
15649         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15650
15651         netdev_info(netdev, "PCI I/O error detected\n");
15652
15653         rtnl_lock();
15654
15655         if (!netif_running(netdev))
15656                 goto done;
15657
15658         tg3_phy_stop(tp);
15659
15660         tg3_netif_stop(tp);
15661
15662         del_timer_sync(&tp->timer);
15663         tg3_flag_clear(tp, RESTART_TIMER);
15664
15665         /* Want to make sure that the reset task doesn't run */
15666         cancel_work_sync(&tp->reset_task);
15667         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15668         tg3_flag_clear(tp, RESTART_TIMER);
15669
15670         netif_device_detach(netdev);
15671
15672         /* Clean up software state, even if MMIO is blocked */
15673         tg3_full_lock(tp, 0);
15674         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15675         tg3_full_unlock(tp);
15676
15677 done:
15678         if (state == pci_channel_io_perm_failure)
15679                 err = PCI_ERS_RESULT_DISCONNECT;
15680         else
15681                 pci_disable_device(pdev);
15682
15683         rtnl_unlock();
15684
15685         return err;
15686 }
15687
15688 /**
15689  * tg3_io_slot_reset - called after the PCI bus has been reset.
15690  * @pdev: Pointer to PCI device
15691  *
15692  * Restart the card from scratch, as if from a cold boot.
15693  * At this point, the card has experienced a hard reset,
15694  * followed by fixups by BIOS, and has its config space
15695  * set up identically to what it was at cold boot.
15696  */
15697 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15698 {
15699         struct net_device *netdev = pci_get_drvdata(pdev);
15700         struct tg3 *tp = netdev_priv(netdev);
15701         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15702         int err;
15703
15704         rtnl_lock();
15705
15706         if (pci_enable_device(pdev)) {
15707                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15708                 goto done;
15709         }
15710
15711         pci_set_master(pdev);
15712         pci_restore_state(pdev);
15713         pci_save_state(pdev);
15714
15715         if (!netif_running(netdev)) {
15716                 rc = PCI_ERS_RESULT_RECOVERED;
15717                 goto done;
15718         }
15719
15720         err = tg3_power_up(tp);
15721         if (err)
15722                 goto done;
15723
15724         rc = PCI_ERS_RESULT_RECOVERED;
15725
15726 done:
15727         rtnl_unlock();
15728
15729         return rc;
15730 }
15731
15732 /**
15733  * tg3_io_resume - called when traffic can start flowing again.
15734  * @pdev: Pointer to PCI device
15735  *
15736  * This callback is called when the error recovery driver tells
15737  * us that it's OK to resume normal operation.
15738  */
15739 static void tg3_io_resume(struct pci_dev *pdev)
15740 {
15741         struct net_device *netdev = pci_get_drvdata(pdev);
15742         struct tg3 *tp = netdev_priv(netdev);
15743         int err;
15744
15745         rtnl_lock();
15746
15747         if (!netif_running(netdev))
15748                 goto done;
15749
15750         tg3_full_lock(tp, 0);
15751         tg3_flag_set(tp, INIT_COMPLETE);
15752         err = tg3_restart_hw(tp, 1);
15753         tg3_full_unlock(tp);
15754         if (err) {
15755                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
15756                 goto done;
15757         }
15758
15759         netif_device_attach(netdev);
15760
15761         tp->timer.expires = jiffies + tp->timer_offset;
15762         add_timer(&tp->timer);
15763
15764         tg3_netif_start(tp);
15765
15766         tg3_phy_start(tp);
15767
15768 done:
15769         rtnl_unlock();
15770 }
15771
15772 static struct pci_error_handlers tg3_err_handler = {
15773         .error_detected = tg3_io_error_detected,
15774         .slot_reset     = tg3_io_slot_reset,
15775         .resume         = tg3_io_resume
15776 };
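
/*
 * Recovery sequence sketch: on a channel error the PCI core invokes
 * .error_detected (we detach and halt the chip); unless that returns
 * PCI_ERS_RESULT_DISCONNECT it follows with .slot_reset after the bus
 * has been reset (we re-enable the device and restore config space),
 * and finally .resume once traffic may flow again.  See
 * Documentation/PCI/pci-error-recovery.txt for the full state machine.
 */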
15777
15778 static struct pci_driver tg3_driver = {
15779         .name           = DRV_MODULE_NAME,
15780         .id_table       = tg3_pci_tbl,
15781         .probe          = tg3_init_one,
15782         .remove         = __devexit_p(tg3_remove_one),
15783         .err_handler    = &tg3_err_handler,
15784         .driver.pm      = TG3_PM_OPS,
15785 };
15786
15787 static int __init tg3_init(void)
15788 {
15789         return pci_register_driver(&tg3_driver);
15790 }
15791
15792 static void __exit tg3_cleanup(void)
15793 {
15794         pci_unregister_driver(&tg3_driver);
15795 }
15796
15797 module_init(tg3_init);
15798 module_exit(tg3_cleanup);