tg3: Add support for link flap avoidance
[firefly-linux-kernel-4.4.55.git] / drivers / net / ethernet / broadcom / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2013 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47 #include <linux/ssb/ssb_driver_gige.h>
48 #include <linux/hwmon.h>
49 #include <linux/hwmon-sysfs.h>
50
51 #include <net/checksum.h>
52 #include <net/ip.h>
53
54 #include <linux/io.h>
55 #include <asm/byteorder.h>
56 #include <linux/uaccess.h>
57
58 #include <uapi/linux/net_tstamp.h>
59 #include <linux/ptp_clock_kernel.h>
60
61 #ifdef CONFIG_SPARC
62 #include <asm/idprom.h>
63 #include <asm/prom.h>
64 #endif
65
66 #define BAR_0   0
67 #define BAR_2   2
68
69 #include "tg3.h"
70
71 /* Functions & macros to verify TG3_FLAGS types */
72
/* Thin wrappers around the generic kernel bitops.  Their only purpose
 * is to give the tg3_flag*() macros below compile-time type checking
 * of the TG3_FLAGS enum value being tested/set/cleared.
 */
static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

/* Convenience accessors for the tp->tg3_flags bitmap; "flag" is the
 * TG3_FLAG_ suffix (e.g. tg3_flag(tp, ENABLE_APE)).
 */
#define tg3_flag(tp, flag)                              \
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
94
95 #define DRV_MODULE_NAME         "tg3"
96 #define TG3_MAJ_NUM                     3
97 #define TG3_MIN_NUM                     130
98 #define DRV_MODULE_VERSION      \
99         __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
100 #define DRV_MODULE_RELDATE      "February 14, 2013"
101
102 #define RESET_KIND_SHUTDOWN     0
103 #define RESET_KIND_INIT         1
104 #define RESET_KIND_SUSPEND      2
105
106 #define TG3_DEF_RX_MODE         0
107 #define TG3_DEF_TX_MODE         0
108 #define TG3_DEF_MSG_ENABLE        \
109         (NETIF_MSG_DRV          | \
110          NETIF_MSG_PROBE        | \
111          NETIF_MSG_LINK         | \
112          NETIF_MSG_TIMER        | \
113          NETIF_MSG_IFDOWN       | \
114          NETIF_MSG_IFUP         | \
115          NETIF_MSG_RX_ERR       | \
116          NETIF_MSG_TX_ERR)
117
118 #define TG3_GRC_LCLCTL_PWRSW_DELAY      100
119
120 /* length of time before we decide the hardware is borked,
121  * and dev->tx_timeout() should be called to fix the problem
122  */
123
124 #define TG3_TX_TIMEOUT                  (5 * HZ)
125
126 /* hardware minimum and maximum for a single frame's data payload */
127 #define TG3_MIN_MTU                     60
128 #define TG3_MAX_MTU(tp) \
129         (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
130
131 /* These numbers seem to be hard coded in the NIC firmware somehow.
132  * You can't change the ring sizes, but you can change where you place
133  * them in the NIC onboard memory.
134  */
135 #define TG3_RX_STD_RING_SIZE(tp) \
136         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
137          TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
138 #define TG3_DEF_RX_RING_PENDING         200
139 #define TG3_RX_JMB_RING_SIZE(tp) \
140         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
141          TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
142 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
143
144 /* Do not place this n-ring entries value into the tp struct itself,
145  * we really want to expose these constants to GCC so that modulo et
146  * al.  operations are done with shifts and masks instead of with
147  * hw multiply/modulo instructions.  Another solution would be to
148  * replace things like '% foo' with '& (foo - 1)'.
149  */
150
151 #define TG3_TX_RING_SIZE                512
152 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
153
154 #define TG3_RX_STD_RING_BYTES(tp) \
155         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
156 #define TG3_RX_JMB_RING_BYTES(tp) \
157         (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
158 #define TG3_RX_RCB_RING_BYTES(tp) \
159         (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
160 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
161                                  TG3_TX_RING_SIZE)
162 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
163
164 #define TG3_DMA_BYTE_ENAB               64
165
166 #define TG3_RX_STD_DMA_SZ               1536
167 #define TG3_RX_JMB_DMA_SZ               9046
168
169 #define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
170
171 #define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
172 #define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
173
174 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
175         (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
176
177 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
178         (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
179
180 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
181  * that are at least dword aligned when used in PCIX mode.  The driver
182  * works around this bug by double copying the packet.  This workaround
183  * is built into the normal double copy length check for efficiency.
184  *
185  * However, the double copy is only necessary on those architectures
186  * where unaligned memory accesses are inefficient.  For those architectures
187  * where unaligned memory accesses incur little penalty, we can reintegrate
188  * the 5701 in the normal rx path.  Doing so saves a device structure
189  * dereference by hardcoding the double copy threshold in place.
190  */
191 #define TG3_RX_COPY_THRESHOLD           256
192 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
193         #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
194 #else
195         #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
196 #endif
197
198 #if (NET_IP_ALIGN != 0)
199 #define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
200 #else
201 #define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
202 #endif
203
204 /* minimum number of free TX descriptors required to wake up TX process */
205 #define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
206 #define TG3_TX_BD_DMA_MAX_2K            2048
207 #define TG3_TX_BD_DMA_MAX_4K            4096
208
209 #define TG3_RAW_IP_ALIGN 2
210
211 #define TG3_FW_UPDATE_TIMEOUT_SEC       5
212 #define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
213
214 #define FIRMWARE_TG3            "tigon/tg3.bin"
215 #define FIRMWARE_TG357766       "tigon/tg357766.bin"
216 #define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
217 #define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
218
219 static char version[] =
220         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
221
222 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
223 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
224 MODULE_LICENSE("GPL");
225 MODULE_VERSION(DRV_MODULE_VERSION);
226 MODULE_FIRMWARE(FIRMWARE_TG3);
227 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
228 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
229
230 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
231 module_param(tg3_debug, int, 0);
232 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
233
234 #define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
235 #define TG3_DRV_DATA_FLAG_5705_10_100   0x0002
236
237 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
238         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
239         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
240         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
241         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
242         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
243         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
244         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
245         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
246         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
247         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
248         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
249         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
250         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
251         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
252         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
253         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
254         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
255         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
256         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
257          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
258                         TG3_DRV_DATA_FLAG_5705_10_100},
259         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
260          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
261                         TG3_DRV_DATA_FLAG_5705_10_100},
262         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
263         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
264          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
265                         TG3_DRV_DATA_FLAG_5705_10_100},
266         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
267         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
268         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
269         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
270         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
271         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
272          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
273         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
274         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
275         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
276         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
277         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
278          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
279         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
280         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
281         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
282         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
283         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
284         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
285         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
286         {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
287                         PCI_VENDOR_ID_LENOVO,
288                         TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
289          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
290         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
291         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
292          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
293         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
294         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
295         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
296         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
297         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
298         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
299         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
300         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
301         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
302         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
303         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
304         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
305         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
306         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
307         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
308         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
309         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
310         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
311         {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
312                         PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
313          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
314         {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
315                         PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
316          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
317         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
318         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
319         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
320          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
321         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
322         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
323         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
324         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
325         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
326         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
327         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
328         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
329         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
330          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
331         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
332          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
333         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
334         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
335         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
336         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
337         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
338         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
339         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
340         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
341         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
342         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
343         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
344         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
345         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
346         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
347         {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
348         {}
349 };
350
351 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
352
/* Statistic names reported via "ethtool -S".  The index order must stay
 * in sync with the code that fills the ethtool stats buffer —
 * presumably the tg3 stats-gathering routine later in this file; verify
 * against it before reordering.  TG3_NUM_STATS below is derived from
 * this array's size.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};
435
436 #define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
437 #define TG3_NVRAM_TEST          0
438 #define TG3_LINK_TEST           1
439 #define TG3_REGISTER_TEST       2
440 #define TG3_MEMORY_TEST         3
441 #define TG3_MAC_LOOPB_TEST      4
442 #define TG3_PHY_LOOPB_TEST      5
443 #define TG3_EXT_LOOPB_TEST      6
444 #define TG3_INTERRUPT_TEST      7
445
446
/* Self-test names reported via "ethtool -t"/-gstrings, indexed by the
 * TG3_*_TEST constants defined above so that name and result slots
 * always line up.  TG3_NUM_TEST below is derived from this array.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};
459
460 #define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
461
462
/* Plain MMIO accessors for the main register BAR (tp->regs). */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

/* MMIO accessors for the APE register BAR (tp->aperegs). */
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
482
/* Write a device register through the PCI config-space indirect window
 * (TG3PCI_REG_BASE_ADDR selects the register, TG3PCI_REG_DATA carries
 * the value).  indirect_lock serializes the address/data pair so
 * concurrent users cannot interleave accesses.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
492
/* MMIO write followed by a read-back of the same register to flush the
 * posted write out to the device before returning.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
498
/* Read a device register through the PCI config-space indirect window;
 * counterpart of tg3_write_indirect_reg32() with the same locking.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
510
/* Write a mailbox register when the device is operated in indirect
 * (config-space) mode.  Two mailboxes have dedicated config-space
 * shadows and are handled up front; everything else goes through the
 * generic indirect window with the mailbox base offset (+0x5600)
 * applied.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	/* RX return ring consumer index has a dedicated shadow register. */
	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	/* Standard RX producer index likewise. */
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
540
/* Read a mailbox register in indirect mode through the config-space
 * window, applying the same +0x5600 mailbox base offset used by
 * tg3_write_indirect_mbox().
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
552
553 /* usec_wait specifies the wait time in usec when writing to certain registers
554  * where it is unsafe to read back the register without some delay.
555  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
556  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
557  */
558 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
559 {
560         if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
561                 /* Non-posted methods */
562                 tp->write32(tp, off, val);
563         else {
564                 /* Posted method */
565                 tg3_write32(tp, off, val);
566                 if (usec_wait)
567                         udelay(usec_wait);
568                 tp->read32(tp, off);
569         }
570         /* Wait again after the read for the posted method to guarantee that
571          * the wait time is met.
572          */
573         if (usec_wait)
574                 udelay(usec_wait);
575 }
576
/* Write a mailbox register and read it back to flush the posted write
 * when that is required: always when FLUSH_POSTED_WRITES is set, and
 * otherwise only when neither the mailbox write-reorder workaround nor
 * the ICH workaround is active.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}
585
/* Post a TX mailbox update.  Chips with the TXD_MBOX_HWBUG need the
 * value written twice; chips subject to write reordering (or that must
 * flush posted writes) get a read-back to force the write out.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}
596
/* 5906 mailbox accessors: this chip's mailboxes live in the GRC
 * mailbox region, so all accesses are offset by GRCMBOX_BASE.
 */
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
606
607 #define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
608 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
609 #define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
610 #define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
611 #define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)
612
613 #define tw32(reg, val)                  tp->write32(tp, reg, val)
614 #define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
615 #define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
616 #define tr32(reg)                       tp->read32(tp, reg)
617
/* Write @val into NIC on-chip SRAM at offset @off through the memory
 * window.  On the 5906, writes into the statistics block range are
 * silently dropped.  SRAM_USE_CONFIG selects the PCI config-space
 * window instead of MMIO; either way the window base is restored to
 * zero afterwards, and indirect_lock serializes use of the shared
 * window registers.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
642
/* Read NIC on-chip SRAM at offset @off into *@val; counterpart of
 * tg3_write_mem() with the same window selection and locking.  On the
 * 5906 the statistics block range is not readable and *val is forced
 * to zero instead.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
669
/* Release any APE locks this driver instance may have left granted
 * (e.g. after a crash/reload) by writing the grant bits back.  The
 * 5761 uses the legacy single grant register block; other chips use
 * the per-lock grant registers.  PHY locks always use the generic
 * driver grant bit; other locks use a per-PCI-function bit except for
 * function 0, which also uses the generic driver bit.
 */
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		/* Grant registers are laid out as one u32 per lock. */
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}

}
699
/* Acquire an APE lock shared with the management firmware.
 *
 * Returns 0 on success (or when APE support is disabled, or for the
 * GPIO lock on the 5761 which has none), -EINVAL for an unknown
 * @locknum, and -EBUSY if the grant is not observed within ~1 ms, in
 * which case the request is revoked before returning.
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* 5761 has no GPIO lock to take. */
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		/* Per-function request bit, except function 0 which uses
		 * the generic driver bit.
		 */
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	/* 5761 uses the legacy request/grant register blocks. */
	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
758
/* Release an APE mutex previously taken with tg3_ape_lock().
 *
 * The bit selection mirrors tg3_ape_lock(); unknown lock numbers and
 * the GPIO lock on 5761 are silently ignored.
 */
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		/* Non-zero PCI functions release with their own bit. */
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	/* Writing our bit back to the grant register drops the lock. */
	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
794
/* Wait up to @timeout_us microseconds for the APE to have no event
 * pending, taking TG3_APE_LOCK_MEM around each status check.
 *
 * Returns 0 with TG3_APE_LOCK_MEM still HELD (the caller posts its
 * event and then releases the lock), or -EBUSY on timeout or if the
 * memory lock could not be acquired (lock is NOT held on failure).
 */
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		/* Still busy: drop the lock so the APE can make progress. */
		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}
815
816 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
817 {
818         u32 i, apedata;
819
820         for (i = 0; i < timeout_us / 10; i++) {
821                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
822
823                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
824                         break;
825
826                 udelay(10);
827         }
828
829         return i == timeout_us / 10;
830 }
831
/* Read @len bytes of APE scratchpad memory starting at @base_off into
 * @data, in chunks bounded by the shared message buffer size.
 *
 * Each chunk is requested by posting a SCRTCHPD_READ driver event and
 * waiting for the APE to copy the data into the shared message buffer.
 * Only meaningful on NCSI-capable parts; otherwise returns 0 without
 * touching @data.  Returns 0 on success, -ENODEV/-EAGAIN/-EBUSY on
 * APE signature, readiness, or handshake failures.
 *
 * NOTE(review): @len is presumably a multiple of 4 — the final copy
 * loop advances 4 bytes at a time; confirm against callers.
 */
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	/* Sanity-check that APE firmware is alive and ready. */
	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Locate the shared message buffer; its first two u32s carry
	 * the request (offset, length), the payload follows.
	 */
	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		/* TG3_APE_LOCK_MEM is held here; post the read request. */
		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		/* Release the lock before kicking the APE. */
		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		/* Copy the chunk out of the shared buffer, 4 bytes at
		 * a time (memcpy avoids strict-aliasing issues).
		 */
		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
895
/* Post driver event @event to the APE firmware.
 *
 * Verifies the APE segment signature and firmware-ready status, waits
 * for any previous event to be consumed, then writes the event status
 * and rings the APE doorbell.  Returns 0 on success or a negative
 * errno (-EAGAIN / -EBUSY) on failure.
 */
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	/* TG3_APE_LOCK_MEM is held on success of the call above. */
	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	/* Drop the lock, then ring the doorbell so the APE looks. */
	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}
922
/* Inform the APE firmware of a driver state transition (@kind is one
 * of RESET_KIND_INIT / RESET_KIND_SHUTDOWN / RESET_KIND_SUSPEND).
 * No-op when the ENABLE_APE flag is clear or @kind is unrecognized.
 */
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		/* Publish the host segment and bump the init counter so
		 * the APE knows an OS driver is present.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		/* Advertise WoL state so the APE keeps the link up for
		 * wake packets when Wake-on-LAN is armed.
		 */
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
979
980 static void tg3_disable_ints(struct tg3 *tp)
981 {
982         int i;
983
984         tw32(TG3PCI_MISC_HOST_CTRL,
985              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
986         for (i = 0; i < tp->irq_max; i++)
987                 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
988 }
989
/* Unmask chip interrupts and re-arm every vector's mailbox.  Also
 * rebuilds tp->coal_now from the per-vector coalescing bits.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	/* Publish irq_sync = 0 before unmasking at the chip. */
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		/* 1-shot MSI parts need the mailbox written twice. */
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
1020
1021 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1022 {
1023         struct tg3 *tp = tnapi->tp;
1024         struct tg3_hw_status *sblk = tnapi->hw_status;
1025         unsigned int work_exists = 0;
1026
1027         /* check for phy events */
1028         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1029                 if (sblk->status & SD_STATUS_LINK_CHG)
1030                         work_exists = 1;
1031         }
1032
1033         /* check for TX work to do */
1034         if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1035                 work_exists = 1;
1036
1037         /* check for RX work to do */
1038         if (tnapi->rx_rcb_prod_idx &&
1039             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1040                 work_exists = 1;
1041
1042         return work_exists;
1043 }
1044
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	/* Re-arm this vector with the last processed tag. */
	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
1065
/* Step the core clock down via the ALTCLK intermediate, caching the
 * resulting value in tp->pci_clock_ctrl.  No-op on CPMU-equipped or
 * 5780-class parts, which manage clocking differently.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	/* Preserve only the CLKRUN bits and the low divider field. */
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Two-step transition: 44MHz+ALTCLK, then ALTCLK only. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
1098
/* Max 10us polls of MI_COM_BUSY before giving up on an MII frame. */
#define PHY_BUSY_LOOPS  5000

/* Read MII register @reg of PHY @phy_addr through the MAC's MI
 * interface, storing the result in *@val (zeroed on entry).
 *
 * Temporarily disables MI auto-polling if active, serializes against
 * the APE with tp->phy_ape_lock, and busy-waits for frame completion.
 * Returns 0 on success or -EBUSY on timeout.
 */
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* Auto-polling would race with our manual MI frame; pause it. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	/* Compose and launch the MI read frame. */
	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			/* Re-read after a short settle to latch data. */
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
1154
/* Read MII register @reg on the default PHY (tp->phy_addr). */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
1159
/* Write @val to MII register @reg of PHY @phy_addr through the MAC's
 * MI interface.  Mirrors __tg3_readphy(): pauses MI auto-polling,
 * holds tp->phy_ape_lock, and busy-waits for frame completion.
 * Returns 0 on success or -EBUSY on timeout.
 */
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* FET PHYs do not implement these registers; silently succeed. */
	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	/* Compose and launch the MI write frame. */
	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
1213
/* Write MII register @reg on the default PHY (tp->phy_addr). */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}
1218
1219 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1220 {
1221         int err;
1222
1223         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1224         if (err)
1225                 goto done;
1226
1227         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1228         if (err)
1229                 goto done;
1230
1231         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1232                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1233         if (err)
1234                 goto done;
1235
1236         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1237
1238 done:
1239         return err;
1240 }
1241
1242 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1243 {
1244         int err;
1245
1246         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1247         if (err)
1248                 goto done;
1249
1250         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1251         if (err)
1252                 goto done;
1253
1254         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1255                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1256         if (err)
1257                 goto done;
1258
1259         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1260
1261 done:
1262         return err;
1263 }
1264
1265 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1266 {
1267         int err;
1268
1269         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1270         if (!err)
1271                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1272
1273         return err;
1274 }
1275
1276 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1277 {
1278         int err;
1279
1280         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1281         if (!err)
1282                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1283
1284         return err;
1285 }
1286
1287 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1288 {
1289         int err;
1290
1291         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1292                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1293                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1294         if (!err)
1295                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1296
1297         return err;
1298 }
1299
1300 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1301 {
1302         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1303                 set |= MII_TG3_AUXCTL_MISC_WREN;
1304
1305         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1306 }
1307
1308 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1309 {
1310         u32 val;
1311         int err;
1312
1313         err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1314
1315         if (err)
1316                 return err;
1317         if (enable)
1318
1319                 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1320         else
1321                 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1322
1323         err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1324                                    val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1325
1326         return err;
1327 }
1328
/* Issue a software reset through the PHY's BMCR and wait (up to
 * ~50 ms) for the self-clearing BMCR_RESET bit to drop.  Returns 0
 * on success, -EBUSY on any PHY access failure or timeout.
 */
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	/* limit is -1 here only if the loop ran to exhaustion. */
	if (limit < 0)
		return -EBUSY;

	return 0;
}
1359
1360 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1361 {
1362         struct tg3 *tp = bp->priv;
1363         u32 val;
1364
1365         spin_lock_bh(&tp->lock);
1366
1367         if (tg3_readphy(tp, reg, &val))
1368                 val = -EIO;
1369
1370         spin_unlock_bh(&tp->lock);
1371
1372         return val;
1373 }
1374
1375 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1376 {
1377         struct tg3 *tp = bp->priv;
1378         u32 ret = 0;
1379
1380         spin_lock_bh(&tp->lock);
1381
1382         if (tg3_writephy(tp, reg, val))
1383                 ret = -EIO;
1384
1385         spin_unlock_bh(&tp->lock);
1386
1387         return ret;
1388 }
1389
/* mii_bus .reset callback: no bus-level reset is needed here. */
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
1394
/* Program the 5785 MAC's PHY-interface registers (LED modes, RGMII
 * in-band/out-of-band signalling) to match the attached PHY type.
 * Unrecognized PHYs are left untouched.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	/* Non-RGMII interfaces only need the LED modes and timeouts. */
	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	/* RGMII: enable in-band status unless explicitly disabled. */
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	/* Mirror the in-band settings into the external RGMII mode reg. */
	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
1475
/* Put the MI interface into manual (non-auto-poll) mode and, if the
 * mdio bus is up on a 5785, reapply its PHY interface configuration.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
1486
/* Determine the PHY address, start the MI interface and, when phylib
 * is in use, allocate/register the mdio bus and configure the found
 * PHY's interface mode and quirk flags.
 *
 * Returns 0 on success, -ENOMEM / -ENODEV / mdiobus_register() errno
 * on failure.
 */
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		/* 5717+ parts map PHY address from the PCI function;
		 * serdes PHYs sit 7 addresses higher.
		 */
		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	/* Only scan the single known PHY address. */
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	/* Apply per-PHY interface mode and quirk flags. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
1591
1592 static void tg3_mdio_fini(struct tg3 *tp)
1593 {
1594         if (tg3_flag(tp, MDIOBUS_INITED)) {
1595                 tg3_flag_clear(tp, MDIOBUS_INITED);
1596                 mdiobus_unregister(tp->mdio_bus);
1597                 mdiobus_free(tp->mdio_bus);
1598         }
1599 }
1600
1601 /* tp->lock is held. */
1602 static inline void tg3_generate_fw_event(struct tg3 *tp)
1603 {
1604         u32 val;
1605
1606         val = tr32(GRC_RX_CPU_EVENT);
1607         val |= GRC_RX_CPU_DRIVER_EVENT;
1608         tw32_f(GRC_RX_CPU_EVENT, val);
1609
1610         tp->last_event_jiffies = jiffies;
1611 }
1612
/* Max time to wait for firmware to ack a driver event. */
#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	/* Poll in 8 us steps (rounded up). */
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		/* Firmware clears the driver-event bit when it acks. */
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
1641
/* tp->lock is held.
 *
 * Pack four u32s of link-status data for the UMP firmware: each word
 * pairs two 16-bit MII registers (high word / low word).  Registers
 * that fail to read contribute zero bits.
 */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	/* Word 0: BMCR << 16 | BMSR */
	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	/* Word 1: ADVERTISE << 16 | LPA */
	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	/* Word 2: CTRL1000 << 16 | STAT1000 (copper only) */
	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	/* Word 3: PHYADDR << 16 */
	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}
1676
1677 /* tp->lock is held. */
1678 static void tg3_ump_link_report(struct tg3 *tp)
1679 {
1680         u32 data[4];
1681
1682         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1683                 return;
1684
1685         tg3_phy_gather_ump_data(tp, data);
1686
1687         tg3_wait_for_event_ack(tp);
1688
1689         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1690         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1691         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1692         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1693         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1694         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1695
1696         tg3_generate_fw_event(tp);
1697 }
1698
1699 /* tp->lock is held. */
1700 static void tg3_stop_fw(struct tg3 *tp)
1701 {
1702         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1703                 /* Wait for RX cpu to ACK the previous event. */
1704                 tg3_wait_for_event_ack(tp);
1705
1706                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1707
1708                 tg3_generate_fw_event(tp);
1709
1710                 /* Wait for RX cpu to ACK this event. */
1711                 tg3_wait_for_event_ack(tp);
1712         }
1713 }
1714
1715 /* tp->lock is held. */
1716 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1717 {
1718         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1719                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1720
1721         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1722                 switch (kind) {
1723                 case RESET_KIND_INIT:
1724                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1725                                       DRV_STATE_START);
1726                         break;
1727
1728                 case RESET_KIND_SHUTDOWN:
1729                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1730                                       DRV_STATE_UNLOAD);
1731                         break;
1732
1733                 case RESET_KIND_SUSPEND:
1734                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1735                                       DRV_STATE_SUSPEND);
1736                         break;
1737
1738                 default:
1739                         break;
1740                 }
1741         }
1742
1743         if (kind == RESET_KIND_INIT ||
1744             kind == RESET_KIND_SUSPEND)
1745                 tg3_ape_driver_state_change(tp, kind);
1746 }
1747
1748 /* tp->lock is held. */
1749 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1750 {
1751         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1752                 switch (kind) {
1753                 case RESET_KIND_INIT:
1754                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1755                                       DRV_STATE_START_DONE);
1756                         break;
1757
1758                 case RESET_KIND_SHUTDOWN:
1759                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1760                                       DRV_STATE_UNLOAD_DONE);
1761                         break;
1762
1763                 default:
1764                         break;
1765                 }
1766         }
1767
1768         if (kind == RESET_KIND_SHUTDOWN)
1769                 tg3_ape_driver_state_change(tp, kind);
1770 }
1771
1772 /* tp->lock is held. */
1773 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1774 {
1775         if (tg3_flag(tp, ENABLE_ASF)) {
1776                 switch (kind) {
1777                 case RESET_KIND_INIT:
1778                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1779                                       DRV_STATE_START);
1780                         break;
1781
1782                 case RESET_KIND_SHUTDOWN:
1783                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1784                                       DRV_STATE_UNLOAD);
1785                         break;
1786
1787                 case RESET_KIND_SUSPEND:
1788                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1789                                       DRV_STATE_SUSPEND);
1790                         break;
1791
1792                 default:
1793                         break;
1794                 }
1795         }
1796 }
1797
/* Wait for the on-chip firmware (if any) to finish booting after a
 * reset.  Returns 0 on success, or when no firmware is expected, and
 * -ENODEV only if the 5906 VCPU never signals init-done.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
        int i;
        u32 val;

        if (tg3_flag(tp, IS_SSB_CORE)) {
                /* We don't use firmware. */
                return 0;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                /* Wait up to 20ms for init done. */
                for (i = 0; i < 200; i++) {
                        if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
                                return 0;
                        udelay(100);
                }
                return -ENODEV;
        }

        /* Wait for firmware initialization to complete. */
        /* Poll for up to ~1s (100000 * 10us).  The boot code signals
         * completion by writing the bitwise complement of the magic
         * value back into the firmware mailbox.
         */
        for (i = 0; i < 100000; i++) {
                tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
                if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
                        break;
                udelay(10);
        }

        /* Chip might not be fitted with firmware.  Some Sun onboard
         * parts are configured like that.  So don't signal the timeout
         * of the above loop as an error, but do report the lack of
         * running firmware once.
         */
        if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
                tg3_flag_set(tp, NO_FWARE_REPORTED);

                netdev_info(tp->dev, "No firmware running\n");
        }

        if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
                /* The 57765 A0 needs a little more
                 * time to do some important work.
                 */
                mdelay(10);
        }

        return 0;
}
1846
1847 static void tg3_link_report(struct tg3 *tp)
1848 {
1849         if (!netif_carrier_ok(tp->dev)) {
1850                 netif_info(tp, link, tp->dev, "Link is down\n");
1851                 tg3_ump_link_report(tp);
1852         } else if (netif_msg_link(tp)) {
1853                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1854                             (tp->link_config.active_speed == SPEED_1000 ?
1855                              1000 :
1856                              (tp->link_config.active_speed == SPEED_100 ?
1857                               100 : 10)),
1858                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1859                              "full" : "half"));
1860
1861                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1862                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1863                             "on" : "off",
1864                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1865                             "on" : "off");
1866
1867                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1868                         netdev_info(tp->dev, "EEE is %s\n",
1869                                     tp->setlpicnt ? "enabled" : "disabled");
1870
1871                 tg3_ump_link_report(tp);
1872         }
1873
1874         tp->link_up = netif_carrier_ok(tp->dev);
1875 }
1876
1877 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1878 {
1879         u16 miireg;
1880
1881         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1882                 miireg = ADVERTISE_1000XPAUSE;
1883         else if (flow_ctrl & FLOW_CTRL_TX)
1884                 miireg = ADVERTISE_1000XPSE_ASYM;
1885         else if (flow_ctrl & FLOW_CTRL_RX)
1886                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1887         else
1888                 miireg = 0;
1889
1890         return miireg;
1891 }
1892
1893 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1894 {
1895         u8 cap = 0;
1896
1897         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1898                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1899         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1900                 if (lcladv & ADVERTISE_1000XPAUSE)
1901                         cap = FLOW_CTRL_RX;
1902                 if (rmtadv & ADVERTISE_1000XPAUSE)
1903                         cap = FLOW_CTRL_TX;
1904         }
1905
1906         return cap;
1907 }
1908
/* Resolve and program flow control.  lcladv/rmtadv are the local and
 * link-partner pause advertisements; when pause autoneg is not in
 * effect, the forced tp->link_config.flowctrl setting is used instead.
 * The MAC RX/TX mode registers are rewritten only when their value
 * actually changes.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
        u8 autoneg;
        u8 flowctrl = 0;
        u32 old_rx_mode = tp->rx_mode;
        u32 old_tx_mode = tp->tx_mode;

        /* With phylib, the autoneg state lives in the phy_device. */
        if (tg3_flag(tp, USE_PHYLIB))
                autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
        else
                autoneg = tp->link_config.autoneg;

        if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
                /* Serdes links resolve via the 1000BASE-X pause bits;
                 * copper uses the generic MII resolution helper.
                 */
                if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
                        flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
                else
                        flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
        } else
                flowctrl = tp->link_config.flowctrl;

        tp->link_config.active_flowctrl = flowctrl;

        if (flowctrl & FLOW_CTRL_RX)
                tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

        if (old_rx_mode != tp->rx_mode)
                tw32_f(MAC_RX_MODE, tp->rx_mode);

        if (flowctrl & FLOW_CTRL_TX)
                tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
        else
                tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

        if (old_tx_mode != tp->tx_mode)
                tw32_f(MAC_TX_MODE, tp->tx_mode);
}
1947
/* phylib adjust_link callback: reprogram the MAC (port mode, duplex,
 * flow control, MI status, TX inter-packet gap) to match the PHY's
 * current link state.  tp->lock is taken here; the link report is
 * issued after the lock is dropped.
 */
static void tg3_adjust_link(struct net_device *dev)
{
        u8 oldflowctrl, linkmesg = 0;
        u32 mac_mode, lcl_adv, rmt_adv;
        struct tg3 *tp = netdev_priv(dev);
        struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        spin_lock_bh(&tp->lock);

        /* Start from the current MAC mode with the port-mode and
         * half-duplex bits cleared; they are recomputed below.
         */
        mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
                                    MAC_MODE_HALF_DUPLEX);

        oldflowctrl = tp->link_config.active_flowctrl;

        if (phydev->link) {
                lcl_adv = 0;
                rmt_adv = 0;

                /* Select MII for 10/100; GMII for gigabit (and, on
                 * non-5785 parts, for any other reported speed).
                 */
                if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
                        mac_mode |= MAC_MODE_PORT_MODE_MII;
                else if (phydev->speed == SPEED_1000 ||
                         tg3_asic_rev(tp) != ASIC_REV_5785)
                        mac_mode |= MAC_MODE_PORT_MODE_GMII;
                else
                        mac_mode |= MAC_MODE_PORT_MODE_MII;

                if (phydev->duplex == DUPLEX_HALF)
                        mac_mode |= MAC_MODE_HALF_DUPLEX;
                else {
                        /* Full duplex: gather both sides' pause
                         * advertisements for flow-control resolution.
                         */
                        lcl_adv = mii_advertise_flowctrl(
                                  tp->link_config.flowctrl);

                        if (phydev->pause)
                                rmt_adv = LPA_PAUSE_CAP;
                        if (phydev->asym_pause)
                                rmt_adv |= LPA_PAUSE_ASYM;
                }

                tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
        } else
                mac_mode |= MAC_MODE_PORT_MODE_GMII;

        /* Only touch the MAC mode register when something changed. */
        if (mac_mode != tp->mac_mode) {
                tp->mac_mode = mac_mode;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5785) {
                if (phydev->speed == SPEED_10)
                        tw32(MAC_MI_STAT,
                             MAC_MI_STAT_10MBPS_MODE |
                             MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
                else
                        tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
        }

        /* 1000/half needs a longer slot time (0xff vs 32). */
        if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
                tw32(MAC_TX_LENGTHS,
                     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
                      (6 << TX_LENGTHS_IPG_SHIFT) |
                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
        else
                tw32(MAC_TX_LENGTHS,
                     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
                      (6 << TX_LENGTHS_IPG_SHIFT) |
                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

        /* Report only when link, speed, duplex or flow control moved. */
        if (phydev->link != tp->old_link ||
            phydev->speed != tp->link_config.active_speed ||
            phydev->duplex != tp->link_config.active_duplex ||
            oldflowctrl != tp->link_config.active_flowctrl)
                linkmesg = 1;

        tp->old_link = phydev->link;
        tp->link_config.active_speed = phydev->speed;
        tp->link_config.active_duplex = phydev->duplex;

        spin_unlock_bh(&tp->lock);

        if (linkmesg)
                tg3_link_report(tp);
}
2031
/* Connect the MAC to its PHY through phylib and mask the PHY's
 * supported features down to what the MAC can do.  Returns 0 on
 * success or a negative errno (also when the PHY interface mode is
 * unsupported).  Idempotent: returns immediately if already connected.
 */
static int tg3_phy_init(struct tg3 *tp)
{
        struct phy_device *phydev;

        if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
                return 0;

        /* Bring the PHY back to a known state. */
        tg3_bmcr_reset(tp);

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        /* Attach the MAC to the PHY. */
        phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
                             tg3_adjust_link, phydev->interface);
        if (IS_ERR(phydev)) {
                dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
                return PTR_ERR(phydev);
        }

        /* Mask with MAC supported features. */
        switch (phydev->interface) {
        case PHY_INTERFACE_MODE_GMII:
        case PHY_INTERFACE_MODE_RGMII:
                if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
                        phydev->supported &= (PHY_GBIT_FEATURES |
                                              SUPPORTED_Pause |
                                              SUPPORTED_Asym_Pause);
                        break;
                }
                /* 10/100-only devices fall back to basic features. */
                /* fallthru */
        case PHY_INTERFACE_MODE_MII:
                phydev->supported &= (PHY_BASIC_FEATURES |
                                      SUPPORTED_Pause |
                                      SUPPORTED_Asym_Pause);
                break;
        default:
                /* Unsupported interface mode: undo the connect. */
                phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
                return -EINVAL;
        }

        tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

        phydev->advertising = phydev->supported;

        return 0;
}
2079
2080 static void tg3_phy_start(struct tg3 *tp)
2081 {
2082         struct phy_device *phydev;
2083
2084         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2085                 return;
2086
2087         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2088
2089         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2090                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2091                 phydev->speed = tp->link_config.speed;
2092                 phydev->duplex = tp->link_config.duplex;
2093                 phydev->autoneg = tp->link_config.autoneg;
2094                 phydev->advertising = tp->link_config.advertising;
2095         }
2096
2097         phy_start(phydev);
2098
2099         phy_start_aneg(phydev);
2100 }
2101
2102 static void tg3_phy_stop(struct tg3 *tp)
2103 {
2104         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2105                 return;
2106
2107         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2108 }
2109
2110 static void tg3_phy_fini(struct tg3 *tp)
2111 {
2112         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2113                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2114                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2115         }
2116 }
2117
2118 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2119 {
2120         int err;
2121         u32 val;
2122
2123         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2124                 return 0;
2125
2126         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2127                 /* Cannot do read-modify-write on 5401 */
2128                 err = tg3_phy_auxctl_write(tp,
2129                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2130                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2131                                            0x4c20);
2132                 goto done;
2133         }
2134
2135         err = tg3_phy_auxctl_read(tp,
2136                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2137         if (err)
2138                 return err;
2139
2140         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2141         err = tg3_phy_auxctl_write(tp,
2142                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2143
2144 done:
2145         return err;
2146 }
2147
2148 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2149 {
2150         u32 phytest;
2151
2152         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2153                 u32 phy;
2154
2155                 tg3_writephy(tp, MII_TG3_FET_TEST,
2156                              phytest | MII_TG3_FET_SHADOW_EN);
2157                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2158                         if (enable)
2159                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2160                         else
2161                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2162                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2163                 }
2164                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2165         }
2166 }
2167
/* Enable or disable the PHY's auto power-down (APD) feature via the
 * misc shadow registers.  Not applicable before 5705, nor on 5717+
 * serdes configurations.
 */
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
        u32 reg;

        if (!tg3_flag(tp, 5705_PLUS) ||
            (tg3_flag(tp, 5717_PLUS) &&
             (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
                return;

        if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                /* FET PHYs use a different shadow-register sequence. */
                tg3_phy_fet_toggle_apd(tp, enable);
                return;
        }

        /* Program the SCR5 shadow register.  The DLLAPD bit is left
         * out only on 5784 when enabling APD.
         */
        reg = MII_TG3_MISC_SHDW_WREN |
              MII_TG3_MISC_SHDW_SCR5_SEL |
              MII_TG3_MISC_SHDW_SCR5_LPED |
              MII_TG3_MISC_SHDW_SCR5_DLPTLM |
              MII_TG3_MISC_SHDW_SCR5_SDTL |
              MII_TG3_MISC_SHDW_SCR5_C125OE;
        if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
                reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

        tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


        /* Program the APD shadow register with an 84ms wake timer and,
         * when requested, the APD enable bit.
         */
        reg = MII_TG3_MISC_SHDW_WREN |
              MII_TG3_MISC_SHDW_APD_SEL |
              MII_TG3_MISC_SHDW_APD_WKTM_84MS;
        if (enable)
                reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

        tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
2202
/* Enable or disable automatic MDI/MDI-X crossover.  Only applies to
 * 5705+ copper PHYs; FET PHYs are programmed through the FET shadow
 * window, others through the auxiliary control misc shadow register.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
        u32 phy;

        if (!tg3_flag(tp, 5705_PLUS) ||
            (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
                return;

        if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                u32 ephy;

                if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
                        u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

                        /* Open the shadow window, flip the MDIX bit,
                         * then restore the test register.
                         */
                        tg3_writephy(tp, MII_TG3_FET_TEST,
                                     ephy | MII_TG3_FET_SHADOW_EN);
                        if (!tg3_readphy(tp, reg, &phy)) {
                                if (enable)
                                        phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
                                else
                                        phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
                                tg3_writephy(tp, reg, phy);
                        }
                        tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
                }
        } else {
                int ret;

                ret = tg3_phy_auxctl_read(tp,
                                          MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
                if (!ret) {
                        if (enable)
                                phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
                        else
                                phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
                        tg3_phy_auxctl_write(tp,
                                             MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
                }
        }
}
2243
2244 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2245 {
2246         int ret;
2247         u32 val;
2248
2249         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2250                 return;
2251
2252         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2253         if (!ret)
2254                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2255                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2256 }
2257
/* Apply per-device PHY tuning values stored in OTP (tp->phy_otp) to
 * the PHY's DSP registers.  Each field is extracted from the packed
 * OTP word and written to its DSP register; requires the SMDSP aux
 * control window to be open for the duration.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
        u32 otp, phy;

        if (!tp->phy_otp)
                return;

        otp = tp->phy_otp;

        if (tg3_phy_toggle_auxctl_smdsp(tp, true))
                return;

        phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
        phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
        tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

        phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
              ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

        phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
        phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
        tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

        phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

        phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

        phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
              ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
        tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

        /* Close the SMDSP window. */
        tg3_phy_toggle_auxctl_smdsp(tp, false);
}
2294
/* Adjust Energy Efficient Ethernet state after a link change.  When
 * the link came up via autoneg at 100/1000 full duplex, program the
 * LPI exit timer and check whether the link partner resolved EEE
 * (tp->setlpicnt is set non-zero if so); otherwise clear the DSP TAP26
 * register and disable LPI in the CPMU.
 */
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
        u32 val;

        if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
                return;

        tp->setlpicnt = 0;

        if (tp->link_config.autoneg == AUTONEG_ENABLE &&
            current_link_up == 1 &&
            tp->link_config.active_duplex == DUPLEX_FULL &&
            (tp->link_config.active_speed == SPEED_100 ||
             tp->link_config.active_speed == SPEED_1000)) {
                u32 eeectl;

                /* LPI exit time depends on link speed. */
                if (tp->link_config.active_speed == SPEED_1000)
                        eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
                else
                        eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

                tw32(TG3_CPMU_EEE_CTRL, eeectl);

                /* Read the clause-45 EEE resolution status. */
                tg3_phy_cl45_read(tp, MDIO_MMD_AN,
                                  TG3_CL45_D7_EEERES_STAT, &val);

                if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
                    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
                        tp->setlpicnt = 2;
        }

        if (!tp->setlpicnt) {
                /* EEE not resolved: clear TAP26 (link up only) and turn
                 * off LPI in the CPMU.
                 */
                if (current_link_up == 1 &&
                   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
                        tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
                        tg3_phy_toggle_auxctl_smdsp(tp, false);
                }

                val = tr32(TG3_CPMU_EEE_MODE);
                tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
        }
}
2337
/* Enable LPI (EEE low-power idle) in the CPMU.  On 5717/5719/57765 at
 * gigabit speed, additional DSP TAP26 bits are programmed first via
 * the SMDSP window.
 */
static void tg3_phy_eee_enable(struct tg3 *tp)
{
        u32 val;

        if (tp->link_config.active_speed == SPEED_1000 &&
            (tg3_asic_rev(tp) == ASIC_REV_5717 ||
             tg3_asic_rev(tp) == ASIC_REV_5719 ||
             tg3_flag(tp, 57765_CLASS)) &&
            !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
                val = MII_TG3_DSP_TAP26_ALNOKO |
                      MII_TG3_DSP_TAP26_RMRXSTO;
                tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
                tg3_phy_toggle_auxctl_smdsp(tp, false);
        }

        val = tr32(TG3_CPMU_EEE_MODE);
        tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
2356
2357 static int tg3_wait_macro_done(struct tg3 *tp)
2358 {
2359         int limit = 100;
2360
2361         while (limit--) {
2362                 u32 tmp32;
2363
2364                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2365                         if ((tmp32 & 0x1000) == 0)
2366                                 break;
2367                 }
2368         }
2369         if (limit < 0)
2370                 return -EBUSY;
2371
2372         return 0;
2373 }
2374
/* Write a known test pattern into each of the four DSP channels and
 * read it back to verify.  On any macro timeout *resetp is set so the
 * caller performs another PHY reset; on verify mismatch a recovery
 * sequence is written instead.  Returns 0 on success, -EBUSY on
 * failure.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
        /* Six pattern words per channel (low/high pairs). */
        static const u32 test_pat[4][6] = {
        { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
        { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
        { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
        { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
        };
        int chan;

        for (chan = 0; chan < 4; chan++) {
                int i;

                /* Address this channel's block and start a write. */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

                for (i = 0; i < 6; i++)
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
                                     test_pat[chan][i]);

                /* Latch the write and wait for it to complete. */
                tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                /* Switch to read-back mode for the same channel. */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
                             (chan * 0x2000) | 0x0200);
                tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
                if (tg3_wait_macro_done(tp)) {
                        *resetp = 1;
                        return -EBUSY;
                }

                /* Read back and compare each low/high word pair. */
                for (i = 0; i < 6; i += 2) {
                        u32 low, high;

                        if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
                            tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
                            tg3_wait_macro_done(tp)) {
                                *resetp = 1;
                                return -EBUSY;
                        }
                        low &= 0x7fff;
                        high &= 0x000f;
                        if (low != test_pat[chan][i] ||
                            high != test_pat[chan][i+1]) {
                                /* Mismatch: write the recovery values,
                                 * no further reset requested.
                                 */
                                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

                                return -EBUSY;
                        }
                }
        }

        return 0;
}
2440
2441 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2442 {
2443         int chan;
2444
2445         for (chan = 0; chan < 4; chan++) {
2446                 int i;
2447
2448                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2449                              (chan * 0x2000) | 0x0200);
2450                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2451                 for (i = 0; i < 6; i++)
2452                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2453                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2454                 if (tg3_wait_macro_done(tp))
2455                         return -EBUSY;
2456         }
2457
2458         return 0;
2459 }
2460
2461 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2462 {
2463         u32 reg32, phy9_orig;
2464         int retries, do_phy_reset, err;
2465
2466         retries = 10;
2467         do_phy_reset = 1;
2468         do {
2469                 if (do_phy_reset) {
2470                         err = tg3_bmcr_reset(tp);
2471                         if (err)
2472                                 return err;
2473                         do_phy_reset = 0;
2474                 }
2475
2476                 /* Disable transmitter and interrupt.  */
2477                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2478                         continue;
2479
2480                 reg32 |= 0x3000;
2481                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2482
2483                 /* Set full-duplex, 1000 mbps.  */
2484                 tg3_writephy(tp, MII_BMCR,
2485                              BMCR_FULLDPLX | BMCR_SPEED1000);
2486
2487                 /* Set to master mode.  */
2488                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2489                         continue;
2490
2491                 tg3_writephy(tp, MII_CTRL1000,
2492                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2493
2494                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2495                 if (err)
2496                         return err;
2497
2498                 /* Block the PHY control access.  */
2499                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2500
2501                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2502                 if (!err)
2503                         break;
2504         } while (--retries);
2505
2506         err = tg3_phy_reset_chanpat(tp);
2507         if (err)
2508                 return err;
2509
2510         tg3_phydsp_write(tp, 0x8005, 0x0000);
2511
2512         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2513         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2514
2515         tg3_phy_toggle_auxctl_smdsp(tp, false);
2516
2517         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2518
2519         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2520                 reg32 &= ~0x3000;
2521                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2522         } else if (!err)
2523                 err = -EBUSY;
2524
2525         return err;
2526 }
2527
/* Mark the link down: notify the network stack and mirror the state
 * in the driver's own link_up flag.
 */
static void tg3_carrier_off(struct tg3 *tp)
{
        netif_carrier_off(tp->dev);
        tp->link_up = false;
}
2533
2534 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2535 {
2536         if (tg3_flag(tp, ENABLE_ASF))
2537                 netdev_warn(tp->dev,
2538                             "Management side-band traffic will be interrupted during phy settings change\n");
2539 }
2540
/* Reset the tigon3 PHY and reapply all chip-specific workarounds and
 * fixup settings (DSP patches, jumbo-frame bits, wirespeed, auto-MDIX).
 *
 * NOTE(review): the old comment here mentioned a FORCE argument that no
 * longer exists; the function now resets the PHY unconditionally.
 * Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
        u32 val, cpmuctrl;
        int err;

        /* 5906: take the ethernet PHY out of IDDQ (low-power) state first. */
        if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                val = tr32(GRC_MISC_CFG);
                tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
                udelay(40);
        }
        /* Double read of BMSR; presumably to clear latched status bits
         * per standard MII practice -- TODO confirm.
         */
        err  = tg3_readphy(tp, MII_BMSR, &val);
        err |= tg3_readphy(tp, MII_BMSR, &val);
        if (err != 0)
                return -EBUSY;

        /* If the link was up, report it down before resetting. */
        if (netif_running(tp->dev) && tp->link_up) {
                netif_carrier_off(tp->dev);
                tg3_link_report(tp);
        }

        /* 5703/5704/5705 need the dedicated test-pattern reset dance. */
        if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
            tg3_asic_rev(tp) == ASIC_REV_5704 ||
            tg3_asic_rev(tp) == ASIC_REV_5705) {
                err = tg3_phy_reset_5703_4_5(tp);
                if (err)
                        return err;
                goto out;
        }

        /* 5784 (non-AX): temporarily clear GPHY_10MB_RXONLY around the
         * BMCR reset and restore it afterwards.
         */
        cpmuctrl = 0;
        if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
            tg3_chip_rev(tp) != CHIPREV_5784_AX) {
                cpmuctrl = tr32(TG3_CPMU_CTRL);
                if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
                        tw32(TG3_CPMU_CTRL,
                             cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
        }

        err = tg3_bmcr_reset(tp);
        if (err)
                return err;

        if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
                val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
                tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

                tw32(TG3_CPMU_CTRL, cpmuctrl);
        }

        /* 5784_AX/5761_AX: drop the 12.5MHz MAC clock selection after reset. */
        if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
            tg3_chip_rev(tp) == CHIPREV_5761_AX) {
                val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
                if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
                    CPMU_LSPD_1000MB_MACCLK_12_5) {
                        val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
                        udelay(40);
                        tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
                }
        }

        /* MII serdes on 5717+ skips the copper-PHY fixups below. */
        if (tg3_flag(tp, 5717_PLUS) &&
            (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
                return 0;

        tg3_phy_apply_otp(tp);

        if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
                tg3_phy_toggle_apd(tp, true);
        else
                tg3_phy_toggle_apd(tp, false);

out:
        /* Per-erratum DSP patches, gated on the phy_flags workaround bits. */
        if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
            !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
                tg3_phydsp_write(tp, 0x201f, 0x2aaa);
                tg3_phydsp_write(tp, 0x000a, 0x0323);
                tg3_phy_toggle_auxctl_smdsp(tp, false);
        }

        if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
        }

        if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
                if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
                        tg3_phydsp_write(tp, 0x000a, 0x310b);
                        tg3_phydsp_write(tp, 0x201f, 0x9506);
                        tg3_phydsp_write(tp, 0x401f, 0x14e2);
                        tg3_phy_toggle_auxctl_smdsp(tp, false);
                }
        } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
                if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
                        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                        if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
                                tg3_writephy(tp, MII_TG3_TEST1,
                                             MII_TG3_TEST1_TRIM_EN | 0x4);
                        } else
                                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

                        tg3_phy_toggle_auxctl_smdsp(tp, false);
                }
        }

        /* Set Extended packet length bit (bit 14) on all chips that */
        /* support jumbo frames */
        if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
                /* Cannot do read-modify-write on 5401 */
                tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
        } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
                /* Set bit 14 with read-modify-write to preserve other bits */
                err = tg3_phy_auxctl_read(tp,
                                          MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
                if (!err)
                        tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
                                           val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
        }

        /* Set phy register 0x10 bit 0 to high fifo elasticity to support
         * jumbo frames transmission.
         */
        if (tg3_flag(tp, JUMBO_CAPABLE)) {
                if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
                        tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                /* adjust output voltage */
                tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
        }

        if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
                tg3_phydsp_write(tp, 0xffb, 0x4000);

        tg3_phy_toggle_automdix(tp, 1);
        tg3_phy_set_wirespeed(tp);
        return 0;
}
2684
/* GPIO power-source handshake flags shared between the PCI functions of
 * a device.  tg3_set_function_status() packs one 4-bit nibble per
 * function into a shared status word, hence the <<0/<<4/<<8/<<12
 * aggregate masks below covering the same flag across all functions.
 */
#define TG3_GPIO_MSG_DRVR_PRES           0x00000001
#define TG3_GPIO_MSG_NEED_VAUX           0x00000002
#define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
                                          TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
        ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
         (TG3_GPIO_MSG_DRVR_PRES << 4) | \
         (TG3_GPIO_MSG_DRVR_PRES << 8) | \
         (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
        ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
         (TG3_GPIO_MSG_NEED_VAUX << 4) | \
         (TG3_GPIO_MSG_NEED_VAUX << 8) | \
         (TG3_GPIO_MSG_NEED_VAUX << 12))
2700
2701 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2702 {
2703         u32 status, shift;
2704
2705         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2706             tg3_asic_rev(tp) == ASIC_REV_5719)
2707                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2708         else
2709                 status = tr32(TG3_CPMU_DRV_STATUS);
2710
2711         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2712         status &= ~(TG3_GPIO_MSG_MASK << shift);
2713         status |= (newstat << shift);
2714
2715         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2716             tg3_asic_rev(tp) == ASIC_REV_5719)
2717                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2718         else
2719                 tw32(TG3_CPMU_DRV_STATUS, status);
2720
2721         return status >> TG3_APE_GPIO_MSG_SHIFT;
2722 }
2723
2724 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2725 {
2726         if (!tg3_flag(tp, IS_NIC))
2727                 return 0;
2728
2729         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2730             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2731             tg3_asic_rev(tp) == ASIC_REV_5720) {
2732                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2733                         return -EIO;
2734
2735                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2736
2737                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2738                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2739
2740                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2741         } else {
2742                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2743                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2744         }
2745
2746         return 0;
2747 }
2748
2749 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2750 {
2751         u32 grc_local_ctrl;
2752
2753         if (!tg3_flag(tp, IS_NIC) ||
2754             tg3_asic_rev(tp) == ASIC_REV_5700 ||
2755             tg3_asic_rev(tp) == ASIC_REV_5701)
2756                 return;
2757
2758         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2759
2760         tw32_wait_f(GRC_LOCAL_CTRL,
2761                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2762                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2763
2764         tw32_wait_f(GRC_LOCAL_CTRL,
2765                     grc_local_ctrl,
2766                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2767
2768         tw32_wait_f(GRC_LOCAL_CTRL,
2769                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2770                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2771 }
2772
/* Switch the NIC's power source to auxiliary power via board-specific
 * GPIO sequences on GRC_LOCAL_CTRL.  Each write is followed by a timed
 * wait (TG3_GRC_LCLCTL_PWRSW_DELAY); the step order is significant.
 */
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
        if (!tg3_flag(tp, IS_NIC))
                return;

        if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
            tg3_asic_rev(tp) == ASIC_REV_5701) {
                /* 5700/5701: a single write drives all three GPIOs at once. */
                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                            (GRC_LCLCTRL_GPIO_OE0 |
                             GRC_LCLCTRL_GPIO_OE1 |
                             GRC_LCLCTRL_GPIO_OE2 |
                             GRC_LCLCTRL_GPIO_OUTPUT0 |
                             GRC_LCLCTRL_GPIO_OUTPUT1),
                            TG3_GRC_LCLCTL_PWRSW_DELAY);
        } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
                   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
                /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
                u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
                                     GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OE2 |
                                     GRC_LCLCTRL_GPIO_OUTPUT0 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1 |
                                     tp->grc_local_ctrl;
                tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
                            TG3_GRC_LCLCTL_PWRSW_DELAY);

                grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
                tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
                            TG3_GRC_LCLCTL_PWRSW_DELAY);

                grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
                tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
                            TG3_GRC_LCLCTL_PWRSW_DELAY);
        } else {
                u32 no_gpio2;
                u32 grc_local_ctrl = 0;

                /* Workaround to prevent overdrawing Amps. */
                if (tg3_asic_rev(tp) == ASIC_REV_5714) {
                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    grc_local_ctrl,
                                    TG3_GRC_LCLCTL_PWRSW_DELAY);
                }

                /* On 5753 and variants, GPIO2 cannot be used. */
                no_gpio2 = tp->nic_sram_data_cfg &
                           NIC_SRAM_DATA_CFG_NO_GPIO2;

                /* Step 1: enable outputs, GPIO1 and (if usable) GPIO2 high. */
                grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
                                  GRC_LCLCTRL_GPIO_OE1 |
                                  GRC_LCLCTRL_GPIO_OE2 |
                                  GRC_LCLCTRL_GPIO_OUTPUT1 |
                                  GRC_LCLCTRL_GPIO_OUTPUT2;
                if (no_gpio2) {
                        grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
                                            GRC_LCLCTRL_GPIO_OUTPUT2);
                }
                tw32_wait_f(GRC_LOCAL_CTRL,
                            tp->grc_local_ctrl | grc_local_ctrl,
                            TG3_GRC_LCLCTL_PWRSW_DELAY);

                /* Step 2: raise GPIO0 as well. */
                grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

                tw32_wait_f(GRC_LOCAL_CTRL,
                            tp->grc_local_ctrl | grc_local_ctrl,
                            TG3_GRC_LCLCTL_PWRSW_DELAY);

                /* Step 3: drop GPIO2 again (when it exists on this board). */
                if (!no_gpio2) {
                        grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
                        tw32_wait_f(GRC_LOCAL_CTRL,
                                    tp->grc_local_ctrl | grc_local_ctrl,
                                    TG3_GRC_LCLCTL_PWRSW_DELAY);
                }
        }
}
2849
2850 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2851 {
2852         u32 msg = 0;
2853
2854         /* Serialize power state transitions */
2855         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2856                 return;
2857
2858         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2859                 msg = TG3_GPIO_MSG_NEED_VAUX;
2860
2861         msg = tg3_set_function_status(tp, msg);
2862
2863         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2864                 goto done;
2865
2866         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2867                 tg3_pwrsrc_switch_to_vaux(tp);
2868         else
2869                 tg3_pwrsrc_die_with_vmain(tp);
2870
2871 done:
2872         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2873 }
2874
2875 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2876 {
2877         bool need_vaux = false;
2878
2879         /* The GPIOs do something completely different on 57765. */
2880         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2881                 return;
2882
2883         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2884             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2885             tg3_asic_rev(tp) == ASIC_REV_5720) {
2886                 tg3_frob_aux_power_5717(tp, include_wol ?
2887                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2888                 return;
2889         }
2890
2891         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2892                 struct net_device *dev_peer;
2893
2894                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2895
2896                 /* remove_one() may have been run on the peer. */
2897                 if (dev_peer) {
2898                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2899
2900                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2901                                 return;
2902
2903                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2904                             tg3_flag(tp_peer, ENABLE_ASF))
2905                                 need_vaux = true;
2906                 }
2907         }
2908
2909         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2910             tg3_flag(tp, ENABLE_ASF))
2911                 need_vaux = true;
2912
2913         if (need_vaux)
2914                 tg3_pwrsrc_switch_to_vaux(tp);
2915         else
2916                 tg3_pwrsrc_die_with_vmain(tp);
2917 }
2918
2919 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2920 {
2921         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2922                 return 1;
2923         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2924                 if (speed != SPEED_10)
2925                         return 1;
2926         } else if (speed == SPEED_10)
2927                 return 1;
2928
2929         return 0;
2930 }
2931
/* Prepare the PHY for power-down.  @do_low_power additionally programs
 * LED-off and aux-control low-power settings on ordinary copper PHYs.
 * Several chips skip the final BMCR_PDOWN because of hardware bugs
 * (see the checks near the end).
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
        u32 val;

        /* Link must stay up through power-down on this configuration. */
        if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
                return;

        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
                /* Serdes PHY: only 5704 needs special handling here. */
                if (tg3_asic_rev(tp) == ASIC_REV_5704) {
                        u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
                        u32 serdes_cfg = tr32(MAC_SERDES_CFG);

                        sg_dig_ctrl |=
                                SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
                        tw32(SG_DIG_CTRL, sg_dig_ctrl);
                        tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
                }
                return;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                /* 5906: reset the PHY, then put the ethernet PHY in IDDQ. */
                tg3_bmcr_reset(tp);
                val = tr32(GRC_MISC_CFG);
                tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
                udelay(40);
                return;
        } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                /* FET PHY: restart autoneg with nothing advertised and
                 * set the shadow AUXMODE4 SBPD bit via MII_TG3_FET_TEST.
                 */
                u32 phytest;
                if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
                        u32 phy;

                        tg3_writephy(tp, MII_ADVERTISE, 0);
                        tg3_writephy(tp, MII_BMCR,
                                     BMCR_ANENABLE | BMCR_ANRESTART);

                        tg3_writephy(tp, MII_TG3_FET_TEST,
                                     phytest | MII_TG3_FET_SHADOW_EN);
                        if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
                                phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
                                tg3_writephy(tp,
                                             MII_TG3_FET_SHDW_AUXMODE4,
                                             phy);
                        }
                        /* Restore shadow-register access state. */
                        tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
                }
                return;
        } else if (do_low_power) {
                tg3_writephy(tp, MII_TG3_EXT_CTRL,
                             MII_TG3_EXT_CTRL_FORCE_LED_OFF);

                val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
                      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
                      MII_TG3_AUXCTL_PCTL_VREG_11V;
                tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
        }

        /* The PHY should not be powered down on some chips because
         * of bugs.
         */
        if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
            tg3_asic_rev(tp) == ASIC_REV_5704 ||
            (tg3_asic_rev(tp) == ASIC_REV_5780 &&
             (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
            (tg3_asic_rev(tp) == ASIC_REV_5717 &&
             !tp->pci_fn))
                return;

        /* 5784_AX/5761_AX: select the 12.5MHz MAC clock before power-down. */
        if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
            tg3_chip_rev(tp) == CHIPREV_5761_AX) {
                val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
                val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
                val |= CPMU_LSPD_1000MB_MACCLK_12_5;
                tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
        }

        tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
3009
3010 /* tp->lock is held. */
3011 static int tg3_nvram_lock(struct tg3 *tp)
3012 {
3013         if (tg3_flag(tp, NVRAM)) {
3014                 int i;
3015
3016                 if (tp->nvram_lock_cnt == 0) {
3017                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3018                         for (i = 0; i < 8000; i++) {
3019                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3020                                         break;
3021                                 udelay(20);
3022                         }
3023                         if (i == 8000) {
3024                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3025                                 return -ENODEV;
3026                         }
3027                 }
3028                 tp->nvram_lock_cnt++;
3029         }
3030         return 0;
3031 }
3032
3033 /* tp->lock is held. */
3034 static void tg3_nvram_unlock(struct tg3 *tp)
3035 {
3036         if (tg3_flag(tp, NVRAM)) {
3037                 if (tp->nvram_lock_cnt > 0)
3038                         tp->nvram_lock_cnt--;
3039                 if (tp->nvram_lock_cnt == 0)
3040                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3041         }
3042 }
3043
3044 /* tp->lock is held. */
3045 static void tg3_enable_nvram_access(struct tg3 *tp)
3046 {
3047         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3048                 u32 nvaccess = tr32(NVRAM_ACCESS);
3049
3050                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3051         }
3052 }
3053
3054 /* tp->lock is held. */
3055 static void tg3_disable_nvram_access(struct tg3 *tp)
3056 {
3057         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3058                 u32 nvaccess = tr32(NVRAM_ACCESS);
3059
3060                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3061         }
3062 }
3063
/* Read one 32-bit word from the legacy serial EEPROM through the
 * GRC_EEPROM_* register interface.  @offset must be dword aligned and
 * fit in the address field.  Polls for completion for up to ~1 second.
 * Returns 0 on success, -EINVAL on a bad offset, -EBUSY on timeout.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
                                        u32 offset, u32 *val)
{
        u32 tmp;
        int i;

        if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
                return -EINVAL;

        /* Preserve unrelated control bits; clear address, device id and
         * direction before programming the read request.
         */
        tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
                                        EEPROM_ADDR_DEVID_MASK |
                                        EEPROM_ADDR_READ);
        tw32(GRC_EEPROM_ADDR,
             tmp |
             (0 << EEPROM_ADDR_DEVID_SHIFT) |
             ((offset << EEPROM_ADDR_ADDR_SHIFT) &
              EEPROM_ADDR_ADDR_MASK) |
             EEPROM_ADDR_READ | EEPROM_ADDR_START);

        /* Poll the COMPLETE bit, 1ms at a time. */
        for (i = 0; i < 1000; i++) {
                tmp = tr32(GRC_EEPROM_ADDR);

                if (tmp & EEPROM_ADDR_COMPLETE)
                        break;
                msleep(1);
        }
        if (!(tmp & EEPROM_ADDR_COMPLETE))
                return -EBUSY;

        tmp = tr32(GRC_EEPROM_DATA);

        /*
         * The data will always be opposite the native endian
         * format.  Perform a blind byteswap to compensate.
         */
        *val = swab32(tmp);

        return 0;
}
3103
/* Maximum number of 10us polls (~100ms) to wait for an NVRAM command. */
#define NVRAM_CMD_TIMEOUT 10000

/* Issue @nvram_cmd to the NVRAM controller and busy-wait for the DONE
 * bit.  Returns 0 when the command completes, -EBUSY on timeout.
 */
static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
        int i;

        tw32(NVRAM_CMD, nvram_cmd);
        for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
                udelay(10);
                if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
                        /* Extra settle delay after DONE is observed. */
                        udelay(10);
                        break;
                }
        }

        if (i == NVRAM_CMD_TIMEOUT)
                return -EBUSY;

        return 0;
}
3124
3125 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3126 {
3127         if (tg3_flag(tp, NVRAM) &&
3128             tg3_flag(tp, NVRAM_BUFFERED) &&
3129             tg3_flag(tp, FLASH) &&
3130             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3131             (tp->nvram_jedecnum == JEDEC_ATMEL))
3132
3133                 addr = ((addr / tp->nvram_pagesize) <<
3134                         ATMEL_AT45DB0X1B_PAGE_POS) +
3135                        (addr % tp->nvram_pagesize);
3136
3137         return addr;
3138 }
3139
3140 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3141 {
3142         if (tg3_flag(tp, NVRAM) &&
3143             tg3_flag(tp, NVRAM_BUFFERED) &&
3144             tg3_flag(tp, FLASH) &&
3145             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3146             (tp->nvram_jedecnum == JEDEC_ATMEL))
3147
3148                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3149                         tp->nvram_pagesize) +
3150                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3151
3152         return addr;
3153 }
3154
3155 /* NOTE: Data read in from NVRAM is byteswapped according to
3156  * the byteswapping settings for all other register accesses.
3157  * tg3 devices are BE devices, so on a BE machine, the data
3158  * returned will be exactly as it is seen in NVRAM.  On a LE
3159  * machine, the 32-bit value will be byteswapped.
3160  */
3161 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3162 {
3163         int ret;
3164
3165         if (!tg3_flag(tp, NVRAM))
3166                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3167
3168         offset = tg3_nvram_phys_addr(tp, offset);
3169
3170         if (offset > NVRAM_ADDR_MSK)
3171                 return -EINVAL;
3172
3173         ret = tg3_nvram_lock(tp);
3174         if (ret)
3175                 return ret;
3176
3177         tg3_enable_nvram_access(tp);
3178
3179         tw32(NVRAM_ADDR, offset);
3180         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3181                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3182
3183         if (ret == 0)
3184                 *val = tr32(NVRAM_RDDATA);
3185
3186         tg3_disable_nvram_access(tp);
3187
3188         tg3_nvram_unlock(tp);
3189
3190         return ret;
3191 }
3192
3193 /* Ensures NVRAM data is in bytestream format. */
3194 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3195 {
3196         u32 v;
3197         int res = tg3_nvram_read(tp, offset, &v);
3198         if (!res)
3199                 *val = cpu_to_be32(v);
3200         return res;
3201 }
3202
/* Write @len bytes from @buf to the legacy serial EEPROM starting at
 * @offset, one 32-bit word at a time.  offset/len are assumed to be
 * dword aligned by the caller (the loop steps by 4).  Returns 0 on
 * success or -EBUSY if a word write never completes (~1s per word).
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
                                    u32 offset, u32 len, u8 *buf)
{
        int i, j, rc = 0;
        u32 val;

        for (i = 0; i < len; i += 4) {
                u32 addr;
                __be32 data;

                addr = offset + i;

                memcpy(&data, buf + i, 4);

                /*
                 * The SEEPROM interface expects the data to always be opposite
                 * the native endian format.  We accomplish this by reversing
                 * all the operations that would have been performed on the
                 * data from a call to tg3_nvram_read_be32().
                 */
                tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

                /* Ack/clear any previous COMPLETE status before starting. */
                val = tr32(GRC_EEPROM_ADDR);
                tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

                val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
                        EEPROM_ADDR_READ);
                tw32(GRC_EEPROM_ADDR, val |
                        (0 << EEPROM_ADDR_DEVID_SHIFT) |
                        (addr & EEPROM_ADDR_ADDR_MASK) |
                        EEPROM_ADDR_START |
                        EEPROM_ADDR_WRITE);

                /* Poll the COMPLETE bit, 1ms at a time. */
                for (j = 0; j < 1000; j++) {
                        val = tr32(GRC_EEPROM_ADDR);

                        if (val & EEPROM_ADDR_COMPLETE)
                                break;
                        msleep(1);
                }
                if (!(val & EEPROM_ADDR_COMPLETE)) {
                        rc = -EBUSY;
                        break;
                }
        }

        return rc;
}
3251
/* offset and length are dword aligned */
/* Read-modify-write for unbuffered flash: for each page touched, read
 * the whole page into a scratch buffer, merge in the new data, issue
 * write-enable + erase, then rewrite the full page word by word.
 *
 * NOTE(review): @buf is never advanced inside the while loop, so every
 * page iteration copies from the start of @buf.  That looks wrong for a
 * write spanning more than one page -- TODO confirm whether callers
 * ever issue multi-page writes through this path.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
                u8 *buf)
{
        int ret = 0;
        u32 pagesize = tp->nvram_pagesize;
        u32 pagemask = pagesize - 1;
        u32 nvram_cmd;
        u8 *tmp;

        /* Scratch buffer holding one full flash page. */
        tmp = kmalloc(pagesize, GFP_KERNEL);
        if (tmp == NULL)
                return -ENOMEM;

        while (len) {
                int j;
                u32 phy_addr, page_off, size;

                /* Page-aligned base of the page containing 'offset'. */
                phy_addr = offset & ~pagemask;

                /* Read the current page contents into the scratch buffer. */
                for (j = 0; j < pagesize; j += 4) {
                        ret = tg3_nvram_read_be32(tp, phy_addr + j,
                                                  (__be32 *) (tmp + j));
                        if (ret)
                                break;
                }
                if (ret)
                        break;

                page_off = offset & pagemask;
                size = pagesize;
                if (len < size)
                        size = len;

                len -= size;

                /* Merge the caller's data over the preserved page image. */
                memcpy(tmp + page_off, buf, size);

                offset = offset + (pagesize - page_off);

                tg3_enable_nvram_access(tp);

                /*
                 * Before we can erase the flash page, we need
                 * to issue a special "write enable" command.
                 */
                nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

                if (tg3_nvram_exec_cmd(tp, nvram_cmd))
                        break;

                /* Erase the target page */
                tw32(NVRAM_ADDR, phy_addr);

                nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
                        NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

                if (tg3_nvram_exec_cmd(tp, nvram_cmd))
                        break;

                /* Issue another write enable to start the write. */
                nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

                if (tg3_nvram_exec_cmd(tp, nvram_cmd))
                        break;

                /* Rewrite the whole page, flagging the first and last words. */
                for (j = 0; j < pagesize; j += 4) {
                        __be32 data;

                        data = *((__be32 *) (tmp + j));

                        tw32(NVRAM_WRDATA, be32_to_cpu(data));

                        tw32(NVRAM_ADDR, phy_addr + j);

                        nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
                                NVRAM_CMD_WR;

                        if (j == 0)
                                nvram_cmd |= NVRAM_CMD_FIRST;
                        else if (j == (pagesize - 4))
                                nvram_cmd |= NVRAM_CMD_LAST;

                        ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
                        if (ret)
                                break;
                }
                if (ret)
                        break;
        }

        /* Always finish with a write-disable, even on error paths. */
        nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
        tg3_nvram_exec_cmd(tp, nvram_cmd);

        kfree(tmp);

        return ret;
}
3350
/* offset and length are dword aligned.
 *
 * Write @len bytes from @buf into buffered-flash or EEPROM-style NVRAM,
 * one 32-bit word per NVRAM command.  Returns 0 on success or the error
 * from tg3_nvram_exec_cmd().
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		/* Source buffer holds big-endian words; the WRDATA
		 * register takes a CPU-order value.
		 */
		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* Mark the first word of a page (or of the request) and
		 * the last word of a page / of the request.
		 */
		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
		    !tg3_flag(tp, FLASH) ||
		    !tg3_flag(tp, 57765_PLUS))
			tw32(NVRAM_ADDR, phy_addr);

		/* Older ST-JEDEC parts (not 5752, not 5755+) need an
		 * explicit write-enable before the first word of a burst.
		 */
		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			u32 cmd;

			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
			ret = tg3_nvram_exec_cmd(tp, cmd);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}
3405
/* offset and length are dword aligned.
 *
 * Top-level NVRAM write entry point: handles write protection, the NVRAM
 * arbitration lock and access/write enables, then dispatches to the
 * EEPROM, buffered, or unbuffered write helper.
 */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	/* Drop GPIO OUTPUT1 (presumably the NVRAM write-protect line —
	 * restored below) for the duration of the write.
	 */
	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!tg3_flag(tp, NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	} else {
		u32 grc_mode;

		/* NOTE(review): this early return skips restoring
		 * GRC_LOCAL_CTRL below when EEPROM_WRITE_PROT is set —
		 * confirm whether that is intended.
		 */
		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		/* Writes require GRC_MODE_NVRAM_WR_ENABLE; cleared again
		 * once the transfer completes.
		 */
		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		} else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	/* Restore the write-protect GPIO state. */
	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}
3455
3456 #define RX_CPU_SCRATCH_BASE     0x30000
3457 #define RX_CPU_SCRATCH_SIZE     0x04000
3458 #define TX_CPU_SCRATCH_BASE     0x34000
3459 #define TX_CPU_SCRATCH_SIZE     0x04000
3460
3461 /* tp->lock is held. */
3462 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3463 {
3464         int i;
3465         const int iters = 10000;
3466
3467         for (i = 0; i < iters; i++) {
3468                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3469                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3470                 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3471                         break;
3472         }
3473
3474         return (i == iters) ? -EBUSY : 0;
3475 }
3476
/* tp->lock is held.
 *
 * Halt the RX CPU.  Returns the poll result from tg3_pause_cpu(), but
 * always issues one final forced halt regardless of that result.
 */
static int tg3_rxcpu_pause(struct tg3 *tp)
{
	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);

	/* Post one more halt (flushed) and let the CPU settle. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
	udelay(10);

	return rc;
}
3488
/* tp->lock is held.
 *
 * Halt the TX CPU; thin wrapper around tg3_pause_cpu().
 */
static int tg3_txcpu_pause(struct tg3 *tp)
{
	return tg3_pause_cpu(tp, TX_CPU_BASE);
}
3494
/* tp->lock is held.
 *
 * Clear the CPU state and mode registers (flushed) so the internal CPU
 * at @cpu_base resumes running.
 */
static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
{
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
}
3501
/* tp->lock is held.
 *
 * Resume the RX CPU; thin wrapper around tg3_resume_cpu().
 */
static void tg3_rxcpu_resume(struct tg3 *tp)
{
	tg3_resume_cpu(tp, RX_CPU_BASE);
}
3507
/* tp->lock is held.
 *
 * Halt the internal RX or TX CPU, handling the chip variants that need
 * special treatment.  Returns 0 on success, -ENODEV on halt timeout.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
{
	int rc;

	/* 5705+ parts have no separate TX CPU to halt. */
	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* 5906 halts its VCPU through GRC_VCPU_EXT_CTRL instead. */
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (cpu_base == RX_CPU_BASE) {
		rc = tg3_rxcpu_pause(tp);
	} else {
		/*
		 * There is only an Rx CPU for the 5750 derivative in the
		 * BCM4785.
		 */
		if (tg3_flag(tp, IS_SSB_CORE))
			return 0;

		rc = tg3_txcpu_pause(tp);
	}

	if (rc) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
3545
3546 static int tg3_fw_data_len(struct tg3 *tp,
3547                            const struct tg3_firmware_hdr *fw_hdr)
3548 {
3549         int fw_len;
3550
3551         /* Non fragmented firmware have one firmware header followed by a
3552          * contiguous chunk of data to be written. The length field in that
3553          * header is not the length of data to be written but the complete
3554          * length of the bss. The data length is determined based on
3555          * tp->fw->size minus headers.
3556          *
3557          * Fragmented firmware have a main header followed by multiple
3558          * fragments. Each fragment is identical to non fragmented firmware
3559          * with a firmware header followed by a contiguous chunk of data. In
3560          * the main header, the length field is unused and set to 0xffffffff.
3561          * In each fragment header the length is the entire size of that
3562          * fragment i.e. fragment data + header length. Data length is
3563          * therefore length field in the header minus TG3_FW_HDR_LEN.
3564          */
3565         if (tp->fw_len == 0xffffffff)
3566                 fw_len = be32_to_cpu(fw_hdr->len);
3567         else
3568                 fw_len = tp->fw->size;
3569
3570         return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3571 }
3572
/* tp->lock is held.
 *
 * Copy the firmware image in @fw_hdr into the scratch memory of the CPU
 * at @cpu_base.  Handles both single-image and fragmented (57766)
 * firmware layouts.  Returns 0 on success or a negative errno.
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 const struct tg3_firmware_hdr *fw_hdr)
{
	int err, i;
	void (*write_op)(struct tg3 *, u32, u32);
	int total_len = tp->fw->size;

	/* No TX CPU exists on 5705+ parts. */
	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	/* Pick the scratch-write primitive suitable for this chip. */
	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
		/* It is possible that bootcode is still loading at this point.
		 * Get the nvram lock first before halting the cpu.
		 */
		int lock_err = tg3_nvram_lock(tp);
		err = tg3_halt_cpu(tp, cpu_base);
		if (!lock_err)
			tg3_nvram_unlock(tp);
		if (err)
			goto out;

		/* Zero the scratch area and keep the CPU halted while the
		 * image is written.
		 */
		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
			write_op(tp, cpu_scratch_base + i, 0);
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,
		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
	} else {
		/* Subtract additional main header for fragmented firmware and
		 * advance to the first fragment
		 */
		total_len -= TG3_FW_HDR_LEN;
		fw_hdr++;
	}

	/* Write each fragment's words at its header-specified base. */
	do {
		u32 *fw_data = (u32 *)(fw_hdr + 1);
		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
			write_op(tp, cpu_scratch_base +
				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
				     (i * sizeof(u32)),
				 be32_to_cpu(fw_data[i]));

		total_len -= be32_to_cpu(fw_hdr->len);

		/* Advance to next fragment */
		fw_hdr = (struct tg3_firmware_hdr *)
			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
	} while (total_len > 0);

	err = 0;

out:
	return err;
}
3638
/* tp->lock is held.
 *
 * Write @pc into the CPU's program counter and verify it sticks,
 * retrying a few times with the CPU forced into halt.  Returns 0 when
 * the PC reads back as @pc, -EBUSY otherwise.
 */
static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
{
	int i;
	const int iters = 5;

	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, pc);

	for (i = 0; i < iters; i++) {
		if (tr32(cpu_base + CPU_PC) == pc)
			break;
		/* PC did not take; halt the CPU and try again. */
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, pc);
		udelay(1000);
	}

	return (i == iters) ? -EBUSY : 0;
}
3659
/* tp->lock is held.
 *
 * Load the 5701 A0 workaround firmware into both internal CPUs' scratch
 * memory, then start only the RX CPU at the image's entry point.
 * Returns 0 on success or a negative errno.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	int err;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC),
				be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_rxcpu_resume(tp);

	return 0;
}
3701
/* Check that the RX CPU's boot code has reached its service loop and
 * that no other service patch is already installed.  Returns 0 when it
 * is safe to download a patch, -EBUSY or -EEXIST otherwise.
 */
static int tg3_validate_rxcpu_state(struct tg3 *tp)
{
	const int iters = 1000;
	int i;
	u32 val;

	/* Wait for boot code to complete initialization and enter service
	 * loop. It is then safe to download service patches
	 */
	for (i = 0; i < iters; i++) {
		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
			break;

		udelay(10);
	}

	if (i == iters) {
		netdev_err(tp->dev, "Boot code not ready for service patches\n");
		return -EBUSY;
	}

	/* A non-zero low byte in the handshake register means another
	 * patch is already present.
	 */
	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
	if (val & 0xff) {
		netdev_warn(tp->dev,
			    "Other patches exist. Not downloading EEE patch\n");
		return -EEXIST;
	}

	return 0;
}
3732
/* tp->lock is held.
 *
 * Download the 57766 service-patch firmware (the EEE patch) to the RX
 * CPU.  Silently returns if the chip has NVRAM, the boot code is not
 * ready, no firmware blob is loaded, or the blob's base address is
 * unexpected.
 */
static void tg3_load_57766_firmware(struct tg3 *tp)
{
	struct tg3_firmware_hdr *fw_hdr;

	/* Only applicable to NVRAM-less configurations. */
	if (!tg3_flag(tp, NO_NVRAM))
		return;

	if (tg3_validate_rxcpu_state(tp))
		return;

	if (!tp->fw)
		return;

	/* This firmware blob has a different format than older firmware
	 * releases as given below. The main difference is we have fragmented
	 * data to be written to non-contiguous locations.
	 *
	 * In the beginning we have a firmware header identical to other
	 * firmware which consists of version, base addr and length. The length
	 * here is unused and set to 0xffffffff.
	 *
	 * This is followed by a series of firmware fragments which are
	 * individually identical to previous firmware. i.e. they have the
	 * firmware header and followed by data for that fragment. The version
	 * field of the individual fragment header is unused.
	 */

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
		return;

	if (tg3_rxcpu_pause(tp))
		return;

	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);

	tg3_rxcpu_resume(tp);
}
3773
/* tp->lock is held.
 *
 * Load the TSO offload firmware into the appropriate internal CPU
 * (RX CPU on 5705, TX CPU otherwise) and start it at the image's entry
 * point.  No-op (returns 0) unless FW_TSO is set.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err;

	if (!tg3_flag(tp, FW_TSO))
		return 0;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	cpu_scratch_size = tp->fw_len;

	/* 5705 loads TSO firmware into the RX CPU's mbuf pool area;
	 * everything else uses the TX CPU scratch space.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup the cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC),
			   be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_resume_cpu(tp, cpu_base);
	return 0;
}
3823
3824
/* tp->lock is held.
 *
 * Program the device MAC address registers from tp->dev->dev_addr.
 * @skip_mac_1 leaves address slot 1 untouched.
 */
static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
{
	u32 addr_high, addr_low;
	int i;

	/* Split the 6-byte address into the 2-byte high and 4-byte low
	 * register halves.
	 */
	addr_high = ((tp->dev->dev_addr[0] << 8) |
		     tp->dev->dev_addr[1]);
	addr_low = ((tp->dev->dev_addr[2] << 24) |
		    (tp->dev->dev_addr[3] << 16) |
		    (tp->dev->dev_addr[4] <<  8) |
		    (tp->dev->dev_addr[5] <<  0));
	/* Fill all four MAC address slots (slot 1 optionally skipped). */
	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	/* 5703/5704 also provide 12 extended address slots. */
	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704) {
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	/* Seed the TX backoff engine from the byte-sum of the address. */
	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
3861
/* Re-write TG3PCI_MISC_HOST_CTRL from the cached tp->misc_host_ctrl so
 * that subsequent register accesses behave as the driver expects.
 */
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
3871
3872 static int tg3_power_up(struct tg3 *tp)
3873 {
3874         int err;
3875
3876         tg3_enable_register_access(tp);
3877
3878         err = pci_set_power_state(tp->pdev, PCI_D0);
3879         if (!err) {
3880                 /* Switch out of Vaux if it is a NIC */
3881                 tg3_pwrsrc_switch_to_vmain(tp);
3882         } else {
3883                 netdev_err(tp->dev, "Transition to D0 failed\n");
3884         }
3885
3886         return err;
3887 }
3888
3889 static int tg3_setup_phy(struct tg3 *, int);
3890
/* Prepare the chip for power-down: save link settings, renegotiate to a
 * low-power link mode, arm Wake-on-LAN if requested, gate clocks where
 * the chip generation allows, and hand off auxiliary power.  Always
 * returns 0.
 */
static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG))
		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
					 PCI_EXP_LNKCTL_CLKREQ_EN);

	/* Mask PCI interrupts for the duration of the shutdown. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			/* Save the current link configuration so it can
			 * be restored on power-up.
			 */
			tp->link_config.speed = phydev->speed;
			tp->link_config.duplex = phydev->duplex;
			tp->link_config.autoneg = phydev->autoneg;
			tp->link_config.advertising = phydev->advertising;

			/* Advertise only low-power modes; 10/half is
			 * always offered, faster modes only for WoL/ASF.
			 */
			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB))
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			/* Only certain Broadcom PHY OUIs get the extra
			 * low-power programming further below.
			 */
			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			tg3_setup_phy(tp, 0);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		/* Give the firmware up to 200 x 1ms sleeps to post its
		 * completion magic in the status mailbox.
		 */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	/* Tell the firmware (via the WoL mailbox) what state we enter. */
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			/* Select the MAC port mode that matches the PHY
			 * type and (for kept links) the active speed.
			 */
			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else if (tp->phy_flags &
				 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
				if (tp->link_config.active_speed == SPEED_1000)
					mac_mode = MAC_MODE_PORT_MODE_GMII;
				else
					mac_mode = MAC_MODE_PORT_MODE_MII;
			} else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (tg3_asic_rev(tp) == ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		/* Enable magic-packet detection for Wake-on-LAN. */
		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		/* Keep the receiver running so wake frames are seen. */
		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Gate chip clocks; which bits apply depends on the generation. */
	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
		    tg3_asic_rev(tp) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Apply the clock changes in two steps, 40us apart. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* Only power the PHY fully down if nothing needs it awake. */
	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp, true);

	/* Workaround for unstable PLL clock */
	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
	    ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
	     (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
4126
/* Place the device into its lowest supported power state.
 *
 * Runs the chip-specific shutdown sequence first, then arms PCI
 * wake (PME) only if the user enabled WOL, and finally drops the
 * device into D3hot via the PCI core.
 */
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	/* Arm wake-from-D3 only when WOL was requested. */
	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
4134
4135 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4136 {
4137         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4138         case MII_TG3_AUX_STAT_10HALF:
4139                 *speed = SPEED_10;
4140                 *duplex = DUPLEX_HALF;
4141                 break;
4142
4143         case MII_TG3_AUX_STAT_10FULL:
4144                 *speed = SPEED_10;
4145                 *duplex = DUPLEX_FULL;
4146                 break;
4147
4148         case MII_TG3_AUX_STAT_100HALF:
4149                 *speed = SPEED_100;
4150                 *duplex = DUPLEX_HALF;
4151                 break;
4152
4153         case MII_TG3_AUX_STAT_100FULL:
4154                 *speed = SPEED_100;
4155                 *duplex = DUPLEX_FULL;
4156                 break;
4157
4158         case MII_TG3_AUX_STAT_1000HALF:
4159                 *speed = SPEED_1000;
4160                 *duplex = DUPLEX_HALF;
4161                 break;
4162
4163         case MII_TG3_AUX_STAT_1000FULL:
4164                 *speed = SPEED_1000;
4165                 *duplex = DUPLEX_FULL;
4166                 break;
4167
4168         default:
4169                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4170                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4171                                  SPEED_10;
4172                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4173                                   DUPLEX_HALF;
4174                         break;
4175                 }
4176                 *speed = SPEED_UNKNOWN;
4177                 *duplex = DUPLEX_UNKNOWN;
4178                 break;
4179         }
4180 }
4181
/* Program the PHY's autonegotiation advertisement registers.
 * @advertise: ethtool ADVERTISED_* link mode mask to offer.
 * @flowctrl:  FLOW_CTRL_TX/RX bits, translated into pause advertisement.
 *
 * Writes MII_ADVERTISE (10/100 + pause), MII_CTRL1000 (gigabit, unless
 * the PHY is 10/100-only) and, when the PHY is EEE-capable, the EEE
 * advertisement plus chip-specific DSP fixups.
 *
 * Returns 0 on success or the error from the first failing PHY write.
 */
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	new_adv = ADVERTISE_CSMA;
	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
	new_adv |= mii_advertise_flowctrl(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

		/* 5701 A0/B0 workaround: force master when advertising 1G. */
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
		if (err)
			goto done;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	/* Disable LPI while (re)configuring the EEE advertisement. */
	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
	if (!err) {
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;
		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;

		/* Per-ASIC DSP tweaks needed for reliable EEE operation. */
		switch (tg3_asic_rev(tp)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_57766:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above... */
			if (val)
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			/* Fall through */
		case ASIC_REV_5720:
		case ASIC_REV_5762:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		/* Always restore auxctl; preserve the first error seen. */
		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
		if (!err)
			err = err2;
	}

done:
	return err;
}
4255
/* Start link bring-up on a copper PHY.
 *
 * With autoneg enabled (or while in low-power state) this programs the
 * advertisement registers and restarts autonegotiation, except that a
 * PHY flagged KEEP_LINK_ON_PWRDN is deliberately left alone to avoid a
 * link flap.  Otherwise the configured speed/duplex is forced via BMCR,
 * first dropping the link (loopback) so the forced mode takes cleanly.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		u32 adv, fc;

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			/* Low power: advertise only the minimum — 10Mb,
			 * plus 100Mb/1G when WOL speed / aux power allow.
			 */
			adv = ADVERTISED_10baseT_Half |
			      ADVERTISED_10baseT_Full;
			if (tg3_flag(tp, WOL_SPEED_100MB))
				adv |= ADVERTISED_100baseT_Half |
				       ADVERTISED_100baseT_Full;
			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
				adv |= ADVERTISED_1000baseT_Half |
				       ADVERTISED_1000baseT_Full;

			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
		} else {
			adv = tp->link_config.advertising;
			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
				adv &= ~(ADVERTISED_1000baseT_Half |
					 ADVERTISED_1000baseT_Full);

			fc = tp->link_config.flowctrl;
		}

		tg3_phy_autoneg_cfg(tp, adv, fc);

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			/* Normally during power down we want to autonegotiate
			 * the lowest possible speed for WOL. However, to avoid
			 * link flap, we leave it untouched.
			 */
			return;
		}

		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		int i;
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* With autoneg disabled, 5715 only links up when the
			 * advertisement register has the configured speed
			 * enabled.
			 */
			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
		}

		/* Translate the configured speed/duplex into BMCR bits. */
		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Force the link down via loopback, wait (up to
			 * ~15ms) for it to actually drop, then program the
			 * new forced mode.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR link bit is latched-low: read twice. */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	}
}
4349
/* Load the BCM5401 DSP workaround coefficients.
 *
 * Disables tap power management / sets the extended packet length bit
 * through the aux-control shadow register, then programs a fixed set
 * of DSP values.  Returns 0 on success; errors from the individual
 * writes are OR-ed together, so any non-zero result means a failure.
 */
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);

	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

	udelay(40);

	return err;
}
4368
4369 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4370 {
4371         u32 advmsk, tgtadv, advertising;
4372
4373         advertising = tp->link_config.advertising;
4374         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4375
4376         advmsk = ADVERTISE_ALL;
4377         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4378                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4379                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4380         }
4381
4382         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4383                 return false;
4384
4385         if ((*lcladv & advmsk) != tgtadv)
4386                 return false;
4387
4388         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4389                 u32 tg3_ctrl;
4390
4391                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4392
4393                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4394                         return false;
4395
4396                 if (tgtadv &&
4397                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4398                      tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4399                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4400                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4401                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4402                 } else {
4403                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4404                 }
4405
4406                 if (tg3_ctrl != tgtadv)
4407                         return false;
4408         }
4409
4410         return true;
4411 }
4412
4413 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4414 {
4415         u32 lpeth = 0;
4416
4417         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4418                 u32 val;
4419
4420                 if (tg3_readphy(tp, MII_STAT1000, &val))
4421                         return false;
4422
4423                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4424         }
4425
4426         if (tg3_readphy(tp, MII_LPA, rmtadv))
4427                 return false;
4428
4429         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4430         tp->link_config.rmt_adv = lpeth;
4431
4432         return true;
4433 }
4434
4435 static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
4436 {
4437         if (curr_link_up != tp->link_up) {
4438                 if (curr_link_up) {
4439                         netif_carrier_on(tp->dev);
4440                 } else {
4441                         netif_carrier_off(tp->dev);
4442                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4443                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4444                 }
4445
4446                 tg3_link_report(tp);
4447                 return true;
4448         }
4449
4450         return false;
4451 }
4452
/* Clear latched MAC events and link-related status bits so that a
 * subsequent link poll starts from a clean slate.
 */
static void tg3_clear_mac_status(struct tg3 *tp)
{
	tw32(MAC_EVENT, 0);

	/* Write-one-to-clear the sticky status bits; flush posted write. */
	tw32_f(MAC_STATUS,
	       MAC_STATUS_SYNC_CHANGED |
	       MAC_STATUS_CFG_CHANGED |
	       MAC_STATUS_MI_COMPLETION |
	       MAC_STATUS_LNKSTATE_CHANGED);
	udelay(40);
}
4464
/* Bring up / re-check the link on a copper PHY and program the MAC to
 * match the negotiated (or forced) speed and duplex.
 * @force_reset: non-zero to unconditionally reset the PHY first.
 *
 * Called from the link timer and setup paths.  Always returns 0.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, val;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tg3_clear_mac_status(tp);

	/* Stop MI auto-polling so the manual MDIO accesses below are safe. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
	     tg3_asic_rev(tp) == ASIC_REV_5704 ||
	     tg3_asic_rev(tp) == ASIC_REV_5705) &&
	    tp->link_up) {
		/* BMSR link status is latched-low: read twice for current. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !tg3_flag(tp, INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			/* 5401 needs its DSP workaround reloaded when the
			 * link is down; then wait up to 10ms for link.
			 */
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 B0 sometimes needs a full reset at 1G. */
			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	/* Enable only the link-change interrupt, or none for FET PHYs. */
	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_UNKNOWN;
	current_duplex = DUPLEX_UNKNOWN;
	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
	tp->link_config.rmt_adv = 0;

	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		/* Ensure bit 10 of the misc-test shadow register is set;
		 * if we had to set it, redo link setup from scratch.
		 */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					  &val);
		if (!err && !(val & (1 << 10))) {
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					     val | (1 << 10));
			goto relink;
		}
	}

	/* Poll (up to ~4ms) for link-up in the latched BMSR. */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for aux status to become valid (non-zero). */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Wait for a sane (non-zero, non-0x7fff) BMCR readback. */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			/* Link counts only if autoneg is on, our adverts
			 * match config and the partner's adverts are readable.
			 */
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
				current_link_up = 1;
		} else {
			/* Forced mode: accept only an exact speed/duplex
			 * match with autoneg disabled in BMCR.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL) {
			u32 reg, bit;

			/* Record MDI-X status (register differs on FET). */
			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
				reg = MII_TG3_FET_GEN_STAT;
				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
			} else {
				reg = MII_TG3_EXT_STAT;
				bit = MII_TG3_EXT_STAT_MDIX;
			}

			if (!tg3_readphy(tp, reg, &val) && (val & bit))
				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;

			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
		}
	}

relink:
	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		tg3_phy_copper_begin(tp);

		if (tg3_flag(tp, ROBOSWITCH)) {
			/* Internal Robo switch ports are always "up". */
			current_link_up = 1;
			/* FIXME: when BCM5325 switch is used use 100 MBit/s */
			current_speed = SPEED_1000;
			current_duplex = DUPLEX_FULL;
			tp->link_config.active_speed = current_speed;
			tp->link_config.active_duplex = current_duplex;
		}

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			current_link_up = 1;
	}

	/* Program the MAC port mode to match the link result. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	/* In order for the 5750 core in BCM4785 chip to work properly
	 * in RGMII mode, the Led Control Register must be set up.
	 */
	if (tg3_flag(tp, RGMII_MODE)) {
		u32 led_ctrl = tr32(MAC_LED_CTRL);
		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);

		if (tp->link_config.active_speed == SPEED_10)
			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
		else if (tp->link_config.active_speed == SPEED_100)
			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
				     LED_CTRL_100MBPS_ON);
		else if (tp->link_config.active_speed == SPEED_1000)
			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
				     LED_CTRL_1000MBPS_ON);

		tw32(MAC_LED_CTRL, led_ctrl);
		udelay(40);
	}

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_phy_eee_adjust(tp, current_link_up);

	if (tg3_flag(tp, USE_LINKCHG_REG)) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* Tell boot firmware we're up at gigabit on 5700 PCI-X/66MHz. */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_CLKREQ_EN);
		else
			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
						 PCI_EXP_LNKCTL_CLKREQ_EN);
	}

	tg3_test_and_report_link_chg(tp, current_link_up);

	return 0;
}
4763
/* Software autonegotiation state for 1000BASE-X fiber links, driven
 * by tg3_fiber_aneg_smachine() when the hardware autoneg engine is
 * not used.
 */
struct tg3_fiber_aneginfo {
	int state;		/* current ANEG_STATE_* of the machine */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	u32 flags;		/* MR_* management/partner-ability flags */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Tick counters used to time state-settle intervals. */
	unsigned long link_time, cur_time;

	/* Last received config word and how long it has been stable. */
	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;	/* config words sent / received on the wire */
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Ticks a state must remain stable before the machine advances. */
#define ANEG_STATE_SETTLE_TIME  10000
4827
4828 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4829                                    struct tg3_fiber_aneginfo *ap)
4830 {
4831         u16 flowctrl;
4832         unsigned long delta;
4833         u32 rx_cfg_reg;
4834         int ret;
4835
4836         if (ap->state == ANEG_STATE_UNKNOWN) {
4837                 ap->rxconfig = 0;
4838                 ap->link_time = 0;
4839                 ap->cur_time = 0;
4840                 ap->ability_match_cfg = 0;
4841                 ap->ability_match_count = 0;
4842                 ap->ability_match = 0;
4843                 ap->idle_match = 0;
4844                 ap->ack_match = 0;
4845         }
4846         ap->cur_time++;
4847
4848         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4849                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4850
4851                 if (rx_cfg_reg != ap->ability_match_cfg) {
4852                         ap->ability_match_cfg = rx_cfg_reg;
4853                         ap->ability_match = 0;
4854                         ap->ability_match_count = 0;
4855                 } else {
4856                         if (++ap->ability_match_count > 1) {
4857                                 ap->ability_match = 1;
4858                                 ap->ability_match_cfg = rx_cfg_reg;
4859                         }
4860                 }
4861                 if (rx_cfg_reg & ANEG_CFG_ACK)
4862                         ap->ack_match = 1;
4863                 else
4864                         ap->ack_match = 0;
4865
4866                 ap->idle_match = 0;
4867         } else {
4868                 ap->idle_match = 1;
4869                 ap->ability_match_cfg = 0;
4870                 ap->ability_match_count = 0;
4871                 ap->ability_match = 0;
4872                 ap->ack_match = 0;
4873
4874                 rx_cfg_reg = 0;
4875         }
4876
4877         ap->rxconfig = rx_cfg_reg;
4878         ret = ANEG_OK;
4879
4880         switch (ap->state) {
4881         case ANEG_STATE_UNKNOWN:
4882                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4883                         ap->state = ANEG_STATE_AN_ENABLE;
4884
4885                 /* fallthru */
4886         case ANEG_STATE_AN_ENABLE:
4887                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4888                 if (ap->flags & MR_AN_ENABLE) {
4889                         ap->link_time = 0;
4890                         ap->cur_time = 0;
4891                         ap->ability_match_cfg = 0;
4892                         ap->ability_match_count = 0;
4893                         ap->ability_match = 0;
4894                         ap->idle_match = 0;
4895                         ap->ack_match = 0;
4896
4897                         ap->state = ANEG_STATE_RESTART_INIT;
4898                 } else {
4899                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
4900                 }
4901                 break;
4902
4903         case ANEG_STATE_RESTART_INIT:
4904                 ap->link_time = ap->cur_time;
4905                 ap->flags &= ~(MR_NP_LOADED);
4906                 ap->txconfig = 0;
4907                 tw32(MAC_TX_AUTO_NEG, 0);
4908                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4909                 tw32_f(MAC_MODE, tp->mac_mode);
4910                 udelay(40);
4911
4912                 ret = ANEG_TIMER_ENAB;
4913                 ap->state = ANEG_STATE_RESTART;
4914
4915                 /* fallthru */
4916         case ANEG_STATE_RESTART:
4917                 delta = ap->cur_time - ap->link_time;
4918                 if (delta > ANEG_STATE_SETTLE_TIME)
4919                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4920                 else
4921                         ret = ANEG_TIMER_ENAB;
4922                 break;
4923
4924         case ANEG_STATE_DISABLE_LINK_OK:
4925                 ret = ANEG_DONE;
4926                 break;
4927
4928         case ANEG_STATE_ABILITY_DETECT_INIT:
4929                 ap->flags &= ~(MR_TOGGLE_TX);
4930                 ap->txconfig = ANEG_CFG_FD;
4931                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4932                 if (flowctrl & ADVERTISE_1000XPAUSE)
4933                         ap->txconfig |= ANEG_CFG_PS1;
4934                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4935                         ap->txconfig |= ANEG_CFG_PS2;
4936                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4937                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4938                 tw32_f(MAC_MODE, tp->mac_mode);
4939                 udelay(40);
4940
4941                 ap->state = ANEG_STATE_ABILITY_DETECT;
4942                 break;
4943
4944         case ANEG_STATE_ABILITY_DETECT:
4945                 if (ap->ability_match != 0 && ap->rxconfig != 0)
4946                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
4947                 break;
4948
4949         case ANEG_STATE_ACK_DETECT_INIT:
4950                 ap->txconfig |= ANEG_CFG_ACK;
4951                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4952                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4953                 tw32_f(MAC_MODE, tp->mac_mode);
4954                 udelay(40);
4955
4956                 ap->state = ANEG_STATE_ACK_DETECT;
4957
4958                 /* fallthru */
4959         case ANEG_STATE_ACK_DETECT:
4960                 if (ap->ack_match != 0) {
4961                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4962                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4963                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4964                         } else {
4965                                 ap->state = ANEG_STATE_AN_ENABLE;
4966                         }
4967                 } else if (ap->ability_match != 0 &&
4968                            ap->rxconfig == 0) {
4969                         ap->state = ANEG_STATE_AN_ENABLE;
4970                 }
4971                 break;
4972
4973         case ANEG_STATE_COMPLETE_ACK_INIT:
4974                 if (ap->rxconfig & ANEG_CFG_INVAL) {
4975                         ret = ANEG_FAILED;
4976                         break;
4977                 }
4978                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4979                                MR_LP_ADV_HALF_DUPLEX |
4980                                MR_LP_ADV_SYM_PAUSE |
4981                                MR_LP_ADV_ASYM_PAUSE |
4982                                MR_LP_ADV_REMOTE_FAULT1 |
4983                                MR_LP_ADV_REMOTE_FAULT2 |
4984                                MR_LP_ADV_NEXT_PAGE |
4985                                MR_TOGGLE_RX |
4986                                MR_NP_RX);
4987                 if (ap->rxconfig & ANEG_CFG_FD)
4988                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4989                 if (ap->rxconfig & ANEG_CFG_HD)
4990                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4991                 if (ap->rxconfig & ANEG_CFG_PS1)
4992                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
4993                 if (ap->rxconfig & ANEG_CFG_PS2)
4994                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4995                 if (ap->rxconfig & ANEG_CFG_RF1)
4996                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4997                 if (ap->rxconfig & ANEG_CFG_RF2)
4998                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4999                 if (ap->rxconfig & ANEG_CFG_NP)
5000                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
5001
5002                 ap->link_time = ap->cur_time;
5003
5004                 ap->flags ^= (MR_TOGGLE_TX);
5005                 if (ap->rxconfig & 0x0008)
5006                         ap->flags |= MR_TOGGLE_RX;
5007                 if (ap->rxconfig & ANEG_CFG_NP)
5008                         ap->flags |= MR_NP_RX;
5009                 ap->flags |= MR_PAGE_RX;
5010
5011                 ap->state = ANEG_STATE_COMPLETE_ACK;
5012                 ret = ANEG_TIMER_ENAB;
5013                 break;
5014
5015         case ANEG_STATE_COMPLETE_ACK:
5016                 if (ap->ability_match != 0 &&
5017                     ap->rxconfig == 0) {
5018                         ap->state = ANEG_STATE_AN_ENABLE;
5019                         break;
5020                 }
5021                 delta = ap->cur_time - ap->link_time;
5022                 if (delta > ANEG_STATE_SETTLE_TIME) {
5023                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5024                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5025                         } else {
5026                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5027                                     !(ap->flags & MR_NP_RX)) {
5028                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5029                                 } else {
5030                                         ret = ANEG_FAILED;
5031                                 }
5032                         }
5033                 }
5034                 break;
5035
5036         case ANEG_STATE_IDLE_DETECT_INIT:
5037                 ap->link_time = ap->cur_time;
5038                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5039                 tw32_f(MAC_MODE, tp->mac_mode);
5040                 udelay(40);
5041
5042                 ap->state = ANEG_STATE_IDLE_DETECT;
5043                 ret = ANEG_TIMER_ENAB;
5044                 break;
5045
5046         case ANEG_STATE_IDLE_DETECT:
5047                 if (ap->ability_match != 0 &&
5048                     ap->rxconfig == 0) {
5049                         ap->state = ANEG_STATE_AN_ENABLE;
5050                         break;
5051                 }
5052                 delta = ap->cur_time - ap->link_time;
5053                 if (delta > ANEG_STATE_SETTLE_TIME) {
5054                         /* XXX another gem from the Broadcom driver :( */
5055                         ap->state = ANEG_STATE_LINK_OK;
5056                 }
5057                 break;
5058
5059         case ANEG_STATE_LINK_OK:
5060                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5061                 ret = ANEG_DONE;
5062                 break;
5063
5064         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5065                 /* ??? unimplemented */
5066                 break;
5067
5068         case ANEG_STATE_NEXT_PAGE_WAIT:
5069                 /* ??? unimplemented */
5070                 break;
5071
5072         default:
5073                 ret = ANEG_FAILED;
5074                 break;
5075         }
5076
5077         return ret;
5078 }
5079
5080 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5081 {
5082         int res = 0;
5083         struct tg3_fiber_aneginfo aninfo;
5084         int status = ANEG_FAILED;
5085         unsigned int tick;
5086         u32 tmp;
5087
5088         tw32_f(MAC_TX_AUTO_NEG, 0);
5089
5090         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5091         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5092         udelay(40);
5093
5094         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5095         udelay(40);
5096
5097         memset(&aninfo, 0, sizeof(aninfo));
5098         aninfo.flags |= MR_AN_ENABLE;
5099         aninfo.state = ANEG_STATE_UNKNOWN;
5100         aninfo.cur_time = 0;
5101         tick = 0;
5102         while (++tick < 195000) {
5103                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5104                 if (status == ANEG_DONE || status == ANEG_FAILED)
5105                         break;
5106
5107                 udelay(1);
5108         }
5109
5110         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5111         tw32_f(MAC_MODE, tp->mac_mode);
5112         udelay(40);
5113
5114         *txflags = aninfo.txconfig;
5115         *rxflags = aninfo.flags;
5116
5117         if (status == ANEG_DONE &&
5118             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5119                              MR_LP_ADV_FULL_DUPLEX)))
5120                 res = 1;
5121
5122         return res;
5123 }
5124
/* Bring up the external BCM8002 SerDes PHY.
 *
 * The raw register numbers and values below are opaque, vendor-provided
 * magic for this PHY; the write order and delays are deliberate — do not
 * reorder.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
        u32 mac_status = tr32(MAC_STATUS);
        int i;

        /* Reset when initting first time or we have a link. */
        if (tg3_flag(tp, INIT_COMPLETE) &&
            !(mac_status & MAC_STATUS_PCS_SYNCED))
                return;

        /* Set PLL lock range. */
        tg3_writephy(tp, 0x16, 0x8007);

        /* SW reset */
        tg3_writephy(tp, MII_BMCR, BMCR_RESET);

        /* Wait for reset to complete. */
        /* XXX schedule_timeout() ... */
        /* 500 * 10us = ~5ms busy wait */
        for (i = 0; i < 500; i++)
                udelay(10);

        /* Config mode; select PMA/Ch 1 regs. */
        tg3_writephy(tp, 0x10, 0x8411);

        /* Enable auto-lock and comdet, select txclk for tx. */
        tg3_writephy(tp, 0x11, 0x0a10);

        tg3_writephy(tp, 0x18, 0x00a0);
        tg3_writephy(tp, 0x16, 0x41ff);

        /* Assert and deassert POR. */
        tg3_writephy(tp, 0x13, 0x0400);
        udelay(40);
        tg3_writephy(tp, 0x13, 0x0000);

        tg3_writephy(tp, 0x11, 0x0a50);
        udelay(40);
        tg3_writephy(tp, 0x11, 0x0a10);

        /* Wait for signal to stabilize */
        /* XXX schedule_timeout() ... */
        /* 15000 * 10us = ~150ms busy wait */
        for (i = 0; i < 15000; i++)
                udelay(10);

        /* Deselect the channel register so we can read the PHYID
         * later.
         */
        tg3_writephy(tp, 0x10, 0x8011);
}
5174
/* Fiber link setup using the SG_DIG hardware autoneg block.
 *
 * Programs SG_DIG_CTRL to match the configured pause advertisement,
 * tracks completion via SG_DIG_STATUS, and falls back to parallel
 * detection when the partner never completes autoneg.  @mac_status is
 * the caller's snapshot of MAC_STATUS (re-read here where needed).
 *
 * Returns nonzero when the link should be considered up.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
        u16 flowctrl;
        u32 sg_dig_ctrl, sg_dig_status;
        u32 serdes_cfg, expected_sg_dig_ctrl;
        int workaround, port_a;
        int current_link_up;

        serdes_cfg = 0;
        expected_sg_dig_ctrl = 0;
        workaround = 0;
        port_a = 1;
        current_link_up = 0;

        /* Every chip except 5704 A0/A1 needs the MAC_SERDES_CFG
         * workaround writes used below.
         */
        if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
            tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
                workaround = 1;
                if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
                        port_a = 0;

                /* preserve bits 0-11,13,14 for signal pre-emphasis */
                /* preserve bits 20-23 for voltage regulator */
                serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
        }

        sg_dig_ctrl = tr32(SG_DIG_CTRL);

        if (tp->link_config.autoneg != AUTONEG_ENABLE) {
                /* Forced link: tear down HW autoneg if it was active,
                 * then declare link up as soon as PCS is synced.
                 */
                if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
                        if (workaround) {
                                u32 val = serdes_cfg;

                                /* Magic per-port serdes values; presumably
                                 * pre-emphasis/driver settings — see the
                                 * bit-preservation note above.
                                 */
                                if (port_a)
                                        val |= 0xc010000;
                                else
                                        val |= 0x4010000;
                                tw32_f(MAC_SERDES_CFG, val);
                        }

                        tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
                }
                if (mac_status & MAC_STATUS_PCS_SYNCED) {
                        /* No negotiated pause parameters in forced mode. */
                        tg3_setup_flow_control(tp, 0, 0);
                        current_link_up = 1;
                }
                goto out;
        }

        /* Want auto-negotiation.  */
        expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

        flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
        if (flowctrl & ADVERTISE_1000XPAUSE)
                expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
        if (flowctrl & ADVERTISE_1000XPSE_ASYM)
                expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

        if (sg_dig_ctrl != expected_sg_dig_ctrl) {
                /* Parallel detect is active, PCS is synced and we are not
                 * receiving config code words: keep the link up while the
                 * detect timer runs down rather than restarting autoneg.
                 */
                if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
                    tp->serdes_counter &&
                    ((mac_status & (MAC_STATUS_PCS_SYNCED |
                                    MAC_STATUS_RCVD_CFG)) ==
                     MAC_STATUS_PCS_SYNCED)) {
                        tp->serdes_counter--;
                        current_link_up = 1;
                        goto out;
                }
restart_autoneg:
                /* (Re)start HW autoneg: soft-reset the SG_DIG block with
                 * the desired advertisement, then release the reset.
                 */
                if (workaround)
                        tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
                tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
                udelay(5);
                tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

                tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
                tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
        } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
                                 MAC_STATUS_SIGNAL_DET)) {
                sg_dig_status = tr32(SG_DIG_STATUS);
                mac_status = tr32(MAC_STATUS);

                if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
                    (mac_status & MAC_STATUS_PCS_SYNCED)) {
                        /* Autoneg finished: resolve flow control from what
                         * we advertised and what the partner reported.
                         */
                        u32 local_adv = 0, remote_adv = 0;

                        if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
                                local_adv |= ADVERTISE_1000XPAUSE;
                        if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
                                local_adv |= ADVERTISE_1000XPSE_ASYM;

                        if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
                                remote_adv |= LPA_1000XPAUSE;
                        if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE_ASYM;

                        tp->link_config.rmt_adv =
                                           mii_adv_to_ethtool_adv_x(remote_adv);

                        tg3_setup_flow_control(tp, local_adv, remote_adv);
                        current_link_up = 1;
                        tp->serdes_counter = 0;
                        tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
                } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
                        if (tp->serdes_counter)
                                tp->serdes_counter--;
                        else {
                                /* Autoneg timed out; try parallel detection
                                 * by dropping back to common setup.
                                 */
                                if (workaround) {
                                        u32 val = serdes_cfg;

                                        if (port_a)
                                                val |= 0xc010000;
                                        else
                                                val |= 0x4010000;

                                        tw32_f(MAC_SERDES_CFG, val);
                                }

                                tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
                                udelay(40);

                                /* Link parallel detection - link is up */
                                /* only if we have PCS_SYNC and not */
                                /* receiving config code words */
                                mac_status = tr32(MAC_STATUS);
                                if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
                                    !(mac_status & MAC_STATUS_RCVD_CFG)) {
                                        tg3_setup_flow_control(tp, 0, 0);
                                        current_link_up = 1;
                                        tp->phy_flags |=
                                                TG3_PHYFLG_PARALLEL_DETECT;
                                        tp->serdes_counter =
                                                SERDES_PARALLEL_DET_TIMEOUT;
                                } else
                                        goto restart_autoneg;
                        }
                }
        } else {
                /* No sync and no signal: rearm the autoneg timeout. */
                tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
                tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
        }

out:
        return current_link_up;
}
5319
5320 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5321 {
5322         int current_link_up = 0;
5323
5324         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5325                 goto out;
5326
5327         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5328                 u32 txflags, rxflags;
5329                 int i;
5330
5331                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5332                         u32 local_adv = 0, remote_adv = 0;
5333
5334                         if (txflags & ANEG_CFG_PS1)
5335                                 local_adv |= ADVERTISE_1000XPAUSE;
5336                         if (txflags & ANEG_CFG_PS2)
5337                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5338
5339                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5340                                 remote_adv |= LPA_1000XPAUSE;
5341                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5342                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5343
5344                         tp->link_config.rmt_adv =
5345                                            mii_adv_to_ethtool_adv_x(remote_adv);
5346
5347                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5348
5349                         current_link_up = 1;
5350                 }
5351                 for (i = 0; i < 30; i++) {
5352                         udelay(20);
5353                         tw32_f(MAC_STATUS,
5354                                (MAC_STATUS_SYNC_CHANGED |
5355                                 MAC_STATUS_CFG_CHANGED));
5356                         udelay(40);
5357                         if ((tr32(MAC_STATUS) &
5358                              (MAC_STATUS_SYNC_CHANGED |
5359                               MAC_STATUS_CFG_CHANGED)) == 0)
5360                                 break;
5361                 }
5362
5363                 mac_status = tr32(MAC_STATUS);
5364                 if (current_link_up == 0 &&
5365                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5366                     !(mac_status & MAC_STATUS_RCVD_CFG))
5367                         current_link_up = 1;
5368         } else {
5369                 tg3_setup_flow_control(tp, 0, 0);
5370
5371                 /* Forcing 1000FD link up. */
5372                 current_link_up = 1;
5373
5374                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5375                 udelay(40);
5376
5377                 tw32_f(MAC_MODE, tp->mac_mode);
5378                 udelay(40);
5379         }
5380
5381 out:
5382         return current_link_up;
5383 }
5384
/* Link setup entry point for TBI (fiber) devices.
 *
 * Re-runs link negotiation (hardware SG_DIG or the software state
 * machine), updates MAC mode, LED state and the recorded link config,
 * and reports link changes.  Always returns 0.
 *
 * NOTE(review): @force_reset is not referenced in this function.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
        u32 orig_pause_cfg;
        u16 orig_active_speed;
        u8 orig_active_duplex;
        u32 mac_status;
        int current_link_up;
        int i;

        /* Snapshot the active settings so we can report only real changes
         * at the end.
         */
        orig_pause_cfg = tp->link_config.active_flowctrl;
        orig_active_speed = tp->link_config.active_speed;
        orig_active_duplex = tp->link_config.active_duplex;

        /* Link flap avoidance: if we are not using HW autoneg and the
         * link is already up with PCS sync and signal detect (and no
         * pending config events), just ack the change bits and leave the
         * link alone.
         */
        if (!tg3_flag(tp, HW_AUTONEG) &&
            tp->link_up &&
            tg3_flag(tp, INIT_COMPLETE)) {
                mac_status = tr32(MAC_STATUS);
                mac_status &= (MAC_STATUS_PCS_SYNCED |
                               MAC_STATUS_SIGNAL_DET |
                               MAC_STATUS_CFG_CHANGED |
                               MAC_STATUS_RCVD_CFG);
                if (mac_status == (MAC_STATUS_PCS_SYNCED |
                                   MAC_STATUS_SIGNAL_DET)) {
                        tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
                                            MAC_STATUS_CFG_CHANGED));
                        return 0;
                }
        }

        tw32_f(MAC_TX_AUTO_NEG, 0);

        /* Switch the MAC to TBI port mode, full duplex. */
        tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
        tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        if (tp->phy_id == TG3_PHY_ID_BCM8002)
                tg3_init_bcm8002(tp);

        /* Enable link change event even when serdes polling.  */
        tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
        udelay(40);

        current_link_up = 0;
        tp->link_config.rmt_adv = 0;
        mac_status = tr32(MAC_STATUS);

        if (tg3_flag(tp, HW_AUTONEG))
                current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
        else
                current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

        /* Clear the link-change bit in the status block without losing
         * the updated flag.
         */
        tp->napi[0].hw_status->status =
                (SD_STATUS_UPDATED |
                 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

        /* Ack latched status-change events until they stay clear
         * (up to 100 attempts).
         */
        for (i = 0; i < 100; i++) {
                tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
                                    MAC_STATUS_CFG_CHANGED));
                udelay(5);
                if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
                                         MAC_STATUS_CFG_CHANGED |
                                         MAC_STATUS_LNKSTATE_CHANGED)) == 0)
                        break;
        }

        mac_status = tr32(MAC_STATUS);
        if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
                current_link_up = 0;
                /* Lost sync with autoneg enabled and no autoneg timer
                 * running: pulse SEND_CONFIGS to prod the partner.
                 */
                if (tp->link_config.autoneg == AUTONEG_ENABLE &&
                    tp->serdes_counter == 0) {
                        tw32_f(MAC_MODE, (tp->mac_mode |
                                          MAC_MODE_SEND_CONFIGS));
                        udelay(1);
                        tw32_f(MAC_MODE, tp->mac_mode);
                }
        }

        /* Fiber is always 1000FD when up; drive the LEDs to match. */
        if (current_link_up == 1) {
                tp->link_config.active_speed = SPEED_1000;
                tp->link_config.active_duplex = DUPLEX_FULL;
                tw32(MAC_LED_CTRL, (tp->led_ctrl |
                                    LED_CTRL_LNKLED_OVERRIDE |
                                    LED_CTRL_1000MBPS_ON));
        } else {
                tp->link_config.active_speed = SPEED_UNKNOWN;
                tp->link_config.active_duplex = DUPLEX_UNKNOWN;
                tw32(MAC_LED_CTRL, (tp->led_ctrl |
                                    LED_CTRL_LNKLED_OVERRIDE |
                                    LED_CTRL_TRAFFIC_OVERRIDE));
        }

        /* If the up/down state did not change, still report flow-control,
         * speed or duplex changes.
         */
        if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
                u32 now_pause_cfg = tp->link_config.active_flowctrl;
                if (orig_pause_cfg != now_pause_cfg ||
                    orig_active_speed != tp->link_config.active_speed ||
                    orig_active_duplex != tp->link_config.active_duplex)
                        tg3_link_report(tp);
        }

        return 0;
}
5487
5488 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5489 {
5490         int current_link_up = 0, err = 0;
5491         u32 bmsr, bmcr;
5492         u16 current_speed = SPEED_UNKNOWN;
5493         u8 current_duplex = DUPLEX_UNKNOWN;
5494         u32 local_adv, remote_adv, sgsr;
5495
5496         if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5497              tg3_asic_rev(tp) == ASIC_REV_5720) &&
5498              !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5499              (sgsr & SERDES_TG3_SGMII_MODE)) {
5500
5501                 if (force_reset)
5502                         tg3_phy_reset(tp);
5503
5504                 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5505
5506                 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5507                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5508                 } else {
5509                         current_link_up = 1;
5510                         if (sgsr & SERDES_TG3_SPEED_1000) {
5511                                 current_speed = SPEED_1000;
5512                                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5513                         } else if (sgsr & SERDES_TG3_SPEED_100) {
5514                                 current_speed = SPEED_100;
5515                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5516                         } else {
5517                                 current_speed = SPEED_10;
5518                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5519                         }
5520
5521                         if (sgsr & SERDES_TG3_FULL_DUPLEX)
5522                                 current_duplex = DUPLEX_FULL;
5523                         else
5524                                 current_duplex = DUPLEX_HALF;
5525                 }
5526
5527                 tw32_f(MAC_MODE, tp->mac_mode);
5528                 udelay(40);
5529
5530                 tg3_clear_mac_status(tp);
5531
5532                 goto fiber_setup_done;
5533         }
5534
5535         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5536         tw32_f(MAC_MODE, tp->mac_mode);
5537         udelay(40);
5538
5539         tg3_clear_mac_status(tp);
5540
5541         if (force_reset)
5542                 tg3_phy_reset(tp);
5543
5544         tp->link_config.rmt_adv = 0;
5545
5546         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5547         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5548         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5549                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5550                         bmsr |= BMSR_LSTATUS;
5551                 else
5552                         bmsr &= ~BMSR_LSTATUS;
5553         }
5554
5555         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5556
5557         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5558             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5559                 /* do nothing, just check for link up at the end */
5560         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5561                 u32 adv, newadv;
5562
5563                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5564                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5565                                  ADVERTISE_1000XPAUSE |
5566                                  ADVERTISE_1000XPSE_ASYM |
5567                                  ADVERTISE_SLCT);
5568
5569                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5570                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5571
5572                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5573                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5574                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5575                         tg3_writephy(tp, MII_BMCR, bmcr);
5576
5577                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5578                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5579                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5580
5581                         return err;
5582                 }
5583         } else {
5584                 u32 new_bmcr;
5585
5586                 bmcr &= ~BMCR_SPEED1000;
5587                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5588
5589                 if (tp->link_config.duplex == DUPLEX_FULL)
5590                         new_bmcr |= BMCR_FULLDPLX;
5591
5592                 if (new_bmcr != bmcr) {
5593                         /* BMCR_SPEED1000 is a reserved bit that needs
5594                          * to be set on write.
5595                          */
5596                         new_bmcr |= BMCR_SPEED1000;
5597
5598                         /* Force a linkdown */
5599                         if (tp->link_up) {
5600                                 u32 adv;
5601
5602                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5603                                 adv &= ~(ADVERTISE_1000XFULL |
5604                                          ADVERTISE_1000XHALF |
5605                                          ADVERTISE_SLCT);
5606                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5607                                 tg3_writephy(tp, MII_BMCR, bmcr |
5608                                                            BMCR_ANRESTART |
5609                                                            BMCR_ANENABLE);
5610                                 udelay(10);
5611                                 tg3_carrier_off(tp);
5612                         }
5613                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5614                         bmcr = new_bmcr;
5615                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5616                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5617                         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5618                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5619                                         bmsr |= BMSR_LSTATUS;
5620                                 else
5621                                         bmsr &= ~BMSR_LSTATUS;
5622                         }
5623                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5624                 }
5625         }
5626
5627         if (bmsr & BMSR_LSTATUS) {
5628                 current_speed = SPEED_1000;
5629                 current_link_up = 1;
5630                 if (bmcr & BMCR_FULLDPLX)
5631                         current_duplex = DUPLEX_FULL;
5632                 else
5633                         current_duplex = DUPLEX_HALF;
5634
5635                 local_adv = 0;
5636                 remote_adv = 0;
5637
5638                 if (bmcr & BMCR_ANENABLE) {
5639                         u32 common;
5640
5641                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5642                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5643                         common = local_adv & remote_adv;
5644                         if (common & (ADVERTISE_1000XHALF |
5645                                       ADVERTISE_1000XFULL)) {
5646                                 if (common & ADVERTISE_1000XFULL)
5647                                         current_duplex = DUPLEX_FULL;
5648                                 else
5649                                         current_duplex = DUPLEX_HALF;
5650
5651                                 tp->link_config.rmt_adv =
5652                                            mii_adv_to_ethtool_adv_x(remote_adv);
5653                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5654                                 /* Link is up via parallel detect */
5655                         } else {
5656                                 current_link_up = 0;
5657                         }
5658                 }
5659         }
5660
5661 fiber_setup_done:
5662         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5663                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5664
5665         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5666         if (tp->link_config.active_duplex == DUPLEX_HALF)
5667                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5668
5669         tw32_f(MAC_MODE, tp->mac_mode);
5670         udelay(40);
5671
5672         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5673
5674         tp->link_config.active_speed = current_speed;
5675         tp->link_config.active_duplex = current_duplex;
5676
5677         tg3_test_and_report_link_chg(tp, current_link_up);
5678         return err;
5679 }
5680
/* Poll-time helper for serdes devices running autonegotiation.
 *
 * If autoneg has had time to complete but the link is still down and
 * the PHY reports signal detect without incoming config code words,
 * force the link up at 1000/full ("parallel detection").  Conversely,
 * if a parallel-detected link later starts receiving config code
 * words, hand control back to autonegotiation.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!tp->link_up &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
					 MII_TG3_DSP_EXP1_INT_STAT);
			/* NOTE(review): register is read twice — presumably the
			 * first read clears latched status; confirm against the
			 * PHY datasheet before changing.
			 */
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (tp->link_up &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				 MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

		}
	}
}
5740
/* (Re)configure the PHY and MAC for the current link settings.
 *
 * Dispatches to the fiber, fiber-MII or copper setup routine based on
 * the PHY type flags, then adjusts the GRC clock prescaler (5784 AX
 * only), the MAC TX slot time / IPG, statistics coalescing and the
 * ASPM power-management threshold to match the resulting link state.
 * Returns the error status of the PHY setup routine.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		u32 scale;

		/* Pick a GRC prescaler value matching the current MAC
		 * core clock frequency.
		 */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		/* Preserve the jumbo frame length and countdown fields
		 * on these chips.
		 */
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	/* Gigabit half-duplex gets a longer slot time. */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Only coalesce hardware statistics while the link is up. */
		if (tp->link_up) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		/* Adjust the PCIe L1 entry threshold according to link
		 * state (workaround for ASPM-related issues).
		 */
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!tp->link_up)
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
5806
5807 /* tp->lock must be held */
5808 static u64 tg3_refclk_read(struct tg3 *tp)
5809 {
5810         u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
5811         return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
5812 }
5813
5814 /* tp->lock must be held */
5815 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
5816 {
5817         tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
5818         tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
5819         tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
5820         tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
5821 }
5822
5823 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
5824 static inline void tg3_full_unlock(struct tg3 *tp);
5825 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
5826 {
5827         struct tg3 *tp = netdev_priv(dev);
5828
5829         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5830                                 SOF_TIMESTAMPING_RX_SOFTWARE |
5831                                 SOF_TIMESTAMPING_SOFTWARE    |
5832                                 SOF_TIMESTAMPING_TX_HARDWARE |
5833                                 SOF_TIMESTAMPING_RX_HARDWARE |
5834                                 SOF_TIMESTAMPING_RAW_HARDWARE;
5835
5836         if (tp->ptp_clock)
5837                 info->phc_index = ptp_clock_index(tp->ptp_clock);
5838         else
5839                 info->phc_index = -1;
5840
5841         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
5842
5843         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
5844                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
5845                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
5846                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
5847         return 0;
5848 }
5849
/* PTP callback: adjust the hardware clock frequency by @ppb parts per
 * billion (negative values slow the clock).  Always returns 0.
 */
static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
	bool neg_adj = false;
	u32 correction = 0;

	/* Work with the magnitude; the sign is programmed separately. */
	if (ppb < 0) {
		neg_adj = true;
		ppb = -ppb;
	}

	/* Frequency adjustment is performed using hardware with a 24 bit
	 * accumulator and a programmable correction value. On each clk, the
	 * correction value gets added to the accumulator and when it
	 * overflows, the time counter is incremented/decremented.
	 *
	 * So conversion from ppb to correction value is
	 *		ppb * (1 << 24) / 1000000000
	 */
	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
		     TG3_EAV_REF_CLK_CORRECT_MASK;

	tg3_full_lock(tp, 0);

	if (correction)
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
		     TG3_EAV_REF_CLK_CORRECT_EN |
		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
	else
		/* Zero correction: disable the correction logic entirely. */
		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);

	tg3_full_unlock(tp);

	return 0;
}
5885
5886 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
5887 {
5888         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5889
5890         tg3_full_lock(tp, 0);
5891         tp->ptp_adjust += delta;
5892         tg3_full_unlock(tp);
5893
5894         return 0;
5895 }
5896
5897 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
5898 {
5899         u64 ns;
5900         u32 remainder;
5901         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5902
5903         tg3_full_lock(tp, 0);
5904         ns = tg3_refclk_read(tp);
5905         ns += tp->ptp_adjust;
5906         tg3_full_unlock(tp);
5907
5908         ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
5909         ts->tv_nsec = remainder;
5910
5911         return 0;
5912 }
5913
5914 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
5915                            const struct timespec *ts)
5916 {
5917         u64 ns;
5918         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5919
5920         ns = timespec_to_ns(ts);
5921
5922         tg3_full_lock(tp, 0);
5923         tg3_refclk_write(tp, ns);
5924         tp->ptp_adjust = 0;
5925         tg3_full_unlock(tp);
5926
5927         return 0;
5928 }
5929
/* PTP callback: ancillary features (alarms, external timestamps,
 * periodic outputs, PPS) are not supported by this driver.
 */
static int tg3_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	return -EOPNOTSUPP;
}
5935
/* PTP hardware clock capabilities and callbacks registered with the
 * PTP core.  No ancillary features (alarms, external timestamps,
 * periodic outputs, PPS) are exposed.
 */
static const struct ptp_clock_info tg3_ptp_caps = {
	.owner		= THIS_MODULE,
	.name		= "tg3 clock",
	.max_adj	= 250000000,	/* max frequency adjustment, in ppb */
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.pps		= 0,
	.adjfreq	= tg3_ptp_adjfreq,
	.adjtime	= tg3_ptp_adjtime,
	.gettime	= tg3_ptp_gettime,
	.settime	= tg3_ptp_settime,
	.enable		= tg3_ptp_enable,
};
5950
5951 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
5952                                      struct skb_shared_hwtstamps *timestamp)
5953 {
5954         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
5955         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
5956                                            tp->ptp_adjust);
5957 }
5958
5959 /* tp->lock must be held */
5960 static void tg3_ptp_init(struct tg3 *tp)
5961 {
5962         if (!tg3_flag(tp, PTP_CAPABLE))
5963                 return;
5964
5965         /* Initialize the hardware clock to the system time. */
5966         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
5967         tp->ptp_adjust = 0;
5968         tp->ptp_info = tg3_ptp_caps;
5969 }
5970
5971 /* tp->lock must be held */
5972 static void tg3_ptp_resume(struct tg3 *tp)
5973 {
5974         if (!tg3_flag(tp, PTP_CAPABLE))
5975                 return;
5976
5977         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
5978         tp->ptp_adjust = 0;
5979 }
5980
5981 static void tg3_ptp_fini(struct tg3 *tp)
5982 {
5983         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
5984                 return;
5985
5986         ptp_clock_unregister(tp->ptp_clock);
5987         tp->ptp_clock = NULL;
5988         tp->ptp_adjust = 0;
5989 }
5990
/* Return the irq_sync flag; nonzero indicates interrupt
 * synchronization is in progress.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
5995
/* Copy @len bytes of registers, starting at register offset @off, into
 * the buffer @dst.  Note that @dst is first advanced by @off bytes, so
 * the destination buffer is laid out by absolute register offset.
 */
static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
{
	int i;

	dst = (u32 *)((u8 *)dst + off);
	for (i = 0; i < len; i += sizeof(u32))
		*dst++ = tr32(off + i);
}
6004
/* Snapshot the legacy (non-PCI-Express) register blocks into @regs for
 * the debug state dump.  Each tg3_rd32_loop() call copies one hardware
 * block; entries land in @regs at their absolute register offset.
 */
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	/* Per-vector coalescing registers only exist with MSI-X. */
	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	/* TX CPU registers are absent on 5705 and later chips. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
6054
/* Dump device registers, hardware status blocks and per-vector NAPI
 * state to the kernel log for debugging.  Uses a GFP_ATOMIC scratch
 * buffer so it is callable from non-sleeping context; silently
 * returns if the allocation fails.
 */
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs)
		return;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	/* Print four registers per line, skipping all-zero groups. */
	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		/* Driver-side NAPI ring indices for the same vector. */
		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
6110
6111 /* This is called whenever we suspect that the system chipset is re-
6112  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6113  * is bogus tx completions. We try to recover by setting the
6114  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6115  * in the workqueue.
6116  */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* If the reorder workaround is already active (or writes go
	 * through the indirect mailbox path), this path should be
	 * impossible — catch driver logic errors loudly.
	 */
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	/* Defer the actual chip reset to the workqueue. */
	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}
6132
6133 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6134 {
6135         /* Tell compiler to fetch tx indices from memory. */
6136         barrier();
6137         return tnapi->tx_pending -
6138                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6139 }
6140
6141 /* Tigon3 never reports partial packet sends.  So we do not
6142  * need special logic to handle SKBs that have not had all
6143  * of their frags sent yet, like SunGEM does.
6144  */
/* Reclaim completed TX descriptors for one NAPI vector: walk the ring
 * from the software consumer index to the hardware consumer index,
 * unmapping DMA, harvesting hardware TX timestamps, and freeing skbs.
 * On ring corruption, triggers tg3_tx_recover() and bails out.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	/* With TSS, vector 0 carries no TX ring, so the TX queue
	 * numbering is shifted down by one.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completed slot with no skb means the ring state is
		 * inconsistent — likely reordered mailbox writes.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* Hand a hardware TX timestamp back to the stack if one
		 * was requested for this descriptor.
		 */
		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
			struct skb_shared_hwtstamps timestamp;
			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;

			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);

			skb_tstamp_tx(skb, &timestamp);
		}

		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		/* Skip extra descriptors used when this buffer had to be
		 * split across multiple BDs.
		 */
		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			/* A fragment slot with a stale skb, or running past
			 * the hardware index, indicates ring corruption.
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Wake a stopped queue once enough descriptors are free; the
	 * stopped state is rechecked under the TX lock to close the
	 * race with the xmit path.
	 */
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
6244
6245 static void tg3_frag_free(bool is_frag, void *data)
6246 {
6247         if (is_frag)
6248                 put_page(virt_to_head_page(data));
6249         else
6250                 kfree(data);
6251 }
6252
6253 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6254 {
6255         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6256                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6257
6258         if (!ri->data)
6259                 return;
6260
6261         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6262                          map_sz, PCI_DMA_FROMDEVICE);
6263         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6264         ri->data = NULL;
6265 }
6266
6267
6268 /* Returns size of skb allocated or < 0 on error.
6269  *
6270  * We only need to fill in the address because the other members
6271  * of the RX descriptor are invariant, see tg3_init_rings.
6272  *
6273  * Note the purposeful assymetry of cpu vs. chip accesses.  For
6274  * posting buffers we only dirty the first cache line of the RX
6275  * descriptor (containing the address).  Whereas for the RX status
6276  * buffers the cpu only reads the last cacheline of the RX descriptor
6277  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6278  */
/* Allocate and DMA-map a fresh RX data buffer, then post its address
 * into the standard or jumbo producer ring at @dest_idx_unmasked.
 * On success, *@frag_size is the page-fragment size used (0 if the
 * buffer came from kmalloc) and the data size is returned; on failure
 * a negative errno is returned and the ring entry is left untouched.
 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked,
			     unsigned int *frag_size)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	/* Prefer cheap page fragments when the buffer fits in a page. */
	if (skb_size <= PAGE_SIZE) {
		data = netdev_alloc_frag(skb_size);
		*frag_size = skb_size;
	} else {
		data = kmalloc(skb_size, GFP_ATOMIC);
		*frag_size = 0;
	}
	if (!data)
		return -ENOMEM;

	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 data_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
		tg3_frag_free(skb_size <= PAGE_SIZE, data);
		return -EIO;
	}

	/* Commit point: publish the buffer and its DMA address. */
	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}
6343
6344 /* We only need to move over in the address because the other
6345  * members of the RX descriptor are invariant.  See notes above
6346  * tg3_alloc_rx_data for full details.
6347  */
/* Recycle an RX buffer: move the buffer at @src_idx of the source
 * producer ring (always napi[0]'s ring) to @dest_idx_unmasked of the
 * destination ring @dpr without reallocating.  Only the address and
 * mapping move; the rest of the descriptor is invariant (see notes
 * above tg3_alloc_rx_data).
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->data = NULL;
}
6393
6394 /* The RX ring scheme is composed of multiple rings which post fresh
6395  * buffers to the chip, and one special ring the chip uses to report
6396  * status back to the host.
6397  *
6398  * The special ring reports the status of received packets to the
6399  * host.  The chip does not write into the original descriptor the
6400  * RX buffer was obtained from.  The chip simply takes the original
6401  * descriptor as provided by the host, updates the status and length
6402  * field, then writes this into the next status ring entry.
6403  *
6404  * Each ring the host uses to post buffers to the chip is described
6405  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
6406  * it is first placed into the on-chip ram.  When the packet's length
6407  * is known, it walks down the TG3_BDINFO entries to select the ring.
6408  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6409  * which is within the range of the new packet's length is chosen.
6410  *
6411  * The "separate ring for rx status" scheme may sound queer, but it makes
6412  * sense from a cache coherency perspective.  If only the host writes
6413  * to the buffer post rings, and only the chip writes to the rx status
6414  * rings, then cache lines never move beyond shared-modified state.
6415  * If both the host and chip were to write into the same ring, cache line
6416  * eviction could occur since both entities want it in an exclusive state.
6417  */
/* Main receive path for one NAPI context.
 *
 * Drains up to @budget completed descriptors from this vector's RX
 * return ring, pushes the packets up the stack via GRO, and tracks how
 * many fresh buffers must be reposted to the standard/jumbo producer
 * rings.  Returns the number of packets delivered.
 *
 * NOTE(review): buffer bookkeeping (rx_std_buffers/rx_jmb_buffers) is
 * always taken from tp->napi[0].prodring, while this vector's own
 * tnapi->prodring shadow indices are advanced; in RSS mode the xfer to
 * napi[0] happens later in tg3_rx_prodring_xfer().
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		u8 *data;
		u64 tstamp = 0;

		/* The opaque cookie tells us which producer ring (standard
		 * or jumbo) this buffer was posted on, and at what index.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		/* Drop frames the chip flagged as errored, except the
		 * odd-nibble MII indication which is tolerated on its own.
		 */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		prefetch(data + TG3_RX_OFFSET(tp));
		/* Hardware length includes the FCS; strip it. */
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		/* Latch the hardware RX timestamp for PTP frames. */
		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV1 ||
		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV2) {
			tstamp = tr32(TG3_RX_TSTAMP_LSB);
			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
		}

		if (len > TG3_RX_COPY_THRESH(tp)) {
			/* Large frame: hand the DMA buffer itself to the
			 * stack and allocate a replacement buffer.
			 */
			int skb_size;
			unsigned int frag_size;

			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						    *post_ptr, &frag_size);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			skb = build_skb(data, frag_size);
			if (!skb) {
				tg3_frag_free(frag_size != 0, data);
				goto drop_it_no_recycle;
			}
			skb_reserve(skb, TG3_RX_OFFSET(tp));
			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->data = NULL;

		} else {
			/* Small frame: copy into a fresh skb and recycle
			 * the original DMA buffer back to the ring.
			 */
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
			if (skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(skb->data,
			       data + TG3_RX_OFFSET(tp),
			       len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
		}

		skb_put(skb, len);
		if (tstamp)
			tg3_hwclock_to_timestamp(tp, tstamp,
						 skb_hwtstamps(skb));

		/* Trust the hardware checksum only when the chip reports a
		 * complete TCP/UDP checksum of 0xffff.
		 */
		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		/* Discard oversized frames unless VLAN tagged. */
		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb,
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically flush the standard producer index to the
		 * chip so it never starves for buffers mid-loop.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		/* Sync BD data before updating mailbox */
		wmb();

		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		/* Kick napi[1] to transfer our shadow ring back to the
		 * hardware producer ring owned by vector 0.
		 */
		if (tnapi != &tp->napi[1]) {
			tp->rx_refill = true;
			napi_schedule(&tp->napi[1].napi);
		}
	}

	return received;
}
6623
/* Check the status block for a link-change event and service it.
 *
 * Only used when link changes are reported through the status block
 * (i.e. neither USE_LINKCHG_REG nor POLL_SERDES is set).  The
 * LINK_CHG bit is cleared in the status block before handling so a
 * change occurring during processing is not lost.  Takes tp->lock
 * around the hardware access.
 */
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				/* phylib owns the PHY; just ack the MAC
				 * status bits so the attention deasserts.
				 */
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}
}
6647
/* Transfer recycled RX buffers from a source producer-ring shadow set
 * @spr (owned by an RSS vector) into the destination set @dpr (owned by
 * napi[0], which fronts the hardware rings).
 *
 * Copies as many contiguous entries as possible per pass, for both the
 * standard and jumbo rings, stopping early with -ENOSPC if a
 * destination slot is still occupied (data != NULL).  Returns 0 on a
 * complete transfer, -ENOSPC if the destination filled up.
 *
 * Lockless: correctness relies on the smp_rmb() pairing with the
 * smp_wmb() in tg3_recycle_rx()/tg3_rx().
 */
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	/* Standard ring transfer loop. */
	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		/* Copy only up to the ring wrap point this pass. */
		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		/* Shrink the copy if a destination slot is still in use. */
		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	/* Jumbo ring transfer loop; same algorithm as above. */
	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
6773
/* Core per-vector work routine shared by tg3_poll() and tg3_poll_msix().
 *
 * Runs TX completion first, then RX within the remaining NAPI budget.
 * On the RSS refill vector (napi[1]) it additionally drains every
 * vector's shadow producer ring back into napi[0]'s hardware rings and
 * rings the producer mailboxes.  Returns the updated @work_done count.
 */
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	/* Vectors without an RX return ring have nothing more to do. */
	if (!tnapi->rx_rcb_prod_idx)
		return work_done;

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		tp->rx_refill = false;
		for (i = 1; i <= tp->rxq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		/* Order BD writes before the mailbox updates below. */
		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		/* A transfer error means a destination ring was full;
		 * force a coalescing interrupt so we retry soon.
		 */
		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
6824
/* Schedule the chip-reset workqueue task exactly once; the atomic
 * test_and_set on RESET_TASK_PENDING dedups concurrent requests.
 */
static inline void tg3_reset_task_schedule(struct tg3 *tp)
{
	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
		schedule_work(&tp->reset_task);
}
6830
/* Cancel any in-flight or pending reset task and clear the associated
 * flags.  cancel_work_sync() must run first so the flags are not
 * cleared underneath a still-executing reset_task.
 */
static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}
6837
/* NAPI poll handler for MSI-X vectors 1..n (RX/TX rings only; link
 * events are handled by vector 0's tg3_poll()).  Loops until the
 * budget is exhausted or no work remains, then re-enables the vector's
 * interrupt via its tagged mailbox.  Returns packets processed.
 */
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {

			/* This test here is not race free, but will reduce
			 * the number of interrupts by looping again.
			 */
			if (tnapi == &tp->napi[1] && tp->rx_refill)
				continue;

			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

			/* This test here is synchronized by napi_schedule()
			 * and napi_complete() to close the race condition.
			 */
			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
				tw32(HOSTCC_MODE, tp->coalesce_mode |
						  HOSTCC_MODE_ENABLE |
						  tnapi->coal_now);
			}
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
6897
/* Investigate an SD_STATUS_ERROR indication from the status block.
 *
 * Reads the flow-attention, MSI-status and DMA-status registers to
 * decide whether the error is real; if so, dumps chip state and
 * schedules a full reset.  ERROR_PROCESSED gates re-entry so a single
 * error is only acted on once until the reset clears it.
 */
static void tg3_process_error(struct tg3 *tp)
{
	u32 val;
	bool real_error = false;

	if (tg3_flag(tp, ERROR_PROCESSED))
		return;

	/* Check Flow Attention register */
	val = tr32(HOSTCC_FLOW_ATTN);
	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (!real_error)
		return;

	tg3_dump_state(tp);

	tg3_flag_set(tp, ERROR_PROCESSED);
	tg3_reset_task_schedule(tp);
}
6931
/* NAPI poll handler for vector 0 (and the only handler in non-MSI-X
 * modes).  In addition to RX/TX work it services chip error attention
 * and link-change events.  Loops until out of budget or out of work,
 * then completes NAPI and re-enables interrupts.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
6979
6980 static void tg3_napi_disable(struct tg3 *tp)
6981 {
6982         int i;
6983
6984         for (i = tp->irq_cnt - 1; i >= 0; i--)
6985                 napi_disable(&tp->napi[i].napi);
6986 }
6987
6988 static void tg3_napi_enable(struct tg3 *tp)
6989 {
6990         int i;
6991
6992         for (i = 0; i < tp->irq_cnt; i++)
6993                 napi_enable(&tp->napi[i].napi);
6994 }
6995
6996 static void tg3_napi_init(struct tg3 *tp)
6997 {
6998         int i;
6999
7000         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7001         for (i = 1; i < tp->irq_cnt; i++)
7002                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7003 }
7004
7005 static void tg3_napi_fini(struct tg3 *tp)
7006 {
7007         int i;
7008
7009         for (i = 0; i < tp->irq_cnt; i++)
7010                 netif_napi_del(&tp->napi[i].napi);
7011 }
7012
/* Quiesce the data path: stop NAPI, drop carrier and freeze TX queues.
 * trans_start is refreshed first so the stack's watchdog does not
 * declare a TX timeout while the device is deliberately stopped.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies; /* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_carrier_off(tp->dev);
	netif_tx_disable(tp->dev);
}
7020
/* Restart the data path after tg3_netif_stop(): resume PTP, wake TX
 * queues, restore carrier if the link is up, re-enable NAPI and chip
 * interrupts.  tp->lock must be held.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	tg3_ptp_resume(tp);

	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	if (tp->link_up)
		netif_carrier_on(tp->dev);

	tg3_napi_enable(tp);
	/* Force a status-block pass so any work that arrived while the
	 * device was stopped is picked up once interrupts are on.
	 */
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
7039
/* Mark interrupts as synchronizing and wait for every vector's handler
 * to finish.  The smp_mb() ensures the irq_sync store is visible to
 * handlers before synchronize_irq() waits for them; handlers check
 * irq_sync via tg3_irq_sync() and bail out while it is set.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}
7052
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
7064
/* Release the lock taken by tg3_full_lock(). */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
7069
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 *
 * Just prefetch the status block / next RX descriptor and hand off to
 * NAPI, unless interrupts are being quiesced (tg3_irq_sync).
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}
7087
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
7113
/* Legacy INTx interrupt handler (non-tagged status mode).  Determines
 * whether the interrupt is ours, acks it via the interrupt mailbox and
 * schedules NAPI if the status block indicates work.  Returns
 * IRQ_HANDLED/IRQ_NONE accordingly for the shared-IRQ core.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
7162
/* Legacy INTx interrupt handler for tagged-status mode.  Uses the
 * status tag (rather than SD_STATUS_UPDATED) to decide whether new
 * work has been posted since the last serviced interrupt, acks the
 * mailbox and schedules NAPI unconditionally once the tag is recorded.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
7214
7215 /* ISR for interrupt test */
7216 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7217 {
7218         struct tg3_napi *tnapi = dev_id;
7219         struct tg3 *tp = tnapi->tp;
7220         struct tg3_hw_status *sblk = tnapi->hw_status;
7221
7222         if ((sblk->status & SD_STATUS_UPDATED) ||
7223             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7224                 tg3_disable_ints(tp);
7225                 return IRQ_RETVAL(1);
7226         }
7227         return IRQ_RETVAL(0);
7228 }
7229
7230 #ifdef CONFIG_NET_POLL_CONTROLLER
7231 static void tg3_poll_controller(struct net_device *dev)
7232 {
7233         int i;
7234         struct tg3 *tp = netdev_priv(dev);
7235
7236         if (tg3_irq_sync(tp))
7237                 return;
7238
7239         for (i = 0; i < tp->irq_cnt; i++)
7240                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7241 }
7242 #endif
7243
7244 static void tg3_tx_timeout(struct net_device *dev)
7245 {
7246         struct tg3 *tp = netdev_priv(dev);
7247
7248         if (netif_msg_tx_err(tp)) {
7249                 netdev_err(dev, "transmit timed out, resetting\n");
7250                 tg3_dump_state(tp);
7251         }
7252
7253         tg3_reset_task_schedule(tp);
7254 }
7255
7256 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7257 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7258 {
7259         u32 base = (u32) mapping & 0xffffffff;
7260
7261         return (base > 0xffffdcc0) && (base + len + 8 < base);
7262 }
7263
7264 /* Test for DMA addresses > 40-bit */
7265 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7266                                           int len)
7267 {
7268 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7269         if (tg3_flag(tp, 40BIT_DMA_BUG))
7270                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7271         return 0;
7272 #else
7273         return 0;
7274 #endif
7275 }
7276
7277 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7278                                  dma_addr_t mapping, u32 len, u32 flags,
7279                                  u32 mss, u32 vlan)
7280 {
7281         txbd->addr_hi = ((u64) mapping >> 32);
7282         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7283         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7284         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7285 }
7286
/* Post one contiguous DMA segment as one or more tx BDs starting at
 * *entry, splitting it when it exceeds tp->dma_limit.  *entry and
 * *budget are advanced/consumed in place.
 *
 * Returns true if a hardware DMA erratum condition was detected (the
 * caller must then fall back to tigon3_dma_hwbug_workaround()),
 * false on success.
 */
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	/* Some chips cannot DMA segments of 8 bytes or less. */
	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		/* Only the last BD of the segment may carry TXD_FLAG_END. */
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			/* Mark the slot so tg3_tx_skb_unmap() knows this BD
			 * is a split-off piece, not a new skb fragment.
			 */
			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				/* Out of BDs: unmark the last queued piece so
				 * the unmap walk terminates correctly.
				 */
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}
7346
/* Unmap an skb's DMA mappings starting at ring index @entry and clear
 * its tx_buffers slots, skipping extra BDs that tg3_tx_frag_set()
 * inserted (marked ->fragmented).  @last is the index of the last page
 * fragment to unmap, or -1 if only the linear head was mapped.
 */
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	/* Skip BDs that were split off the linear head mapping. */
	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		/* Skip BDs that were split off this fragment. */
		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
7384
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Copies the skb into a freshly allocated linear buffer (hoping for a
 * DMA-friendly address) and re-posts it at *entry.  The original skb is
 * always consumed; *pskb is updated to the copy.  Returns 0 on success,
 * -1 on allocation/mapping failure (caller drops the packet silently).
 */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	/* On 5701, additionally realign the copied data to a 4-byte
	 * boundary; other chips just need a linear copy.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				/* Copy still hits a hwbug/budget limit:
				 * unwind the mapping and give up.
				 */
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb(new_skb);
				ret = -1;
			}
		}
	}

	dev_kfree_skb(skb);
	*pskb = new_skb;
	return ret;
}
7439
7440 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7441
7442 /* Use GSO to workaround a rare TSO bug that may be triggered when the
7443  * TSO header is greater than 80 bytes.
7444  */
7445 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7446 {
7447         struct sk_buff *segs, *nskb;
7448         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7449
7450         /* Estimate the number of fragments in the worst case */
7451         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7452                 netif_stop_queue(tp->dev);
7453
7454                 /* netif_tx_stop_queue() must be done before checking
7455                  * checking tx index in tg3_tx_avail() below, because in
7456                  * tg3_tx(), we update tx index before checking for
7457                  * netif_tx_queue_stopped().
7458                  */
7459                 smp_mb();
7460                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7461                         return NETDEV_TX_BUSY;
7462
7463                 netif_wake_queue(tp->dev);
7464         }
7465
7466         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7467         if (IS_ERR(segs))
7468                 goto tg3_tso_bug_end;
7469
7470         do {
7471                 nskb = segs;
7472                 segs = segs->next;
7473                 nskb->next = NULL;
7474                 tg3_start_xmit(nskb, tp->dev);
7475         } while (segs);
7476
7477 tg3_tso_bug_end:
7478         dev_kfree_skb(skb);
7479
7480         return NETDEV_TX_OK;
7481 }
7482
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	/* With TSS, tx queue 0 maps to napi[1] (napi[0] has no tx ring). */
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		/* TSO path: fix up the IP/TCP pseudo headers and encode the
		 * header length into mss/base_flags per chip generation.
		 */
		struct iphdr *iph;
		u32 tcp_opt_len, hdr_len;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

		if (!skb_is_gso_v6(skb)) {
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		/* Headers over 80 bytes trip a TSO hardware bug on some
		 * chips; fall back to software GSO.
		 */
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
		    tg3_flag(tp, TSO_BUG))
			return tg3_tso_bug(tp, skb);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		/* Chip-specific header-length encoding; bit layouts are
		 * hardware-defined per TSO generation.
		 */
		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (vlan_tx_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = vlan_tx_tag_get(skb);
	}

	/* Request a hardware transmit timestamp if enabled. */
	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
	    tg3_flag(tp, TX_TSTAMP_EN)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		base_flags |= TXD_FLAG_HWTSTAMP;
	}

	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;


	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	/* Post the linear head, then each page fragment; any hwbug or
	 * budget exhaustion diverts to the copy workaround below.
	 */
	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		/* Now loop through additional data
		 * fragments, and queue them.
		 */
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating mailbox */
	wmb();

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * checking tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	mmiowb();
	return NETDEV_TX_OK;

dma_error:
	/* Unmap the head plus the fragments mapped before the failure. */
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}
7705
7706 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7707 {
7708         if (enable) {
7709                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7710                                   MAC_MODE_PORT_MODE_MASK);
7711
7712                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7713
7714                 if (!tg3_flag(tp, 5705_PLUS))
7715                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7716
7717                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7718                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7719                 else
7720                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7721         } else {
7722                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7723
7724                 if (tg3_flag(tp, 5705_PLUS) ||
7725                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7726                     tg3_asic_rev(tp) == ASIC_REV_5700)
7727                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7728         }
7729
7730         tw32(MAC_MODE, tp->mac_mode);
7731         udelay(40);
7732 }
7733
/* Configure the PHY and MAC for loopback at @speed (used by the
 * ethtool self tests).  With @extlpbk, external loopback is set up and
 * the PHY is forced to master; otherwise internal PHY loopback is
 * selected via BMCR_LOOPBACK.  Returns 0 on success, -EIO if external
 * loopback setup fails.
 */
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, 0);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		/* FET PHYs are 10/100 only; cap the requested speed. */
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			/* Force master role for external loopback. */
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	/* Program the MAC port mode to match the loopback speed. */
	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		/* Link polarity quirks for specific 5700-era PHYs. */
		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}
7826
7827 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7828 {
7829         struct tg3 *tp = netdev_priv(dev);
7830
7831         if (features & NETIF_F_LOOPBACK) {
7832                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7833                         return;
7834
7835                 spin_lock_bh(&tp->lock);
7836                 tg3_mac_loopback(tp, true);
7837                 netif_carrier_on(tp->dev);
7838                 spin_unlock_bh(&tp->lock);
7839                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7840         } else {
7841                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7842                         return;
7843
7844                 spin_lock_bh(&tp->lock);
7845                 tg3_mac_loopback(tp, false);
7846                 /* Force link status check */
7847                 tg3_setup_phy(tp, 1);
7848                 spin_unlock_bh(&tp->lock);
7849                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7850         }
7851 }
7852
7853 static netdev_features_t tg3_fix_features(struct net_device *dev,
7854         netdev_features_t features)
7855 {
7856         struct tg3 *tp = netdev_priv(dev);
7857
7858         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7859                 features &= ~NETIF_F_ALL_TSO;
7860
7861         return features;
7862 }
7863
7864 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7865 {
7866         netdev_features_t changed = dev->features ^ features;
7867
7868         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7869                 tg3_set_loopback(dev, features);
7870
7871         return 0;
7872 }
7873
/* Free all rx data buffers posted to a producer ring set.
 *
 * For secondary (per-vector) ring sets only the live window between the
 * consumer and producer indices holds buffers; for the primary set
 * (napi[0]'s) every slot is scanned.
 */
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				tp->rx_pkt_map_sz);

	/* 5780-class chips use the std ring for jumbo frames, so there is
	 * no separate jumbo ring to empty there.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					TG3_RX_JMB_MAP_SZ);
	}
}
7907
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success, -ENOMEM if not even one rx buffer could be
 * allocated (partial allocations shrink rx_pending instead of failing).
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	/* Secondary ring sets share the primary's descriptors; only
	 * their buffer bookkeeping needs clearing.
	 */
	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	/* 5780-class chips receive jumbo frames on the std ring, so it
	 * must be sized for them when the MTU is raised.
	 */
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
		       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
8016
8017 static void tg3_rx_prodring_fini(struct tg3 *tp,
8018                                  struct tg3_rx_prodring_set *tpr)
8019 {
8020         kfree(tpr->rx_std_buffers);
8021         tpr->rx_std_buffers = NULL;
8022         kfree(tpr->rx_jmb_buffers);
8023         tpr->rx_jmb_buffers = NULL;
8024         if (tpr->rx_std) {
8025                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8026                                   tpr->rx_std, tpr->rx_std_mapping);
8027                 tpr->rx_std = NULL;
8028         }
8029         if (tpr->rx_jmb) {
8030                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8031                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
8032                 tpr->rx_jmb = NULL;
8033         }
8034 }
8035
8036 static int tg3_rx_prodring_init(struct tg3 *tp,
8037                                 struct tg3_rx_prodring_set *tpr)
8038 {
8039         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8040                                       GFP_KERNEL);
8041         if (!tpr->rx_std_buffers)
8042                 return -ENOMEM;
8043
8044         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8045                                          TG3_RX_STD_RING_BYTES(tp),
8046                                          &tpr->rx_std_mapping,
8047                                          GFP_KERNEL);
8048         if (!tpr->rx_std)
8049                 goto err_out;
8050
8051         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8052                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8053                                               GFP_KERNEL);
8054                 if (!tpr->rx_jmb_buffers)
8055                         goto err_out;
8056
8057                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8058                                                  TG3_RX_JMB_RING_BYTES(tp),
8059                                                  &tpr->rx_jmb_mapping,
8060                                                  GFP_KERNEL);
8061                 if (!tpr->rx_jmb)
8062                         goto err_out;
8063         }
8064
8065         return 0;
8066
8067 err_out:
8068         tg3_rx_prodring_fini(tp, tpr);
8069         return -ENOMEM;
8070 }
8071
8072 /* Free up pending packets in all rx/tx rings.
8073  *
8074  * The chip has been shut down and the driver detached from
8075  * the networking, so no interrupts or new tx packets will
8076  * end up in the driver.  tp->{tx,}lock is not held and we are not
8077  * in an interrupt context and thus may sleep.
8078  */
8079 static void tg3_free_rings(struct tg3 *tp)
8080 {
8081         int i, j;
8082
8083         for (j = 0; j < tp->irq_cnt; j++) {
8084                 struct tg3_napi *tnapi = &tp->napi[j];
8085
8086                 tg3_rx_prodring_free(tp, &tnapi->prodring);
8087
8088                 if (!tnapi->tx_buffers)
8089                         continue;
8090
8091                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8092                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8093
8094                         if (!skb)
8095                                 continue;
8096
8097                         tg3_tx_skb_unmap(tnapi, i,
8098                                          skb_shinfo(skb)->nr_frags - 1);
8099
8100                         dev_kfree_skb_any(skb);
8101                 }
8102                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8103         }
8104 }
8105
8106 /* Initialize tx/rx rings for packet processing.
8107  *
8108  * The chip has been shut down and the driver detached from
8109  * the networking, so no interrupts or new tx packets will
8110  * end up in the driver.  tp->{tx,}lock are held and thus
8111  * we may not sleep.
8112  */
8113 static int tg3_init_rings(struct tg3 *tp)
8114 {
8115         int i;
8116
8117         /* Free up all the SKBs. */
8118         tg3_free_rings(tp);
8119
8120         for (i = 0; i < tp->irq_cnt; i++) {
8121                 struct tg3_napi *tnapi = &tp->napi[i];
8122
8123                 tnapi->last_tag = 0;
8124                 tnapi->last_irq_tag = 0;
8125                 tnapi->hw_status->status = 0;
8126                 tnapi->hw_status->status_tag = 0;
8127                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8128
8129                 tnapi->tx_prod = 0;
8130                 tnapi->tx_cons = 0;
8131                 if (tnapi->tx_ring)
8132                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8133
8134                 tnapi->rx_rcb_ptr = 0;
8135                 if (tnapi->rx_rcb)
8136                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8137
8138                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8139                         tg3_free_rings(tp);
8140                         return -ENOMEM;
8141                 }
8142         }
8143
8144         return 0;
8145 }
8146
8147 static void tg3_mem_tx_release(struct tg3 *tp)
8148 {
8149         int i;
8150
8151         for (i = 0; i < tp->irq_max; i++) {
8152                 struct tg3_napi *tnapi = &tp->napi[i];
8153
8154                 if (tnapi->tx_ring) {
8155                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8156                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
8157                         tnapi->tx_ring = NULL;
8158                 }
8159
8160                 kfree(tnapi->tx_buffers);
8161                 tnapi->tx_buffers = NULL;
8162         }
8163 }
8164
8165 static int tg3_mem_tx_acquire(struct tg3 *tp)
8166 {
8167         int i;
8168         struct tg3_napi *tnapi = &tp->napi[0];
8169
8170         /* If multivector TSS is enabled, vector 0 does not handle
8171          * tx interrupts.  Don't allocate any resources for it.
8172          */
8173         if (tg3_flag(tp, ENABLE_TSS))
8174                 tnapi++;
8175
8176         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8177                 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8178                                             TG3_TX_RING_SIZE, GFP_KERNEL);
8179                 if (!tnapi->tx_buffers)
8180                         goto err_out;
8181
8182                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8183                                                     TG3_TX_RING_BYTES,
8184                                                     &tnapi->tx_desc_mapping,
8185                                                     GFP_KERNEL);
8186                 if (!tnapi->tx_ring)
8187                         goto err_out;
8188         }
8189
8190         return 0;
8191
8192 err_out:
8193         tg3_mem_tx_release(tp);
8194         return -ENOMEM;
8195 }
8196
8197 static void tg3_mem_rx_release(struct tg3 *tp)
8198 {
8199         int i;
8200
8201         for (i = 0; i < tp->irq_max; i++) {
8202                 struct tg3_napi *tnapi = &tp->napi[i];
8203
8204                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8205
8206                 if (!tnapi->rx_rcb)
8207                         continue;
8208
8209                 dma_free_coherent(&tp->pdev->dev,
8210                                   TG3_RX_RCB_RING_BYTES(tp),
8211                                   tnapi->rx_rcb,
8212                                   tnapi->rx_rcb_mapping);
8213                 tnapi->rx_rcb = NULL;
8214         }
8215 }
8216
8217 static int tg3_mem_rx_acquire(struct tg3 *tp)
8218 {
8219         unsigned int i, limit;
8220
8221         limit = tp->rxq_cnt;
8222
8223         /* If RSS is enabled, we need a (dummy) producer ring
8224          * set on vector zero.  This is the true hw prodring.
8225          */
8226         if (tg3_flag(tp, ENABLE_RSS))
8227                 limit++;
8228
8229         for (i = 0; i < limit; i++) {
8230                 struct tg3_napi *tnapi = &tp->napi[i];
8231
8232                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8233                         goto err_out;
8234
8235                 /* If multivector RSS is enabled, vector 0
8236                  * does not handle rx or tx interrupts.
8237                  * Don't allocate any resources for it.
8238                  */
8239                 if (!i && tg3_flag(tp, ENABLE_RSS))
8240                         continue;
8241
8242                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8243                                                    TG3_RX_RCB_RING_BYTES(tp),
8244                                                    &tnapi->rx_rcb_mapping,
8245                                                    GFP_KERNEL | __GFP_ZERO);
8246                 if (!tnapi->rx_rcb)
8247                         goto err_out;
8248         }
8249
8250         return 0;
8251
8252 err_out:
8253         tg3_mem_rx_release(tp);
8254         return -ENOMEM;
8255 }
8256
8257 /*
8258  * Must not be invoked with interrupt sources disabled and
8259  * the hardware shutdown down.
8260  */
8261 static void tg3_free_consistent(struct tg3 *tp)
8262 {
8263         int i;
8264
8265         for (i = 0; i < tp->irq_cnt; i++) {
8266                 struct tg3_napi *tnapi = &tp->napi[i];
8267
8268                 if (tnapi->hw_status) {
8269                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8270                                           tnapi->hw_status,
8271                                           tnapi->status_mapping);
8272                         tnapi->hw_status = NULL;
8273                 }
8274         }
8275
8276         tg3_mem_rx_release(tp);
8277         tg3_mem_tx_release(tp);
8278
8279         if (tp->hw_stats) {
8280                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8281                                   tp->hw_stats, tp->stats_mapping);
8282                 tp->hw_stats = NULL;
8283         }
8284 }
8285
8286 /*
8287  * Must not be invoked with interrupt sources disabled and
8288  * the hardware shutdown down.  Can sleep.
8289  */
8290 static int tg3_alloc_consistent(struct tg3 *tp)
8291 {
8292         int i;
8293
8294         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8295                                           sizeof(struct tg3_hw_stats),
8296                                           &tp->stats_mapping,
8297                                           GFP_KERNEL | __GFP_ZERO);
8298         if (!tp->hw_stats)
8299                 goto err_out;
8300
8301         for (i = 0; i < tp->irq_cnt; i++) {
8302                 struct tg3_napi *tnapi = &tp->napi[i];
8303                 struct tg3_hw_status *sblk;
8304
8305                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8306                                                       TG3_HW_STATUS_SIZE,
8307                                                       &tnapi->status_mapping,
8308                                                       GFP_KERNEL | __GFP_ZERO);
8309                 if (!tnapi->hw_status)
8310                         goto err_out;
8311
8312                 sblk = tnapi->hw_status;
8313
8314                 if (tg3_flag(tp, ENABLE_RSS)) {
8315                         u16 *prodptr = NULL;
8316
8317                         /*
8318                          * When RSS is enabled, the status block format changes
8319                          * slightly.  The "rx_jumbo_consumer", "reserved",
8320                          * and "rx_mini_consumer" members get mapped to the
8321                          * other three rx return ring producer indexes.
8322                          */
8323                         switch (i) {
8324                         case 1:
8325                                 prodptr = &sblk->idx[0].rx_producer;
8326                                 break;
8327                         case 2:
8328                                 prodptr = &sblk->rx_jumbo_consumer;
8329                                 break;
8330                         case 3:
8331                                 prodptr = &sblk->reserved;
8332                                 break;
8333                         case 4:
8334                                 prodptr = &sblk->rx_mini_consumer;
8335                                 break;
8336                         }
8337                         tnapi->rx_rcb_prod_idx = prodptr;
8338                 } else {
8339                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8340                 }
8341         }
8342
8343         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8344                 goto err_out;
8345
8346         return 0;
8347
8348 err_out:
8349         tg3_free_consistent(tp);
8350         return -ENOMEM;
8351 }
8352
8353 #define MAX_WAIT_CNT 1000
8354
8355 /* To stop a block, clear the enable bit and poll till it
8356  * clears.  tp->lock is held.
8357  */
8358 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
8359 {
8360         unsigned int i;
8361         u32 val;
8362
8363         if (tg3_flag(tp, 5705_PLUS)) {
8364                 switch (ofs) {
8365                 case RCVLSC_MODE:
8366                 case DMAC_MODE:
8367                 case MBFREE_MODE:
8368                 case BUFMGR_MODE:
8369                 case MEMARB_MODE:
8370                         /* We can't enable/disable these bits of the
8371                          * 5705/5750, just say success.
8372                          */
8373                         return 0;
8374
8375                 default:
8376                         break;
8377                 }
8378         }
8379
8380         val = tr32(ofs);
8381         val &= ~enable_bit;
8382         tw32_f(ofs, val);
8383
8384         for (i = 0; i < MAX_WAIT_CNT; i++) {
8385                 udelay(100);
8386                 val = tr32(ofs);
8387                 if ((val & enable_bit) == 0)
8388                         break;
8389         }
8390
8391         if (i == MAX_WAIT_CNT && !silent) {
8392                 dev_err(&tp->pdev->dev,
8393                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8394                         ofs, enable_bit);
8395                 return -ENODEV;
8396         }
8397
8398         return 0;
8399 }
8400
/* tp->lock is held.
 *
 * Quiesce the chip: disable interrupts, stop the rx path, then the
 * tx/DMA blocks, and finally clear all status blocks.  Individual
 * stop errors are OR-ed together so every block is still attempted.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
        int i, err;

        tg3_disable_ints(tp);

        /* Stop accepting new rx traffic at the MAC first. */
        tp->rx_mode &= ~RX_MODE_ENABLE;
        tw32_f(MAC_RX_MODE, tp->rx_mode);
        udelay(10);

        /* Shut down the rx-side blocks. */
        err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

        /* Then the tx-side and DMA blocks. */
        err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

        tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        tp->tx_mode &= ~TX_MODE_ENABLE;
        tw32_f(MAC_TX_MODE, tp->tx_mode);

        /* Poll for the MAC transmitter to actually stop. */
        for (i = 0; i < MAX_WAIT_CNT; i++) {
                udelay(100);
                if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
                        break;
        }
        if (i >= MAX_WAIT_CNT) {
                dev_err(&tp->pdev->dev,
                        "%s timed out, TX_MODE_ENABLE will not clear "
                        "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
                err |= -ENODEV;
        }

        err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

        /* Pulse the FTQ reset register (all queues, then release). */
        tw32(FTQ_RESET, 0xffffffff);
        tw32(FTQ_RESET, 0x00000000);

        err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

        /* Clear every vector's status block now that the chip is quiet. */
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];
                if (tnapi->hw_status)
                        memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
        }

        return err;
}
8464
/* Save PCI command register before chip reset.  The GRC core clock
 * reset clears the memory enable bit in PCI register 4 on some chips
 * (see the comment in tg3_chip_reset), so tg3_restore_pci_state()
 * writes this saved copy back afterwards.
 */
static void tg3_save_pci_state(struct tg3 *tp)
{
        pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
8470
/* Restore PCI state after chip reset.
 *
 * Re-establishes indirect register access, the PCISTATE/APE access
 * bits, the saved PCI_COMMAND word, and (where applicable) PCI-X,
 * cacheline/latency and MSI settings that the reset clobbered.
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
        u32 val;

        /* Re-enable indirect register accesses. */
        pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
                               tp->misc_host_ctrl);

        /* Set MAX PCI retry to zero. */
        val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
        if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
            tg3_flag(tp, PCIX_MODE))
                val |= PCISTATE_RETRY_SAME_DMA;
        /* Allow reads and writes to the APE register and memory space. */
        if (tg3_flag(tp, ENABLE_APE))
                val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
                       PCISTATE_ALLOW_APE_SHMEM_WR |
                       PCISTATE_ALLOW_APE_PSPACE_WR;
        pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

        /* Write back the PCI_COMMAND value captured by
         * tg3_save_pci_state() before the reset.
         */
        pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

        if (!tg3_flag(tp, PCI_EXPRESS)) {
                pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
                                      tp->pci_cacheline_sz);
                pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
                                      tp->pci_lat_timer);
        }

        /* Make sure PCI-X relaxed ordering bit is clear. */
        if (tg3_flag(tp, PCIX_MODE)) {
                u16 pcix_cmd;

                pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                                     &pcix_cmd);
                pcix_cmd &= ~PCI_X_CMD_ERO;
                pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                                      pcix_cmd);
        }

        if (tg3_flag(tp, 5780_CLASS)) {

                /* Chip reset on 5780 will reset MSI enable bit,
                 * so need to restore it.
                 */
                if (tg3_flag(tp, USING_MSI)) {
                        u16 ctrl;

                        pci_read_config_word(tp->pdev,
                                             tp->msi_cap + PCI_MSI_FLAGS,
                                             &ctrl);
                        pci_write_config_word(tp->pdev,
                                              tp->msi_cap + PCI_MSI_FLAGS,
                                              ctrl | PCI_MSI_FLAGS_ENABLE);
                        val = tr32(MSGINT_MODE);
                        tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
                }
        }
}
8531
/* tp->lock is held.
 *
 * Perform a full GRC core clock reset of the chip and bring it back
 * to a usable baseline: quiesce irq handlers, issue the reset, restore
 * the saved PCI state, wait for firmware, and re-probe the ASF/link
 * configuration from NVRAM-backed SRAM.  Returns 0 or the error from
 * tg3_poll_fw().
 */
static int tg3_chip_reset(struct tg3 *tp)
{
        u32 val;
        void (*write_op)(struct tg3 *, u32, u32);
        int i, err;

        tg3_nvram_lock(tp);

        tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

        /* No matching tg3_nvram_unlock() after this because
         * chip reset below will undo the nvram lock.
         */
        tp->nvram_lock_cnt = 0;

        /* GRC_MISC_CFG core clock reset will clear the memory
         * enable bit in PCI register 4 and the MSI enable bit
         * on some chips, so we save relevant registers here.
         */
        tg3_save_pci_state(tp);

        if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
            tg3_flag(tp, 5755_PLUS))
                tw32(GRC_FASTBOOT_PC, 0);

        /*
         * We must avoid the readl() that normally takes place.
         * It locks machines, causes machine checks, and other
         * fun things.  So, temporarily disable the 5701
         * hardware workaround, while we do the reset.
         */
        write_op = tp->write32;
        if (write_op == tg3_write_flush_reg32)
                tp->write32 = tg3_write32;

        /* Prevent the irq handler from reading or writing PCI registers
         * during chip reset when the memory enable bit in the PCI command
         * register may be cleared.  The chip does not generate interrupt
         * at this time, but the irq handler may still be called due to irq
         * sharing or irqpoll.
         */
        tg3_flag_set(tp, CHIP_RESETTING);
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];
                if (tnapi->hw_status) {
                        tnapi->hw_status->status = 0;
                        tnapi->hw_status->status_tag = 0;
                }
                tnapi->last_tag = 0;
                tnapi->last_irq_tag = 0;
        }
        /* Make the cleared tags visible before waiting out any
         * handler still running on another CPU.
         */
        smp_mb();

        for (i = 0; i < tp->irq_cnt; i++)
                synchronize_irq(tp->napi[i].irq_vec);

        if (tg3_asic_rev(tp) == ASIC_REV_57780) {
                val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
                tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
        }

        /* do the reset */
        val = GRC_MISC_CFG_CORECLK_RESET;

        if (tg3_flag(tp, PCI_EXPRESS)) {
                /* Force PCIe 1.0a mode */
                if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
                    !tg3_flag(tp, 57765_PLUS) &&
                    tr32(TG3_PCIE_PHY_TSTCTL) ==
                    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
                        tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

                if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
                        /* NOTE(review): bit 29 appears to be a chip-specific
                         * reset-companion bit with no named constant; confirm
                         * against the Broadcom register documentation.
                         */
                        tw32(GRC_MISC_CFG, (1 << 29));
                        val |= (1 << 29);
                }
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
                tw32(GRC_VCPU_EXT_CTRL,
                     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
        }

        /* Manage gphy power for all CPMU absent PCIe devices. */
        if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
                val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

        tw32(GRC_MISC_CFG, val);

        /* restore 5701 hardware bug workaround write method */
        tp->write32 = write_op;

        /* Unfortunately, we have to delay before the PCI read back.
         * Some 575X chips even will not respond to a PCI cfg access
         * when the reset command is given to the chip.
         *
         * How do these hardware designers expect things to work
         * properly if the PCI write is posted for a long period
         * of time?  It is always necessary to have some method by
         * which a register read back can occur to push the write
         * out which does the reset.
         *
         * For most tg3 variants the trick below was working.
         * Ho hum...
         */
        udelay(120);

        /* Flush PCI posted writes.  The normal MMIO registers
         * are inaccessible at this time so this is the only
         * way to make this reliably (actually, this is no longer
         * the case, see above).  I tried to use indirect
         * register read/write but this upset some 5701 variants.
         */
        pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

        udelay(120);

        if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
                u16 val16;

                if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
                        int j;
                        u32 cfg_val;

                        /* Wait for link training to complete.  */
                        for (j = 0; j < 5000; j++)
                                udelay(100);

                        /* NOTE(review): 0xc4 is a device-specific PCIe
                         * config register; bit 15 is set per errata for
                         * 5750 A0 -- confirm against chip documentation.
                         */
                        pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
                        pci_write_config_dword(tp->pdev, 0xc4,
                                               cfg_val | (1 << 15));
                }

                /* Clear the "no snoop" and "relaxed ordering" bits. */
                val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
                /*
                 * Older PCIe devices only support the 128 byte
                 * MPS setting.  Enforce the restriction.
                 */
                if (!tg3_flag(tp, CPMU_PRESENT))
                        val16 |= PCI_EXP_DEVCTL_PAYLOAD;
                pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);

                /* Clear error status */
                pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
                                      PCI_EXP_DEVSTA_CED |
                                      PCI_EXP_DEVSTA_NFED |
                                      PCI_EXP_DEVSTA_FED |
                                      PCI_EXP_DEVSTA_URD);
        }

        tg3_restore_pci_state(tp);

        tg3_flag_clear(tp, CHIP_RESETTING);
        tg3_flag_clear(tp, ERROR_PROCESSED);

        /* Re-enable the memory arbiter, preserving its mode bits on
         * 5780-class chips.
         */
        val = 0;
        if (tg3_flag(tp, 5780_CLASS))
                val = tr32(MEMARB_MODE);
        tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

        if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
                tg3_stop_fw(tp);
                tw32(0x5000, 0x400);
        }

        if (tg3_flag(tp, IS_SSB_CORE)) {
                /*
                 * BCM4785: In order to avoid repercussions from using
                 * potentially defective internal ROM, stop the Rx RISC CPU,
                 * which is not required.
                 */
                tg3_stop_fw(tp);
                tg3_halt_cpu(tp, RX_CPU_BASE);
        }

        tw32(GRC_MODE, tp->grc_mode);

        if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
                val = tr32(0xc4);

                tw32(0xc4, val | (1 << 15));
        }

        if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
            tg3_asic_rev(tp) == ASIC_REV_5705) {
                tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
                if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
                        tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
                tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
        }

        /* Restore the MAC port mode that matches the PHY type. */
        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
                tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
                val = tp->mac_mode;
        } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
                tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
                val = tp->mac_mode;
        } else
                val = 0;

        tw32_f(MAC_MODE, val);
        udelay(40);

        tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

        err = tg3_poll_fw(tp);
        if (err)
                return err;

        tg3_mdio_start(tp);

        if (tg3_flag(tp, PCI_EXPRESS) &&
            tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
            tg3_asic_rev(tp) != ASIC_REV_5785 &&
            !tg3_flag(tp, 57765_PLUS)) {
                val = tr32(0x7c00);

                tw32(0x7c00, val | (1 << 25));
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5720) {
                val = tr32(TG3_CPMU_CLCK_ORIDE);
                tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
        }

        /* Reprobe ASF enable state.  */
        tg3_flag_clear(tp, ENABLE_ASF);
        tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
                           TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

        tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
        tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
        if (val == NIC_SRAM_DATA_SIG_MAGIC) {
                u32 nic_cfg;

                tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
                if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
                        tg3_flag_set(tp, ENABLE_ASF);
                        tp->last_event_jiffies = jiffies;
                        if (tg3_flag(tp, 5750_PLUS))
                                tg3_flag_set(tp, ASF_NEW_HANDSHAKE);

                        /* Pick up the vaux / link-flap-avoidance bits
                         * from the third config word.
                         */
                        tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
                        if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
                                tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
                        if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
                                tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
                }
        }

        return 0;
}
8787
8788 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8789 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8790
8791 /* tp->lock is held. */
8792 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8793 {
8794         int err;
8795
8796         tg3_stop_fw(tp);
8797
8798         tg3_write_sig_pre_reset(tp, kind);
8799
8800         tg3_abort_hw(tp, silent);
8801         err = tg3_chip_reset(tp);
8802
8803         __tg3_set_mac_addr(tp, 0);
8804
8805         tg3_write_sig_legacy(tp, kind);
8806         tg3_write_sig_post_reset(tp, kind);
8807
8808         if (tp->hw_stats) {
8809                 /* Save the stats across chip resets... */
8810                 tg3_get_nstats(tp, &tp->net_stats_prev);
8811                 tg3_get_estats(tp, &tp->estats_prev);
8812
8813                 /* And make sure the next sample is new data */
8814                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8815         }
8816
8817         if (err)
8818                 return err;
8819
8820         return 0;
8821 }
8822
8823 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8824 {
8825         struct tg3 *tp = netdev_priv(dev);
8826         struct sockaddr *addr = p;
8827         int err = 0, skip_mac_1 = 0;
8828
8829         if (!is_valid_ether_addr(addr->sa_data))
8830                 return -EADDRNOTAVAIL;
8831
8832         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8833
8834         if (!netif_running(dev))
8835                 return 0;
8836
8837         if (tg3_flag(tp, ENABLE_ASF)) {
8838                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8839
8840                 addr0_high = tr32(MAC_ADDR_0_HIGH);
8841                 addr0_low = tr32(MAC_ADDR_0_LOW);
8842                 addr1_high = tr32(MAC_ADDR_1_HIGH);
8843                 addr1_low = tr32(MAC_ADDR_1_LOW);
8844
8845                 /* Skip MAC addr 1 if ASF is using it. */
8846                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8847                     !(addr1_high == 0 && addr1_low == 0))
8848                         skip_mac_1 = 1;
8849         }
8850         spin_lock_bh(&tp->lock);
8851         __tg3_set_mac_addr(tp, skip_mac_1);
8852         spin_unlock_bh(&tp->lock);
8853
8854         return err;
8855 }
8856
8857 /* tp->lock is held. */
8858 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8859                            dma_addr_t mapping, u32 maxlen_flags,
8860                            u32 nic_addr)
8861 {
8862         tg3_write_mem(tp,
8863                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8864                       ((u64) mapping >> 32));
8865         tg3_write_mem(tp,
8866                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8867                       ((u64) mapping & 0xffffffff));
8868         tg3_write_mem(tp,
8869                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8870                        maxlen_flags);
8871
8872         if (!tg3_flag(tp, 5705_PLUS))
8873                 tg3_write_mem(tp,
8874                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8875                               nic_addr);
8876 }
8877
8878
8879 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8880 {
8881         int i = 0;
8882
8883         if (!tg3_flag(tp, ENABLE_TSS)) {
8884                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8885                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8886                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8887         } else {
8888                 tw32(HOSTCC_TXCOL_TICKS, 0);
8889                 tw32(HOSTCC_TXMAX_FRAMES, 0);
8890                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8891
8892                 for (; i < tp->txq_cnt; i++) {
8893                         u32 reg;
8894
8895                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8896                         tw32(reg, ec->tx_coalesce_usecs);
8897                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8898                         tw32(reg, ec->tx_max_coalesced_frames);
8899                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8900                         tw32(reg, ec->tx_max_coalesced_frames_irq);
8901                 }
8902         }
8903
8904         for (; i < tp->irq_max - 1; i++) {
8905                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8906                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8907                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8908         }
8909 }
8910
8911 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8912 {
8913         int i = 0;
8914         u32 limit = tp->rxq_cnt;
8915
8916         if (!tg3_flag(tp, ENABLE_RSS)) {
8917                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8918                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8919                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8920                 limit--;
8921         } else {
8922                 tw32(HOSTCC_RXCOL_TICKS, 0);
8923                 tw32(HOSTCC_RXMAX_FRAMES, 0);
8924                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8925         }
8926
8927         for (; i < limit; i++) {
8928                 u32 reg;
8929
8930                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8931                 tw32(reg, ec->rx_coalesce_usecs);
8932                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8933                 tw32(reg, ec->rx_max_coalesced_frames);
8934                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8935                 tw32(reg, ec->rx_max_coalesced_frames_irq);
8936         }
8937
8938         for (; i < tp->irq_max - 1; i++) {
8939                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8940                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8941                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8942         }
8943 }
8944
8945 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8946 {
8947         tg3_coal_tx_init(tp, ec);
8948         tg3_coal_rx_init(tp, ec);
8949
8950         if (!tg3_flag(tp, 5705_PLUS)) {
8951                 u32 val = ec->stats_block_coalesce_usecs;
8952
8953                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8954                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8955
8956                 if (!tp->link_up)
8957                         val = 0;
8958
8959                 tw32(HOSTCC_STAT_COAL_TICKS, val);
8960         }
8961 }
8962
/* tp->lock is held. */
/* Return all send and receive-return rings to an idle state and then
 * reprogram the status block address and ring control blocks (RCBs)
 * for every active NAPI vector.  The number of rings to disable is
 * chip-generation dependent.  Called from the hardware init path after
 * a chip reset, before traffic is re-enabled.
 */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk, txrcb, rxrcb, limit;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* Disable all transmit rings but the first. */
	/* Upper bound of the send RCB array varies by ASIC family. */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);


	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable interrupts */
	/* Writing 1 to the interrupt mailbox masks the vector; the MSI
	 * watchdog bookkeeping for vector 0 is reset alongside it.
	 */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		/* Without TSS, the TX producer mailbox belongs to vector 0. */
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	txrcb = NIC_SRAM_SEND_RCB;
	rxrcb = NIC_SRAM_RCV_RET_RCB;

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	/* Program vector 0's TX and RX-return RCBs, advancing the SRAM
	 * cursors so the per-vector loop below continues from here.
	 */
	if (tnapi->tx_ring) {
		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}

	if (tnapi->rx_rcb) {
		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
		rxrcb += TG3_BDINFO_SIZE;
	}

	stblk = HOSTCC_STATBLCK_RING1;

	/* Remaining vectors: status block address (8 bytes apart in the
	 * register file) plus their TX and RX-return RCBs.
	 */
	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		if (tnapi->tx_ring) {
			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
				       (TG3_TX_RING_SIZE <<
					BDINFO_FLAGS_MAXLEN_SHIFT),
				       NIC_SRAM_TX_BUFFER_DESC);
			txrcb += TG3_BDINFO_SIZE;
		}

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       ((tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT), 0);

		stblk += 8;
		rxrcb += TG3_BDINFO_SIZE;
	}
}
9092
9093 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9094 {
9095         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9096
9097         if (!tg3_flag(tp, 5750_PLUS) ||
9098             tg3_flag(tp, 5780_CLASS) ||
9099             tg3_asic_rev(tp) == ASIC_REV_5750 ||
9100             tg3_asic_rev(tp) == ASIC_REV_5752 ||
9101             tg3_flag(tp, 57765_PLUS))
9102                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9103         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9104                  tg3_asic_rev(tp) == ASIC_REV_5787)
9105                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9106         else
9107                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9108
9109         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9110         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9111
9112         val = min(nic_rep_thresh, host_rep_thresh);
9113         tw32(RCVBDI_STD_THRESH, val);
9114
9115         if (tg3_flag(tp, 57765_PLUS))
9116                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9117
9118         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9119                 return;
9120
9121         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9122
9123         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9124
9125         val = min(bdcache_maxcnt / 2, host_rep_thresh);
9126         tw32(RCVBDI_JUMBO_THRESH, val);
9127
9128         if (tg3_flag(tp, 57765_PLUS))
9129                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9130 }
9131
9132 static inline u32 calc_crc(unsigned char *buf, int len)
9133 {
9134         u32 reg;
9135         u32 tmp;
9136         int j, k;
9137
9138         reg = 0xffffffff;
9139
9140         for (j = 0; j < len; j++) {
9141                 reg ^= buf[j];
9142
9143                 for (k = 0; k < 8; k++) {
9144                         tmp = reg & 0x01;
9145
9146                         reg >>= 1;
9147
9148                         if (tmp)
9149                                 reg ^= 0xedb88320;
9150                 }
9151         }
9152
9153         return ~reg;
9154 }
9155
9156 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9157 {
9158         /* accept or reject all multicast frames */
9159         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9160         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9161         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9162         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9163 }
9164
9165 static void __tg3_set_rx_mode(struct net_device *dev)
9166 {
9167         struct tg3 *tp = netdev_priv(dev);
9168         u32 rx_mode;
9169
9170         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9171                                   RX_MODE_KEEP_VLAN_TAG);
9172
9173 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9174         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9175          * flag clear.
9176          */
9177         if (!tg3_flag(tp, ENABLE_ASF))
9178                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9179 #endif
9180
9181         if (dev->flags & IFF_PROMISC) {
9182                 /* Promiscuous mode. */
9183                 rx_mode |= RX_MODE_PROMISC;
9184         } else if (dev->flags & IFF_ALLMULTI) {
9185                 /* Accept all multicast. */
9186                 tg3_set_multi(tp, 1);
9187         } else if (netdev_mc_empty(dev)) {
9188                 /* Reject all multicast. */
9189                 tg3_set_multi(tp, 0);
9190         } else {
9191                 /* Accept one or more multicast(s). */
9192                 struct netdev_hw_addr *ha;
9193                 u32 mc_filter[4] = { 0, };
9194                 u32 regidx;
9195                 u32 bit;
9196                 u32 crc;
9197
9198                 netdev_for_each_mc_addr(ha, dev) {
9199                         crc = calc_crc(ha->addr, ETH_ALEN);
9200                         bit = ~crc & 0x7f;
9201                         regidx = (bit & 0x60) >> 5;
9202                         bit &= 0x1f;
9203                         mc_filter[regidx] |= (1 << bit);
9204                 }
9205
9206                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9207                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9208                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9209                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9210         }
9211
9212         if (rx_mode != tp->rx_mode) {
9213                 tp->rx_mode = rx_mode;
9214                 tw32_f(MAC_RX_MODE, rx_mode);
9215                 udelay(10);
9216         }
9217 }
9218
9219 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9220 {
9221         int i;
9222
9223         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9224                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9225 }
9226
9227 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9228 {
9229         int i;
9230
9231         if (!tg3_flag(tp, SUPPORT_MSIX))
9232                 return;
9233
9234         if (tp->rxq_cnt == 1) {
9235                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9236                 return;
9237         }
9238
9239         /* Validate table against current IRQ count */
9240         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9241                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9242                         break;
9243         }
9244
9245         if (i != TG3_RSS_INDIR_TBL_SIZE)
9246                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9247 }
9248
9249 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9250 {
9251         int i = 0;
9252         u32 reg = MAC_RSS_INDIR_TBL_0;
9253
9254         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9255                 u32 val = tp->rss_ind_tbl[i];
9256                 i++;
9257                 for (; i % 8; i++) {
9258                         val <<= 4;
9259                         val |= tp->rss_ind_tbl[i];
9260                 }
9261                 tw32(reg, val);
9262                 reg += 4;
9263         }
9264 }
9265
9266 /* tp->lock is held. */
9267 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
9268 {
9269         u32 val, rdmac_mode;
9270         int i, err, limit;
9271         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9272
9273         tg3_disable_ints(tp);
9274
9275         tg3_stop_fw(tp);
9276
9277         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9278
9279         if (tg3_flag(tp, INIT_COMPLETE))
9280                 tg3_abort_hw(tp, 1);
9281
9282         /* Enable MAC control of LPI */
9283         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
9284                 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
9285                       TG3_CPMU_EEE_LNKIDL_UART_IDL;
9286                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9287                         val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
9288
9289                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
9290
9291                 tw32_f(TG3_CPMU_EEE_CTRL,
9292                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9293
9294                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9295                       TG3_CPMU_EEEMD_LPI_IN_TX |
9296                       TG3_CPMU_EEEMD_LPI_IN_RX |
9297                       TG3_CPMU_EEEMD_EEE_ENABLE;
9298
9299                 if (tg3_asic_rev(tp) != ASIC_REV_5717)
9300                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9301
9302                 if (tg3_flag(tp, ENABLE_APE))
9303                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9304
9305                 tw32_f(TG3_CPMU_EEE_MODE, val);
9306
9307                 tw32_f(TG3_CPMU_EEE_DBTMR1,
9308                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9309                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9310
9311                 tw32_f(TG3_CPMU_EEE_DBTMR2,
9312                        TG3_CPMU_DBTMR2_APE_TX_2047US |
9313                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9314         }
9315
9316         if (reset_phy)
9317                 tg3_phy_reset(tp);
9318
9319         err = tg3_chip_reset(tp);
9320         if (err)
9321                 return err;
9322
9323         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9324
9325         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9326                 val = tr32(TG3_CPMU_CTRL);
9327                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9328                 tw32(TG3_CPMU_CTRL, val);
9329
9330                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9331                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9332                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9333                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9334
9335                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9336                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9337                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9338                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9339
9340                 val = tr32(TG3_CPMU_HST_ACC);
9341                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9342                 val |= CPMU_HST_ACC_MACCLK_6_25;
9343                 tw32(TG3_CPMU_HST_ACC, val);
9344         }
9345
9346         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9347                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9348                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9349                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9350                 tw32(PCIE_PWR_MGMT_THRESH, val);
9351
9352                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9353                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9354
9355                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9356
9357                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9358                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9359         }
9360
9361         if (tg3_flag(tp, L1PLLPD_EN)) {
9362                 u32 grc_mode = tr32(GRC_MODE);
9363
9364                 /* Access the lower 1K of PL PCIE block registers. */
9365                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9366                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9367
9368                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9369                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9370                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9371
9372                 tw32(GRC_MODE, grc_mode);
9373         }
9374
9375         if (tg3_flag(tp, 57765_CLASS)) {
9376                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9377                         u32 grc_mode = tr32(GRC_MODE);
9378
9379                         /* Access the lower 1K of PL PCIE block registers. */
9380                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9381                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9382
9383                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9384                                    TG3_PCIE_PL_LO_PHYCTL5);
9385                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9386                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9387
9388                         tw32(GRC_MODE, grc_mode);
9389                 }
9390
9391                 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9392                         u32 grc_mode;
9393
9394                         /* Fix transmit hangs */
9395                         val = tr32(TG3_CPMU_PADRNG_CTL);
9396                         val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9397                         tw32(TG3_CPMU_PADRNG_CTL, val);
9398
9399                         grc_mode = tr32(GRC_MODE);
9400
9401                         /* Access the lower 1K of DL PCIE block registers. */
9402                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9403                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9404
9405                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9406                                    TG3_PCIE_DL_LO_FTSMAX);
9407                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9408                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9409                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9410
9411                         tw32(GRC_MODE, grc_mode);
9412                 }
9413
9414                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9415                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9416                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9417                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9418         }
9419
9420         /* This works around an issue with Athlon chipsets on
9421          * B3 tigon3 silicon.  This bit has no effect on any
9422          * other revision.  But do not set this on PCI Express
9423          * chips and don't even touch the clocks if the CPMU is present.
9424          */
9425         if (!tg3_flag(tp, CPMU_PRESENT)) {
9426                 if (!tg3_flag(tp, PCI_EXPRESS))
9427                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9428                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9429         }
9430
9431         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9432             tg3_flag(tp, PCIX_MODE)) {
9433                 val = tr32(TG3PCI_PCISTATE);
9434                 val |= PCISTATE_RETRY_SAME_DMA;
9435                 tw32(TG3PCI_PCISTATE, val);
9436         }
9437
9438         if (tg3_flag(tp, ENABLE_APE)) {
9439                 /* Allow reads and writes to the
9440                  * APE register and memory space.
9441                  */
9442                 val = tr32(TG3PCI_PCISTATE);
9443                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9444                        PCISTATE_ALLOW_APE_SHMEM_WR |
9445                        PCISTATE_ALLOW_APE_PSPACE_WR;
9446                 tw32(TG3PCI_PCISTATE, val);
9447         }
9448
9449         if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9450                 /* Enable some hw fixes.  */
9451                 val = tr32(TG3PCI_MSI_DATA);
9452                 val |= (1 << 26) | (1 << 28) | (1 << 29);
9453                 tw32(TG3PCI_MSI_DATA, val);
9454         }
9455
9456         /* Descriptor ring init may make accesses to the
9457          * NIC SRAM area to setup the TX descriptors, so we
9458          * can only do this after the hardware has been
9459          * successfully reset.
9460          */
9461         err = tg3_init_rings(tp);
9462         if (err)
9463                 return err;
9464
9465         if (tg3_flag(tp, 57765_PLUS)) {
9466                 val = tr32(TG3PCI_DMA_RW_CTRL) &
9467                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9468                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9469                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9470                 if (!tg3_flag(tp, 57765_CLASS) &&
9471                     tg3_asic_rev(tp) != ASIC_REV_5717 &&
9472                     tg3_asic_rev(tp) != ASIC_REV_5762)
9473                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
9474                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9475         } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9476                    tg3_asic_rev(tp) != ASIC_REV_5761) {
9477                 /* This value is determined during the probe time DMA
9478                  * engine test, tg3_test_dma.
9479                  */
9480                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9481         }
9482
9483         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9484                           GRC_MODE_4X_NIC_SEND_RINGS |
9485                           GRC_MODE_NO_TX_PHDR_CSUM |
9486                           GRC_MODE_NO_RX_PHDR_CSUM);
9487         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9488
9489         /* Pseudo-header checksum is done by hardware logic and not
9490          * the offload processers, so make the chip do the pseudo-
9491          * header checksums on receive.  For transmit it is more
9492          * convenient to do the pseudo-header checksum in software
9493          * as Linux does that on transmit for us in all cases.
9494          */
9495         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9496
9497         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9498         if (tp->rxptpctl)
9499                 tw32(TG3_RX_PTP_CTL,
9500                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9501
9502         if (tg3_flag(tp, PTP_CAPABLE))
9503                 val |= GRC_MODE_TIME_SYNC_ENABLE;
9504
9505         tw32(GRC_MODE, tp->grc_mode | val);
9506
9507         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
9508         val = tr32(GRC_MISC_CFG);
9509         val &= ~0xff;
9510         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9511         tw32(GRC_MISC_CFG, val);
9512
9513         /* Initialize MBUF/DESC pool. */
9514         if (tg3_flag(tp, 5750_PLUS)) {
9515                 /* Do nothing.  */
9516         } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9517                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9518                 if (tg3_asic_rev(tp) == ASIC_REV_5704)
9519                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9520                 else
9521                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9522                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9523                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9524         } else if (tg3_flag(tp, TSO_CAPABLE)) {
9525                 int fw_len;
9526
9527                 fw_len = tp->fw_len;
9528                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9529                 tw32(BUFMGR_MB_POOL_ADDR,
9530                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9531                 tw32(BUFMGR_MB_POOL_SIZE,
9532                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9533         }
9534
9535         if (tp->dev->mtu <= ETH_DATA_LEN) {
9536                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9537                      tp->bufmgr_config.mbuf_read_dma_low_water);
9538                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9539                      tp->bufmgr_config.mbuf_mac_rx_low_water);
9540                 tw32(BUFMGR_MB_HIGH_WATER,
9541                      tp->bufmgr_config.mbuf_high_water);
9542         } else {
9543                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9544                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9545                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9546                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9547                 tw32(BUFMGR_MB_HIGH_WATER,
9548                      tp->bufmgr_config.mbuf_high_water_jumbo);
9549         }
9550         tw32(BUFMGR_DMA_LOW_WATER,
9551              tp->bufmgr_config.dma_low_water);
9552         tw32(BUFMGR_DMA_HIGH_WATER,
9553              tp->bufmgr_config.dma_high_water);
9554
9555         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9556         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9557                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9558         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9559             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9560             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9561                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9562         tw32(BUFMGR_MODE, val);
9563         for (i = 0; i < 2000; i++) {
9564                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9565                         break;
9566                 udelay(10);
9567         }
9568         if (i >= 2000) {
9569                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9570                 return -ENODEV;
9571         }
9572
9573         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9574                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9575
9576         tg3_setup_rxbd_thresholds(tp);
9577
9578         /* Initialize TG3_BDINFO's at:
9579          *  RCVDBDI_STD_BD:     standard eth size rx ring
9580          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
9581          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
9582          *
9583          * like so:
9584          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
9585          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
9586          *                              ring attribute flags
9587          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
9588          *
9589          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9590          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9591          *
9592          * The size of each ring is fixed in the firmware, but the location is
9593          * configurable.
9594          */
9595         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9596              ((u64) tpr->rx_std_mapping >> 32));
9597         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9598              ((u64) tpr->rx_std_mapping & 0xffffffff));
9599         if (!tg3_flag(tp, 5717_PLUS))
9600                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9601                      NIC_SRAM_RX_BUFFER_DESC);
9602
9603         /* Disable the mini ring */
9604         if (!tg3_flag(tp, 5705_PLUS))
9605                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9606                      BDINFO_FLAGS_DISABLED);
9607
9608         /* Program the jumbo buffer descriptor ring control
9609          * blocks on those devices that have them.
9610          */
9611         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9612             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9613
9614                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9615                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9616                              ((u64) tpr->rx_jmb_mapping >> 32));
9617                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9618                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9619                         val = TG3_RX_JMB_RING_SIZE(tp) <<
9620                               BDINFO_FLAGS_MAXLEN_SHIFT;
9621                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9622                              val | BDINFO_FLAGS_USE_EXT_RECV);
9623                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9624                             tg3_flag(tp, 57765_CLASS) ||
9625                             tg3_asic_rev(tp) == ASIC_REV_5762)
9626                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9627                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9628                 } else {
9629                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9630                              BDINFO_FLAGS_DISABLED);
9631                 }
9632
9633                 if (tg3_flag(tp, 57765_PLUS)) {
9634                         val = TG3_RX_STD_RING_SIZE(tp);
9635                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9636                         val |= (TG3_RX_STD_DMA_SZ << 2);
9637                 } else
9638                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9639         } else
9640                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9641
9642         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9643
9644         tpr->rx_std_prod_idx = tp->rx_pending;
9645         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9646
9647         tpr->rx_jmb_prod_idx =
9648                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9649         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9650
9651         tg3_rings_reset(tp);
9652
9653         /* Initialize MAC address and backoff seed. */
9654         __tg3_set_mac_addr(tp, 0);
9655
9656         /* MTU + ethernet header + FCS + optional VLAN tag */
9657         tw32(MAC_RX_MTU_SIZE,
9658              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9659
9660         /* The slot time is changed by tg3_setup_phy if we
9661          * run at gigabit with half duplex.
9662          */
9663         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9664               (6 << TX_LENGTHS_IPG_SHIFT) |
9665               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9666
9667         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9668             tg3_asic_rev(tp) == ASIC_REV_5762)
9669                 val |= tr32(MAC_TX_LENGTHS) &
9670                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
9671                         TX_LENGTHS_CNT_DWN_VAL_MSK);
9672
9673         tw32(MAC_TX_LENGTHS, val);
9674
9675         /* Receive rules. */
9676         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9677         tw32(RCVLPC_CONFIG, 0x0181);
9678
9679         /* Calculate RDMAC_MODE setting early, we need it to determine
9680          * the RCVLPC_STATE_ENABLE mask.
9681          */
9682         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9683                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9684                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9685                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9686                       RDMAC_MODE_LNGREAD_ENAB);
9687
9688         if (tg3_asic_rev(tp) == ASIC_REV_5717)
9689                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9690
9691         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
9692             tg3_asic_rev(tp) == ASIC_REV_5785 ||
9693             tg3_asic_rev(tp) == ASIC_REV_57780)
9694                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9695                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9696                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9697
9698         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9699             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9700                 if (tg3_flag(tp, TSO_CAPABLE) &&
9701                     tg3_asic_rev(tp) == ASIC_REV_5705) {
9702                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9703                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9704                            !tg3_flag(tp, IS_5788)) {
9705                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9706                 }
9707         }
9708
9709         if (tg3_flag(tp, PCI_EXPRESS))
9710                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9711
9712         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9713                 tp->dma_limit = 0;
9714                 if (tp->dev->mtu <= ETH_DATA_LEN) {
9715                         rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
9716                         tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
9717                 }
9718         }
9719
9720         if (tg3_flag(tp, HW_TSO_1) ||
9721             tg3_flag(tp, HW_TSO_2) ||
9722             tg3_flag(tp, HW_TSO_3))
9723                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9724
9725         if (tg3_flag(tp, 57765_PLUS) ||
9726             tg3_asic_rev(tp) == ASIC_REV_5785 ||
9727             tg3_asic_rev(tp) == ASIC_REV_57780)
9728                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9729
9730         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9731             tg3_asic_rev(tp) == ASIC_REV_5762)
9732                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9733
9734         if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
9735             tg3_asic_rev(tp) == ASIC_REV_5784 ||
9736             tg3_asic_rev(tp) == ASIC_REV_5785 ||
9737             tg3_asic_rev(tp) == ASIC_REV_57780 ||
9738             tg3_flag(tp, 57765_PLUS)) {
9739                 u32 tgtreg;
9740
9741                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9742                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
9743                 else
9744                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
9745
9746                 val = tr32(tgtreg);
9747                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9748                     tg3_asic_rev(tp) == ASIC_REV_5762) {
9749                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9750                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9751                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9752                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9753                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9754                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9755                 }
9756                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9757         }
9758
9759         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
9760             tg3_asic_rev(tp) == ASIC_REV_5720 ||
9761             tg3_asic_rev(tp) == ASIC_REV_5762) {
9762                 u32 tgtreg;
9763
9764                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9765                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
9766                 else
9767                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
9768
9769                 val = tr32(tgtreg);
9770                 tw32(tgtreg, val |
9771                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9772                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9773         }
9774
9775         /* Receive/send statistics. */
9776         if (tg3_flag(tp, 5750_PLUS)) {
9777                 val = tr32(RCVLPC_STATS_ENABLE);
9778                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9779                 tw32(RCVLPC_STATS_ENABLE, val);
9780         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9781                    tg3_flag(tp, TSO_CAPABLE)) {
9782                 val = tr32(RCVLPC_STATS_ENABLE);
9783                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9784                 tw32(RCVLPC_STATS_ENABLE, val);
9785         } else {
9786                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9787         }
9788         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9789         tw32(SNDDATAI_STATSENAB, 0xffffff);
9790         tw32(SNDDATAI_STATSCTRL,
9791              (SNDDATAI_SCTRL_ENABLE |
9792               SNDDATAI_SCTRL_FASTUPD));
9793
9794         /* Setup host coalescing engine. */
9795         tw32(HOSTCC_MODE, 0);
9796         for (i = 0; i < 2000; i++) {
9797                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9798                         break;
9799                 udelay(10);
9800         }
9801
9802         __tg3_set_coalesce(tp, &tp->coal);
9803
9804         if (!tg3_flag(tp, 5705_PLUS)) {
9805                 /* Status/statistics block address.  See tg3_timer,
9806                  * the tg3_periodic_fetch_stats call there, and
9807                  * tg3_get_stats to see how this works for 5705/5750 chips.
9808                  */
9809                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9810                      ((u64) tp->stats_mapping >> 32));
9811                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9812                      ((u64) tp->stats_mapping & 0xffffffff));
9813                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9814
9815                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9816
9817                 /* Clear statistics and status block memory areas */
9818                 for (i = NIC_SRAM_STATS_BLK;
9819                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9820                      i += sizeof(u32)) {
9821                         tg3_write_mem(tp, i, 0);
9822                         udelay(40);
9823                 }
9824         }
9825
9826         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9827
9828         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9829         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9830         if (!tg3_flag(tp, 5705_PLUS))
9831                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9832
9833         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9834                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9835                 /* reset to prevent losing 1st rx packet intermittently */
9836                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9837                 udelay(10);
9838         }
9839
9840         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9841                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9842                         MAC_MODE_FHDE_ENABLE;
9843         if (tg3_flag(tp, ENABLE_APE))
9844                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9845         if (!tg3_flag(tp, 5705_PLUS) &&
9846             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9847             tg3_asic_rev(tp) != ASIC_REV_5700)
9848                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9849         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9850         udelay(40);
9851
9852         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9853          * If TG3_FLAG_IS_NIC is zero, we should read the
9854          * register to preserve the GPIO settings for LOMs. The GPIOs,
9855          * whether used as inputs or outputs, are set by boot code after
9856          * reset.
9857          */
9858         if (!tg3_flag(tp, IS_NIC)) {
9859                 u32 gpio_mask;
9860
9861                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9862                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9863                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9864
9865                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
9866                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9867                                      GRC_LCLCTRL_GPIO_OUTPUT3;
9868
9869                 if (tg3_asic_rev(tp) == ASIC_REV_5755)
9870                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9871
9872                 tp->grc_local_ctrl &= ~gpio_mask;
9873                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9874
9875                 /* GPIO1 must be driven high for eeprom write protect */
9876                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9877                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9878                                                GRC_LCLCTRL_GPIO_OUTPUT1);
9879         }
9880         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9881         udelay(100);
9882
9883         if (tg3_flag(tp, USING_MSIX)) {
9884                 val = tr32(MSGINT_MODE);
9885                 val |= MSGINT_MODE_ENABLE;
9886                 if (tp->irq_cnt > 1)
9887                         val |= MSGINT_MODE_MULTIVEC_EN;
9888                 if (!tg3_flag(tp, 1SHOT_MSI))
9889                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9890                 tw32(MSGINT_MODE, val);
9891         }
9892
9893         if (!tg3_flag(tp, 5705_PLUS)) {
9894                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9895                 udelay(40);
9896         }
9897
9898         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9899                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9900                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9901                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9902                WDMAC_MODE_LNGREAD_ENAB);
9903
9904         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9905             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9906                 if (tg3_flag(tp, TSO_CAPABLE) &&
9907                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
9908                      tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
9909                         /* nothing */
9910                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9911                            !tg3_flag(tp, IS_5788)) {
9912                         val |= WDMAC_MODE_RX_ACCEL;
9913                 }
9914         }
9915
9916         /* Enable host coalescing bug fix */
9917         if (tg3_flag(tp, 5755_PLUS))
9918                 val |= WDMAC_MODE_STATUS_TAG_FIX;
9919
9920         if (tg3_asic_rev(tp) == ASIC_REV_5785)
9921                 val |= WDMAC_MODE_BURST_ALL_DATA;
9922
9923         tw32_f(WDMAC_MODE, val);
9924         udelay(40);
9925
9926         if (tg3_flag(tp, PCIX_MODE)) {
9927                 u16 pcix_cmd;
9928
9929                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9930                                      &pcix_cmd);
9931                 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
9932                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9933                         pcix_cmd |= PCI_X_CMD_READ_2K;
9934                 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
9935                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9936                         pcix_cmd |= PCI_X_CMD_READ_2K;
9937                 }
9938                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9939                                       pcix_cmd);
9940         }
9941
9942         tw32_f(RDMAC_MODE, rdmac_mode);
9943         udelay(40);
9944
9945         if (tg3_asic_rev(tp) == ASIC_REV_5719) {
9946                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9947                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9948                                 break;
9949                 }
9950                 if (i < TG3_NUM_RDMA_CHANNELS) {
9951                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9952                         val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9953                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9954                         tg3_flag_set(tp, 5719_RDMA_BUG);
9955                 }
9956         }
9957
9958         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9959         if (!tg3_flag(tp, 5705_PLUS))
9960                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9961
9962         if (tg3_asic_rev(tp) == ASIC_REV_5761)
9963                 tw32(SNDDATAC_MODE,
9964                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9965         else
9966                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9967
9968         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9969         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9970         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9971         if (tg3_flag(tp, LRG_PROD_RING_CAP))
9972                 val |= RCVDBDI_MODE_LRG_RING_SZ;
9973         tw32(RCVDBDI_MODE, val);
9974         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9975         if (tg3_flag(tp, HW_TSO_1) ||
9976             tg3_flag(tp, HW_TSO_2) ||
9977             tg3_flag(tp, HW_TSO_3))
9978                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9979         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9980         if (tg3_flag(tp, ENABLE_TSS))
9981                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9982         tw32(SNDBDI_MODE, val);
9983         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9984
9985         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
9986                 err = tg3_load_5701_a0_firmware_fix(tp);
9987                 if (err)
9988                         return err;
9989         }
9990
9991         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9992                 /* Ignore any errors for the firmware download. If download
9993                  * fails, the device will operate with EEE disabled
9994                  */
9995                 tg3_load_57766_firmware(tp);
9996         }
9997
9998         if (tg3_flag(tp, TSO_CAPABLE)) {
9999                 err = tg3_load_tso_firmware(tp);
10000                 if (err)
10001                         return err;
10002         }
10003
10004         tp->tx_mode = TX_MODE_ENABLE;
10005
10006         if (tg3_flag(tp, 5755_PLUS) ||
10007             tg3_asic_rev(tp) == ASIC_REV_5906)
10008                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10009
10010         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10011             tg3_asic_rev(tp) == ASIC_REV_5762) {
10012                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10013                 tp->tx_mode &= ~val;
10014                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10015         }
10016
10017         tw32_f(MAC_TX_MODE, tp->tx_mode);
10018         udelay(100);
10019
10020         if (tg3_flag(tp, ENABLE_RSS)) {
10021                 tg3_rss_write_indir_tbl(tp);
10022
10023                 /* Setup the "secret" hash key. */
10024                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
10025                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
10026                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
10027                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
10028                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
10029                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
10030                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
10031                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
10032                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
10033                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
10034         }
10035
10036         tp->rx_mode = RX_MODE_ENABLE;
10037         if (tg3_flag(tp, 5755_PLUS))
10038                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10039
10040         if (tg3_flag(tp, ENABLE_RSS))
10041                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10042                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
10043                                RX_MODE_RSS_IPV6_HASH_EN |
10044                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
10045                                RX_MODE_RSS_IPV4_HASH_EN |
10046                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
10047
10048         tw32_f(MAC_RX_MODE, tp->rx_mode);
10049         udelay(10);
10050
10051         tw32(MAC_LED_CTRL, tp->led_ctrl);
10052
10053         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10054         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10055                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10056                 udelay(10);
10057         }
10058         tw32_f(MAC_RX_MODE, tp->rx_mode);
10059         udelay(10);
10060
10061         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10062                 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10063                     !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10064                         /* Set drive transmission level to 1.2V  */
10065                         /* only if the signal pre-emphasis bit is not set  */
10066                         val = tr32(MAC_SERDES_CFG);
10067                         val &= 0xfffff000;
10068                         val |= 0x880;
10069                         tw32(MAC_SERDES_CFG, val);
10070                 }
10071                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10072                         tw32(MAC_SERDES_CFG, 0x616000);
10073         }
10074
10075         /* Prevent chip from dropping frames when flow control
10076          * is enabled.
10077          */
10078         if (tg3_flag(tp, 57765_CLASS))
10079                 val = 1;
10080         else
10081                 val = 2;
10082         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10083
10084         if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10085             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10086                 /* Use hardware link auto-negotiation */
10087                 tg3_flag_set(tp, HW_AUTONEG);
10088         }
10089
10090         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10091             tg3_asic_rev(tp) == ASIC_REV_5714) {
10092                 u32 tmp;
10093
10094                 tmp = tr32(SERDES_RX_CTRL);
10095                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10096                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10097                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10098                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10099         }
10100
10101         if (!tg3_flag(tp, USE_PHYLIB)) {
10102                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10103                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10104
10105                 err = tg3_setup_phy(tp, 0);
10106                 if (err)
10107                         return err;
10108
10109                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10110                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10111                         u32 tmp;
10112
10113                         /* Clear CRC stats. */
10114                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10115                                 tg3_writephy(tp, MII_TG3_TEST1,
10116                                              tmp | MII_TG3_TEST1_CRC_EN);
10117                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10118                         }
10119                 }
10120         }
10121
10122         __tg3_set_rx_mode(tp->dev);
10123
10124         /* Initialize receive rules. */
10125         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10126         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10127         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10128         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10129
10130         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10131                 limit = 8;
10132         else
10133                 limit = 16;
10134         if (tg3_flag(tp, ENABLE_ASF))
10135                 limit -= 4;
10136         switch (limit) {
10137         case 16:
10138                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10139         case 15:
10140                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10141         case 14:
10142                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10143         case 13:
10144                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10145         case 12:
10146                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10147         case 11:
10148                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10149         case 10:
10150                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10151         case 9:
10152                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10153         case 8:
10154                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10155         case 7:
10156                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10157         case 6:
10158                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10159         case 5:
10160                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10161         case 4:
10162                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10163         case 3:
10164                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10165         case 2:
10166         case 1:
10167
10168         default:
10169                 break;
10170         }
10171
10172         if (tg3_flag(tp, ENABLE_APE))
10173                 /* Write our heartbeat update interval to APE. */
10174                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10175                                 APE_HOST_HEARTBEAT_INT_DISABLE);
10176
10177         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10178
10179         return 0;
10180 }
10181
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 *
 * Returns 0 on success or a negative errno from tg3_reset_hw().
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	/* Select the appropriate core clock source before touching hw. */
	tg3_switch_clocks(tp);

	/* Reset the SRAM memory window to offset 0 before reinit. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
10193
10194 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10195 {
10196         int i;
10197
10198         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10199                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10200
10201                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10202                 off += len;
10203
10204                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10205                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10206                         memset(ocir, 0, TG3_OCIR_LEN);
10207         }
10208 }
10209
10210 /* sysfs attributes for hwmon */
10211 static ssize_t tg3_show_temp(struct device *dev,
10212                              struct device_attribute *devattr, char *buf)
10213 {
10214         struct pci_dev *pdev = to_pci_dev(dev);
10215         struct net_device *netdev = pci_get_drvdata(pdev);
10216         struct tg3 *tp = netdev_priv(netdev);
10217         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10218         u32 temperature;
10219
10220         spin_lock_bh(&tp->lock);
10221         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10222                                 sizeof(temperature));
10223         spin_unlock_bh(&tp->lock);
10224         return sprintf(buf, "%u\n", temperature);
10225 }
10226
10227
/* Three read-only hwmon attributes, all served by tg3_show_temp();
 * the per-attribute index is the APE scratchpad offset to read.
 */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_MAX_OFFSET);

/* NULL-terminated attribute list wrapped in the group that
 * tg3_hwmon_open() registers under the PCI device's kobject.
 */
static struct attribute *tg3_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	NULL
};

static const struct attribute_group tg3_group = {
	.attrs = tg3_attributes,
};
10245
10246 static void tg3_hwmon_close(struct tg3 *tp)
10247 {
10248         if (tp->hwmon_dev) {
10249                 hwmon_device_unregister(tp->hwmon_dev);
10250                 tp->hwmon_dev = NULL;
10251                 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10252         }
10253 }
10254
10255 static void tg3_hwmon_open(struct tg3 *tp)
10256 {
10257         int i, err;
10258         u32 size = 0;
10259         struct pci_dev *pdev = tp->pdev;
10260         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10261
10262         tg3_sd_scan_scratchpad(tp, ocirs);
10263
10264         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10265                 if (!ocirs[i].src_data_length)
10266                         continue;
10267
10268                 size += ocirs[i].src_hdr_length;
10269                 size += ocirs[i].src_data_length;
10270         }
10271
10272         if (!size)
10273                 return;
10274
10275         /* Register hwmon sysfs hooks */
10276         err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10277         if (err) {
10278                 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10279                 return;
10280         }
10281
10282         tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10283         if (IS_ERR(tp->hwmon_dev)) {
10284                 tp->hwmon_dev = NULL;
10285                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10286                 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10287         }
10288 }
10289
10290
/* Fold a 32-bit hardware statistics register into a 64-bit software
 * counter: add into .low and, when the addition wrapped (.low ended up
 * smaller than the value just added), carry into .high.  Classic
 * multi-statement macro, hence the do { } while (0) wrapper.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
10297
/* Accumulate the chip's 32-bit MAC and receive-list-placement stats
 * registers into the 64-bit counters in tp->hw_stats.  Called once per
 * second from tg3_timer() with tp->lock held; skipped while the link
 * is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!tp->link_up)
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
	/* 5719 workaround: once more packets have been sent than there
	 * are RDMA channels, clear the TX-length workaround bit and
	 * drop the flag so this block never runs again.
	 */
	if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
		u32 val;

		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
		tg3_flag_clear(tp, 5719_RDMA_BUG);
	}

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		/* Chips without a usable discard counter: count mbuf
		 * low-watermark attention events instead, acking each
		 * one by writing the bit back.
		 */
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
10362
10363 static void tg3_chk_missed_msi(struct tg3 *tp)
10364 {
10365         u32 i;
10366
10367         for (i = 0; i < tp->irq_cnt; i++) {
10368                 struct tg3_napi *tnapi = &tp->napi[i];
10369
10370                 if (tg3_has_work(tnapi)) {
10371                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10372                             tnapi->last_tx_cons == tnapi->tx_cons) {
10373                                 if (tnapi->chk_msi_cnt < 1) {
10374                                         tnapi->chk_msi_cnt++;
10375                                         return;
10376                                 }
10377                                 tg3_msi(0, tnapi);
10378                         }
10379                 }
10380                 tnapi->chk_msi_cnt = 0;
10381                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10382                 tnapi->last_tx_cons = tnapi->tx_cons;
10383         }
10384 }
10385
/* Periodic housekeeping timer (period tp->timer_offset, see
 * tg3_timer_init()): recovers missed MSIs, emulates interrupts for
 * non-tagged-status chips, and once per second fetches statistics and
 * polls link state; the ASF heartbeat runs on its own slower counter.
 * Always re-arms itself before returning.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* Skip all work (but keep re-arming) while irqs are being
	 * synchronized or a reset task is pending.
	 */
	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
		goto restart_timer;

	spin_lock(&tp->lock);

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		/* BCM4785: Flush posted writes from GbE to host memory. */
		tr32(HOSTCC_MODE);
	}

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		/* Write DMA engine no longer enabled: the chip needs a
		 * full reset, done from process context.
		 */
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		/* Link state polling: three mutually exclusive schemes
		 * depending on how the chip reports link changes.
		 */
		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (tp->link_up &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!tp->link_up &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				/* Bounce the port mode bits to resync
				 * the SerDes before reconfiguring.
				 */
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
10517
10518 static void tg3_timer_init(struct tg3 *tp)
10519 {
10520         if (tg3_flag(tp, TAGGED_STATUS) &&
10521             tg3_asic_rev(tp) != ASIC_REV_5717 &&
10522             !tg3_flag(tp, 57765_CLASS))
10523                 tp->timer_offset = HZ;
10524         else
10525                 tp->timer_offset = HZ / 10;
10526
10527         BUG_ON(tp->timer_offset > HZ);
10528
10529         tp->timer_multiplier = (HZ / tp->timer_offset);
10530         tp->asf_multiplier = (HZ / tp->timer_offset) *
10531                              TG3_FW_UPDATE_FREQ_SEC;
10532
10533         init_timer(&tp->timer);
10534         tp->timer.data = (unsigned long) tp;
10535         tp->timer.function = tg3_timer;
10536 }
10537
10538 static void tg3_timer_start(struct tg3 *tp)
10539 {
10540         tp->asf_counter   = tp->asf_multiplier;
10541         tp->timer_counter = tp->timer_multiplier;
10542
10543         tp->timer.expires = jiffies + tp->timer_offset;
10544         add_timer(&tp->timer);
10545 }
10546
/* Stop the periodic timer and wait for a running tg3_timer() to
 * finish.  Must not be called with tp->lock held: tg3_timer() takes
 * that lock, so del_timer_sync() would deadlock.
 */
static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}
10551
10552 /* Restart hardware after configuration changes, self-test, etc.
10553  * Invoked with tp->lock held.
10554  */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		/* The teardown/dev_close() sequence cannot run under
		 * the lock, so drop and reacquire it -- hence the
		 * sparse __releases/__acquires annotations above.
		 */
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
10575
/* Process-context worker that halts and fully re-initializes the
 * device after a failure.  Scheduled via tg3_reset_task_schedule();
 * always clears RESET_TASK_PENDING before returning.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	tg3_full_lock(tp, 0);

	/* Device was closed in the meantime: nothing to reset. */
	if (!netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	/* NOTE(review): phy/netif stop are deliberately done outside
	 * the lock -- presumably they can sleep; confirm before moving.
	 */
	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	/* After a TX recovery, fall back to flushed (non-posted)
	 * mailbox writes in case write reordering caused the hang.
	 */
	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
10619
10620 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10621 {
10622         irq_handler_t fn;
10623         unsigned long flags;
10624         char *name;
10625         struct tg3_napi *tnapi = &tp->napi[irq_num];
10626
10627         if (tp->irq_cnt == 1)
10628                 name = tp->dev->name;
10629         else {
10630                 name = &tnapi->irq_lbl[0];
10631                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10632                 name[IFNAMSIZ-1] = 0;
10633         }
10634
10635         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10636                 fn = tg3_msi;
10637                 if (tg3_flag(tp, 1SHOT_MSI))
10638                         fn = tg3_msi_1shot;
10639                 flags = 0;
10640         } else {
10641                 fn = tg3_interrupt;
10642                 if (tg3_flag(tp, TAGGED_STATUS))
10643                         fn = tg3_interrupt_tagged;
10644                 flags = IRQF_SHARED;
10645         }
10646
10647         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10648 }
10649
/* Verify that the NIC can actually deliver an interrupt: temporarily
 * install tg3_test_isr() on vector 0, force an interrupt through the
 * coalescing engine, and poll for up to ~50ms for evidence that it
 * fired.  The normal handler is restored before returning.  Returns 0
 * on success, -EIO if nothing arrived, or a request_irq() error.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Kick the coalescing engine to raise an interrupt now. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		/* Either condition indicates the interrupt arrived
		 * (state presumably updated by tg3_test_isr() --
		 * defined elsewhere in this file).
		 */
		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		/* Ack any stale tag so a pending interrupt can fire. */
		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/* Put the normal handler back regardless of the outcome. */
	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
10723
10724 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
10725  * successfully restored
10726  */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	/* Nothing to test unless MSI is actually in use. */
	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the caller's original SERR setting. */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	/* Re-request vector 0 as a legacy (shared INTx) interrupt. */
	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
10784
10785 static int tg3_request_firmware(struct tg3 *tp)
10786 {
10787         const struct tg3_firmware_hdr *fw_hdr;
10788
10789         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10790                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10791                            tp->fw_needed);
10792                 return -ENOENT;
10793         }
10794
10795         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
10796
10797         /* Firmware blob starts with version numbers, followed by
10798          * start address and _full_ length including BSS sections
10799          * (which must be longer than the actual data, of course
10800          */
10801
10802         tp->fw_len = be32_to_cpu(fw_hdr->len);  /* includes bss */
10803         if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
10804                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10805                            tp->fw_len, tp->fw_needed);
10806                 release_firmware(tp->fw);
10807                 tp->fw = NULL;
10808                 return -EINVAL;
10809         }
10810
10811         /* We no longer need firmware; we have it. */
10812         tp->fw_needed = NULL;
10813         return 0;
10814 }
10815
10816 static u32 tg3_irq_count(struct tg3 *tp)
10817 {
10818         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10819
10820         if (irq_cnt > 1) {
10821                 /* We want as many rx rings enabled as there are cpus.
10822                  * In multiqueue MSI-X mode, the first MSI-X vector
10823                  * only deals with link interrupts, etc, so we add
10824                  * one to the number of vectors we are requesting.
10825                  */
10826                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
10827         }
10828
10829         return irq_cnt;
10830 }
10831
/* Size the rx/tx queue sets and try to allocate MSI-X vectors for
 * them, renegotiating downward if the system grants fewer vectors
 * than requested.  Returns true when MSI-X is enabled (tp->irq_cnt,
 * rxq_cnt, txq_cnt and per-vector irq numbers are then valid); false
 * tells the caller to fall back to MSI/INTx.
 */
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];

	tp->txq_cnt = tp->txq_req;
	tp->rxq_cnt = tp->rxq_req;
	if (!tp->rxq_cnt)
		tp->rxq_cnt = netif_get_num_default_rss_queues();
	if (tp->rxq_cnt > tp->rxq_max)
		tp->rxq_cnt = tp->rxq_max;

	/* Disable multiple TX rings by default.  Simple round-robin hardware
	 * scheduling of the TX rings can cause starvation of rings with
	 * small packets when other rings have TSO or jumbo packets.
	 */
	if (!tp->txq_req)
		tp->txq_cnt = 1;

	tp->irq_cnt = tg3_irq_count(tp);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		/* Positive rc is the number of vectors actually
		 * available: retry with exactly that many and shrink
		 * the queue counts to match (one vector is reserved
		 * for link interrupts).
		 */
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
		tp->rxq_cnt = max(rc - 1, 1);
		if (tp->txq_cnt)
			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	/* Single vector: MSI-X is on but RSS/TSS stay disabled. */
	if (tp->irq_cnt == 1)
		return true;

	tg3_flag_set(tp, ENABLE_RSS);

	if (tp->txq_cnt > 1)
		tg3_flag_set(tp, ENABLE_TSS);

	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);

	return true;
}
10892
/* Pick the best available interrupt mode (MSI-X > MSI > INTx),
 * program MSGINT_MODE accordingly, and fall back to a single-vector
 * configuration on the legacy PCI irq when neither MSI flavor could
 * be enabled.
 */
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		/* One-shot mode stays disabled unless the chip
		 * supports it.
		 */
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		/* MSI or INTx: everything runs on the one PCI irq. */
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
	}

	if (tp->irq_cnt == 1) {
		tp->txq_cnt = 1;
		tp->rxq_cnt = 1;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
10931
10932 static void tg3_ints_fini(struct tg3 *tp)
10933 {
10934         if (tg3_flag(tp, USING_MSIX))
10935                 pci_disable_msix(tp->pdev);
10936         else if (tg3_flag(tp, USING_MSI))
10937                 pci_disable_msi(tp->pdev);
10938         tg3_flag_clear(tp, USING_MSI);
10939         tg3_flag_clear(tp, USING_MSIX);
10940         tg3_flag_clear(tp, ENABLE_RSS);
10941         tg3_flag_clear(tp, ENABLE_TSS);
10942 }
10943
/* Bring the device fully up: interrupt vectors, DMA memory, NAPI,
 * irq handlers, hardware init, optional MSI self-test, hwmon, timer
 * and PTP.  @reset_phy is forwarded to tg3_init_hw(); @test_irq runs
 * the MSI delivery test; @init distinguishes first open (PTP init)
 * from resume (PTP resume).  On error everything acquired so far is
 * unwound via the err_out* ladder (in reverse acquisition order).
 */
static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
		     bool init)
{
	struct net_device *dev = tp->dev;
	int i, err;

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto err_out1;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			/* Release the vectors acquired so far. */
			for (i--; i >= 0; i--) {
				tnapi = &tp->napi[i];
				free_irq(tnapi->irq_vec, tnapi);
			}
			goto err_out2;
		}
	}

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	}

	tg3_full_unlock(tp);

	if (err)
		goto err_out3;

	/* Optionally verify MSI delivery; tg3_test_msi() falls back to
	 * INTx itself, so a non-zero return here is unrecoverable.
	 */
	if (test_irq && tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto err_out2;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_hwmon_open(tp);

	tg3_full_lock(tp, 0);

	tg3_timer_start(tp);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	if (init)
		tg3_ptp_init(tp);
	else
		tg3_ptp_resume(tp);


	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

err_out3:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

err_out2:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

err_out1:
	tg3_ints_fini(tp);

	return err;
}
11059
/* Full shutdown path (used by tg3_close()): cancel the reset worker,
 * stop the datapath, timer, hwmon and PHY, halt the chip under the
 * full lock, then release irqs, interrupt vectors, NAPI contexts and
 * DMA memory -- the reverse of the tg3_start() acquisition order.
 */
static void tg3_stop(struct tg3 *tp)
{
	int i;

	tg3_reset_task_cancel(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_hwmon_close(tp);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	/* Free vectors in reverse order of allocation. */
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);
}
11094
/* ndo_open handler: load firmware if required, power the chip up,
 * then run the full bring-up via tg3_start() and register the PTP
 * clock for PTP-capable chips.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Firmware load failure is fatal only for 5701 A0.  Other
	 * chips degrade gracefully (57766 drops EEE, the rest drop
	 * TSO), and the capability is restored once a later load
	 * succeeds.
	 */
	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
			if (err) {
				netdev_warn(tp->dev, "EEE capability disabled\n");
				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
				netdev_warn(tp->dev, "EEE capability restored\n");
				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
			}
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
			if (err)
				return err;
		} else if (err) {
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	tg3_carrier_off(tp);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	/* Skip the PHY reset when the link is to be preserved across
	 * power-downs (link flap avoidance).
	 */
	err = tg3_start(tp,
			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
			true, true);
	if (err) {
		tg3_frob_aux_power(tp, false);
		pci_set_power_state(tp->pdev, PCI_D3hot);
	}

	/* NOTE(review): the PTP clock is registered even when
	 * tg3_start() failed above -- confirm this is intentional.
	 */
	if (tg3_flag(tp, PTP_CAPABLE)) {
		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
						   &tp->pdev->dev);
		if (IS_ERR(tp->ptp_clock))
			tp->ptp_clock = NULL;
	}

	return err;
}
11152
/* net_device_ops ndo_stop handler: unregister the PTP clock, stop the
 * device, zero the saved statistics baselines so counters restart from
 * zero on the next open, and power the chip down.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_ptp_fini(tp);

	tg3_stop(tp);

	/* Clear stats across close / open calls */
	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));

	tg3_power_down(tp);

	tg3_carrier_off(tp);

	return 0;
}
11171
11172 static inline u64 get_stat64(tg3_stat64_t *val)
11173 {
11174        return ((u64)val->high << 32) | ((u64)val->low);
11175 }
11176
/* Return the cumulative RX CRC error count.  On 5700/5701 with a
 * non-serdes PHY the counter is read from the PHY
 * (MII_TG3_RXR_COUNTERS) and accumulated in tp->phy_crc_errors; all
 * other configurations use the MAC's rx_fcs_errors hardware statistic.
 */
static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
		u32 val;

		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			/* Keep CRC counting enabled, then fetch the
			 * counter register.
			 */
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
11200
/* Add the live hardware counter for @member to the baseline captured
 * before the last close (old_estats) and store the sum in estats.
 */
#define ESTAT_ADD(member) \
	estats->member =        old_estats->member + \
				get_stat64(&hw_stats->member)

/* Fill @estats with cumulative ethtool statistics: each field is the
 * pre-close baseline (tp->estats_prev) plus the current hardware
 * counter.  Caller must ensure tp->hw_stats is valid.
 */
static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
{
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	/* Receive-side counters. */
	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	/* Transmit-side counters. */
	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	/* DMA / internal queue counters. */
	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);
}
11288
/* Fill @stats (rtnl_link_stats64) by adding current hardware counters
 * to the baseline saved before the last close (tp->net_stats_prev).
 * Caller must ensure tp->hw_stats is valid.
 */
static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	/* Packet totals are the sum of unicast/multicast/broadcast. */
	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* May read PHY counters on 5700/5701 (see tg3_calc_crc_errors). */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		tg3_calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	/* Driver-maintained counters, not taken from hw_stats. */
	stats->rx_dropped = tp->rx_dropped;
	stats->tx_dropped = tp->tx_dropped;
}
11344
/* ethtool get_regs_len: the register dump produced by tg3_get_regs()
 * is always TG3_REG_BLK_SIZE bytes.
 */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}
11349
/* ethtool get_regs: dump the legacy register block into @_p.  The
 * buffer is zeroed first and left zeroed when the PHY is in low-power
 * state; otherwise the dump runs under the full lock.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	/* Don't touch the chip while it is powered down. */
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}
11368
11369 static int tg3_get_eeprom_len(struct net_device *dev)
11370 {
11371         struct tg3 *tp = netdev_priv(dev);
11372
11373         return tp->nvram_size;
11374 }
11375
/* ethtool get_eeprom: copy eeprom->len bytes of NVRAM starting at
 * eeprom->offset into @data.  NVRAM is read in big-endian 4-byte
 * words, so an unaligned head and tail are handled separately from
 * the aligned middle section.  Returns 0 or a negative errno;
 * eeprom->len reflects the bytes actually copied, even on a partial
 * failure.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__be32 val;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	/* NVRAM is inaccessible while the chip is powered down. */
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		/* Read the whole word, copy only the requested bytes. */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			/* Report the partial progress before failing. */
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
11438
/* ethtool set_eeprom: write eeprom->len bytes of @data to NVRAM at
 * eeprom->offset.  NVRAM writes are 4-byte aligned, so when the span
 * has an unaligned head or tail the surrounding words are read back
 * and merged with the payload in a temporary bounce buffer before the
 * block write.  Returns 0 or a negative errno.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start, end;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	/* Caller must echo back the magic from tg3_get_eeprom(). */
	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Merge preserved edge words with the new payload. */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
11497
/* ethtool get_settings: report supported/advertised link modes, port
 * type, active speed/duplex and MDI-X state.  When PHYLIB manages the
 * PHY, the query is delegated to phy_ethtool_gset().
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	/* Gigabit modes unless the part is 10/100-only. */
	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	/* Copper parts add 10/100 + TP; serdes parts report fibre. */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		/* Map the RX/TX flow control bits onto the standard
		 * Pause/Asym_Pause advertisement encoding.
		 */
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				cmd->advertising |= ADVERTISED_Pause;
			} else {
				cmd->advertising |= ADVERTISED_Pause |
						    ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			cmd->advertising |= ADVERTISED_Asym_Pause;
		}
	}
	/* Live values only when the interface is up with link. */
	if (netif_running(dev) && tp->link_up) {
		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
		cmd->duplex = tp->link_config.active_duplex;
		cmd->lp_advertising = tp->link_config.rmt_adv;
		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
				cmd->eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->eth_tp_mdix = ETH_TP_MDI;
		}
	} else {
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		cmd->duplex = DUPLEX_UNKNOWN;
		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
	}
	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
11563
/* ethtool set_settings: validate and apply autoneg/speed/duplex
 * configuration.  Delegates to phy_ethtool_sset() under PHYLIB.
 * After updating link_config it warns about a possible management
 * link flap and, when the interface is up, re-runs tg3_setup_phy().
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_sset(phydev, cmd);
	}

	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	/* Forced mode requires an explicit duplex setting. */
	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		/* Build the set of modes this configuration may
		 * advertise and reject anything outside it.
		 */
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		if (cmd->advertising & ~mask)
			return -EINVAL;

		/* Keep only the speed/duplex bits for storage. */
		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		/* Forced mode: serdes parts only allow 1000/full;
		 * copper forced mode only allows 10 or 100.
		 */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_UNKNOWN;
		tp->link_config.duplex = DUPLEX_UNKNOWN;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->duplex;
	}

	tg3_warn_mgmt_link_flap(tp);

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
11652
/* ethtool get_drvinfo: report driver name, driver version, firmware
 * version string and PCI bus address.
 */
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}
11662
11663 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11664 {
11665         struct tg3 *tp = netdev_priv(dev);
11666
11667         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11668                 wol->supported = WAKE_MAGIC;
11669         else
11670                 wol->supported = 0;
11671         wol->wolopts = 0;
11672         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11673                 wol->wolopts = WAKE_MAGIC;
11674         memset(&wol->sopass, 0, sizeof(wol->sopass));
11675 }
11676
/* ethtool set_wol: enable or disable magic-packet Wake-on-LAN.  Any
 * other wake option, or enabling WoL on a device that cannot wake the
 * system, is rejected.  The state is mirrored in both the struct
 * device wakeup setting and the driver's WOL_ENABLE flag.
 */
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);
	struct device *dp = &tp->pdev->dev;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
		return -EINVAL;

	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);

	/* Keep the driver flag consistent with the device setting. */
	spin_lock_bh(&tp->lock);
	if (device_may_wakeup(dp))
		tg3_flag_set(tp, WOL_ENABLE);
	else
		tg3_flag_clear(tp, WOL_ENABLE);
	spin_unlock_bh(&tp->lock);

	return 0;
}
11699
11700 static u32 tg3_get_msglevel(struct net_device *dev)
11701 {
11702         struct tg3 *tp = netdev_priv(dev);
11703         return tp->msg_enable;
11704 }
11705
11706 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11707 {
11708         struct tg3 *tp = netdev_priv(dev);
11709         tp->msg_enable = value;
11710 }
11711
/* ethtool nway_reset: restart link autonegotiation.  Fails with
 * -EAGAIN when the interface is down and -EINVAL on serdes PHYs.
 * Under PHYLIB the restart is delegated to phy_start_aneg();
 * otherwise BMCR is rewritten directly with ANRESTART|ANENABLE.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	tg3_warn_mgmt_link_flap(tp);

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* NOTE(review): BMCR is read twice here and only the
		 * second result is checked/used; the first read looks
		 * redundant unless some PHY needs a dummy read —
		 * confirm before removing.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
11747
11748 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11749 {
11750         struct tg3 *tp = netdev_priv(dev);
11751
11752         ering->rx_max_pending = tp->rx_std_ring_mask;
11753         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11754                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11755         else
11756                 ering->rx_jumbo_max_pending = 0;
11757
11758         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11759
11760         ering->rx_pending = tp->rx_pending;
11761         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11762                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11763         else
11764                 ering->rx_jumbo_pending = 0;
11765
11766         ering->tx_pending = tp->napi[0].tx_pending;
11767 }
11768
/* ethtool set_ringparam: validate the requested ring sizes, quiesce
 * the device, apply the new sizes and restart the hardware.  The TX
 * ring must hold more than MAX_SKB_FRAGS entries (three times that on
 * TSO_BUG chips).
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;

	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	/* Stop traffic before resizing; remember to restart the PHY. */
	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips cap the standard RX ring at 64 entries. */
	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	/* Same TX ring size for every queue. */
	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 0);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
11814
11815 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11816 {
11817         struct tg3 *tp = netdev_priv(dev);
11818
11819         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11820
11821         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11822                 epause->rx_pause = 1;
11823         else
11824                 epause->rx_pause = 0;
11825
11826         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11827                 epause->tx_pause = 1;
11828         else
11829                 epause->tx_pause = 0;
11830 }
11831
/* ethtool set_pauseparam: configure RX/TX flow control and pause
 * autonegotiation.  Under PHYLIB the new pause advertisement is
 * pushed into the phy_device and autonegotiation restarted when
 * needed; otherwise the MAC is halted and restarted with the new
 * flow control configuration.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE)
		tg3_warn_mgmt_link_flap(tp);

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

		/* Asymmetric rx/tx pause needs Asym_Pause support. */
		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		/* Translate rx/tx pause into the standard
		 * Pause/Asym_Pause advertisement encoding.
		 */
		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			/* PHY not connected yet: just record the new
			 * advertisement for later.
			 */
			tp->link_config.advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		/* Apply by halting and restarting the hardware. */
		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 0);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
11936
11937 static int tg3_get_sset_count(struct net_device *dev, int sset)
11938 {
11939         switch (sset) {
11940         case ETH_SS_TEST:
11941                 return TG3_NUM_TEST;
11942         case ETH_SS_STATS:
11943                 return TG3_NUM_STATS;
11944         default:
11945                 return -EOPNOTSUPP;
11946         }
11947 }
11948
11949 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11950                          u32 *rules __always_unused)
11951 {
11952         struct tg3 *tp = netdev_priv(dev);
11953
11954         if (!tg3_flag(tp, SUPPORT_MSIX))
11955                 return -EOPNOTSUPP;
11956
11957         switch (info->cmd) {
11958         case ETHTOOL_GRXRINGS:
11959                 if (netif_running(tp->dev))
11960                         info->data = tp->rxq_cnt;
11961                 else {
11962                         info->data = num_online_cpus();
11963                         if (info->data > TG3_RSS_MAX_NUM_QS)
11964                                 info->data = TG3_RSS_MAX_NUM_QS;
11965                 }
11966
11967                 /* The first interrupt vector only
11968                  * handles link interrupts.
11969                  */
11970                 info->data -= 1;
11971                 return 0;
11972
11973         default:
11974                 return -EOPNOTSUPP;
11975         }
11976 }
11977
11978 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11979 {
11980         u32 size = 0;
11981         struct tg3 *tp = netdev_priv(dev);
11982
11983         if (tg3_flag(tp, SUPPORT_MSIX))
11984                 size = TG3_RSS_INDIR_TBL_SIZE;
11985
11986         return size;
11987 }
11988
11989 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11990 {
11991         struct tg3 *tp = netdev_priv(dev);
11992         int i;
11993
11994         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11995                 indir[i] = tp->rss_ind_tbl[i];
11996
11997         return 0;
11998 }
11999
12000 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
12001 {
12002         struct tg3 *tp = netdev_priv(dev);
12003         size_t i;
12004
12005         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12006                 tp->rss_ind_tbl[i] = indir[i];
12007
12008         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12009                 return 0;
12010
12011         /* It is legal to write the indirection
12012          * table while the device is running.
12013          */
12014         tg3_full_lock(tp, 0);
12015         tg3_rss_write_indir_tbl(tp);
12016         tg3_full_unlock(tp);
12017
12018         return 0;
12019 }
12020
12021 static void tg3_get_channels(struct net_device *dev,
12022                              struct ethtool_channels *channel)
12023 {
12024         struct tg3 *tp = netdev_priv(dev);
12025         u32 deflt_qs = netif_get_num_default_rss_queues();
12026
12027         channel->max_rx = tp->rxq_max;
12028         channel->max_tx = tp->txq_max;
12029
12030         if (netif_running(dev)) {
12031                 channel->rx_count = tp->rxq_cnt;
12032                 channel->tx_count = tp->txq_cnt;
12033         } else {
12034                 if (tp->rxq_req)
12035                         channel->rx_count = tp->rxq_req;
12036                 else
12037                         channel->rx_count = min(deflt_qs, tp->rxq_max);
12038
12039                 if (tp->txq_req)
12040                         channel->tx_count = tp->txq_req;
12041                 else
12042                         channel->tx_count = min(deflt_qs, tp->txq_max);
12043         }
12044 }
12045
12046 static int tg3_set_channels(struct net_device *dev,
12047                             struct ethtool_channels *channel)
12048 {
12049         struct tg3 *tp = netdev_priv(dev);
12050
12051         if (!tg3_flag(tp, SUPPORT_MSIX))
12052                 return -EOPNOTSUPP;
12053
12054         if (channel->rx_count > tp->rxq_max ||
12055             channel->tx_count > tp->txq_max)
12056                 return -EINVAL;
12057
12058         tp->rxq_req = channel->rx_count;
12059         tp->txq_req = channel->tx_count;
12060
12061         if (!netif_running(dev))
12062                 return 0;
12063
12064         tg3_stop(tp);
12065
12066         tg3_carrier_off(tp);
12067
12068         tg3_start(tp, true, false, false);
12069
12070         return 0;
12071 }
12072
12073 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12074 {
12075         switch (stringset) {
12076         case ETH_SS_STATS:
12077                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12078                 break;
12079         case ETH_SS_TEST:
12080                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12081                 break;
12082         default:
12083                 WARN_ON(1);     /* we need a WARN() */
12084                 break;
12085         }
12086 }
12087
12088 static int tg3_set_phys_id(struct net_device *dev,
12089                             enum ethtool_phys_id_state state)
12090 {
12091         struct tg3 *tp = netdev_priv(dev);
12092
12093         if (!netif_running(tp->dev))
12094                 return -EAGAIN;
12095
12096         switch (state) {
12097         case ETHTOOL_ID_ACTIVE:
12098                 return 1;       /* cycle on/off once per second */
12099
12100         case ETHTOOL_ID_ON:
12101                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12102                      LED_CTRL_1000MBPS_ON |
12103                      LED_CTRL_100MBPS_ON |
12104                      LED_CTRL_10MBPS_ON |
12105                      LED_CTRL_TRAFFIC_OVERRIDE |
12106                      LED_CTRL_TRAFFIC_BLINK |
12107                      LED_CTRL_TRAFFIC_LED);
12108                 break;
12109
12110         case ETHTOOL_ID_OFF:
12111                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12112                      LED_CTRL_TRAFFIC_OVERRIDE);
12113                 break;
12114
12115         case ETHTOOL_ID_INACTIVE:
12116                 tw32(MAC_LED_CTRL, tp->led_ctrl);
12117                 break;
12118         }
12119
12120         return 0;
12121 }
12122
12123 static void tg3_get_ethtool_stats(struct net_device *dev,
12124                                    struct ethtool_stats *estats, u64 *tmp_stats)
12125 {
12126         struct tg3 *tp = netdev_priv(dev);
12127
12128         if (tp->hw_stats)
12129                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12130         else
12131                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12132 }
12133
/* Read the Vital Product Data block into a freshly allocated buffer.
 *
 * For a standard EEPROM image the NVRAM directory is searched for an
 * extended-VPD entry overriding the default VPD location; otherwise the
 * data is fetched through the PCI VPD capability.
 *
 * Returns the buffer (caller must kfree() it) with its length stored in
 * *vpdlen, or NULL on any failure.
 */
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		/* Scan the NVRAM directory for an extended-VPD entry. */
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			/* Entry found: length (in words) is encoded in the
			 * entry itself, the data offset in the next word.
			 */
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	if (!offset || !len) {
		/* No extended VPD entry: fall back to the fixed location. */
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		/* No usable EEPROM image: read through the PCI VPD
		 * capability, retrying up to three times when the read is
		 * interrupted or times out.
		 */
		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
12209
12210 #define NVRAM_TEST_SIZE 0x100
12211 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
12212 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
12213 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
12214 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
12215 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
12216 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
12217 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12218 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12219
/* ethtool NVRAM self-test: verify the checksum(s) of whatever image type
 * the NVRAM holds, identified by the magic word at offset 0:
 *
 *  - legacy EEPROM images (TG3_EEPROM_MAGIC): CRC-protected bootstrap and
 *    manufacturing blocks, plus the VPD checksum keyword;
 *  - firmware selfboot images (TG3_EEPROM_MAGIC_FW): 8-bit sum over the
 *    image must be zero;
 *  - hardware selfboot images (TG3_EEPROM_MAGIC_HW): per-byte parity
 *    check.
 *
 * Returns 0 when the image checks out, -EIO on corruption or read
 * failure, -ENOMEM on allocation failure.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	/* Nothing to test when the device has no NVRAM. */
	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	/* Determine how many bytes to read based on the image type. */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			/* Selfboot format 1: size depends on the revision. */
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			/* Unknown selfboot format: nothing we can verify. */
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Read the image under test into buf, word by word. */
	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		/* A valid image sums to zero (checksum byte included). */
		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				/* Bytes 0 and 8 each carry parity bits for
				 * the following seven data bytes.
				 */
				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			} else if (i == 16) {
				int l;
				u8 msk;

				/* Bytes 16 and 17 together carry parity bits
				 * for the remaining fourteen data bytes.
				 */
				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		/* Each byte's popcount combined with its parity bit must be
		 * odd; bail out on the first violation.
		 */
		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	/* Finally validate the VPD RO section's checksum keyword, when one
	 * is present.
	 */
	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			/* Sum from the start of the VPD through the checksum
			 * byte; the result must be zero.
			 */
			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}
12398
12399 #define TG3_SERDES_TIMEOUT_SEC  2
12400 #define TG3_COPPER_TIMEOUT_SEC  6
12401
12402 static int tg3_test_link(struct tg3 *tp)
12403 {
12404         int i, max;
12405
12406         if (!netif_running(tp->dev))
12407                 return -ENODEV;
12408
12409         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12410                 max = TG3_SERDES_TIMEOUT_SEC;
12411         else
12412                 max = TG3_COPPER_TIMEOUT_SEC;
12413
12414         for (i = 0; i < max; i++) {
12415                 if (tp->link_up)
12416                         return 0;
12417
12418                 if (msleep_interruptible(1000))
12419                         break;
12420         }
12421
12422         return -EIO;
12423 }
12424
12425 /* Only test the commonly used registers */
/* ethtool register self-test over the commonly used registers.
 *
 * For every applicable entry in reg_tbl[]: save the register, write
 * all-zeros and verify that read-only bits kept their value while
 * read/write bits read back as zero, then write ones through both masks
 * and verify read-only bits are still unchanged while read/write bits
 * read back as ones.  The saved value is restored in every path.
 *
 * Returns 0 on success, -EIO (with an optional netdev_err) on the first
 * mismatching register.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
/* Applicability flags: restrict a table row to (or exclude it from)
 * particular chip families.
 */
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		/* Sentinel entry terminating the table. */
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip rows that do not apply to this chip family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
12645
12646 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12647 {
12648         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12649         int i;
12650         u32 j;
12651
12652         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12653                 for (j = 0; j < len; j += 4) {
12654                         u32 val;
12655
12656                         tg3_write_mem(tp, offset + j, test_pattern[i]);
12657                         tg3_read_mem(tp, offset + j, &val);
12658                         if (val != test_pattern[i])
12659                                 return -EIO;
12660                 }
12661         }
12662         return 0;
12663 }
12664
/* ethtool memory self-test: run tg3_do_mem_test() over each internal
 * memory region of the chip.  The region table is selected per chip
 * family; every table is terminated by an entry with offset 0xffffffff.
 *
 * Returns 0 on success or the first error from tg3_do_mem_test().
 */
static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;	/* region start offset */
		u32 len;	/* region length in bytes */
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	/* Pick the region table for this chip, newest families first. */
	if (tg3_flag(tp, 5717_PLUS))
		mem_tbl = mem_tbl_5717;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		mem_tbl = mem_tbl_57765;
	else if (tg3_flag(tp, 5755_PLUS))
		mem_tbl = mem_tbl_5755;
	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tg3_flag(tp, 5705_PLUS))
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}
12734
/* Parameters of the canned TSO loopback frame below. */
#define TG3_TSO_MSS             500

#define TG3_TSO_IP_HDR_LEN      20
#define TG3_TSO_TCP_HDR_LEN     20
#define TG3_TSO_TCP_OPT_LEN     12

/* Ethertype + IPv4 + TCP header template (20-byte TCP header plus 12
 * bytes of options) used by the TSO loopback test.  tg3_run_loopback()
 * copies this in directly after the two MAC addresses and then patches
 * the IP total-length field for the segment size in use.
 */
static const u8 tg3_tso_header[] = {
0x08, 0x00,                             /* ethertype: IPv4 */
0x45, 0x00, 0x00, 0x00,                 /* IP: v4/IHL 5, tos, tot_len (patched later) */
0x00, 0x00, 0x40, 0x00,                 /* IP: id, frag flags (DF) */
0x40, 0x06, 0x00, 0x00,                 /* IP: TTL 64, proto TCP, csum 0 */
0x0a, 0x00, 0x00, 0x01,                 /* IP: saddr 10.0.0.1 */
0x0a, 0x00, 0x00, 0x02,                 /* IP: daddr 10.0.0.2 */
0x0d, 0x00, 0xe0, 0x00,                 /* TCP: source/dest ports */
0x00, 0x00, 0x01, 0x00,                 /* TCP: sequence number */
0x00, 0x00, 0x02, 0x00,                 /* TCP: ack number */
0x80, 0x10, 0x10, 0x00,                 /* TCP: data offset 8, flags, window */
0x14, 0x09, 0x00, 0x00,                 /* TCP: checksum, urgent pointer */
0x01, 0x01, 0x08, 0x0a,                 /* TCP opts: NOP, NOP, timestamp */
0x11, 0x11, 0x11, 0x11,                 /* TCP opts: timestamp payload */
0x11, 0x11, 0x11, 0x11,
};
12757
/* Transmit a single test frame (or one TSO burst when @tso_loopback)
 * through the currently selected loopback path and verify the data
 * comes back unmodified on the receive ring.
 *
 * @pktsz:        length of the frame to build and send
 * @tso_loopback: if true, build the canned TCP segment from
 *                tg3_tso_header and exercise the TSO engine instead of
 *                sending a plain pattern frame
 *
 * Returns 0 if all expected packets were received with the expected
 * payload, -ENOMEM on skb allocation failure, -EIO otherwise.  The
 * caller must already have loopback mode selected (tg3_mac_loopback()
 * or tg3_phy_lpbk_set()).
 */
static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
{
	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
	u32 budget;
	struct sk_buff *skb;
	u8 *tx_data, *rx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;
	struct tg3_napi *tnapi, *rnapi;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	/* When RSS/TSS is active the data completions land on napi[1];
	 * otherwise everything goes through napi[0].
	 */
	tnapi = &tp->napi[0];
	rnapi = &tp->napi[0];
	if (tp->irq_cnt > 1) {
		if (tg3_flag(tp, ENABLE_RSS))
			rnapi = &tp->napi[1];
		if (tg3_flag(tp, ENABLE_TSS))
			tnapi = &tp->napi[1];
	}
	coal_now = tnapi->coal_now | rnapi->coal_now;

	err = -EIO;

	tx_len = pktsz;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	/* Destination MAC is our own address so the looped-back frame
	 * passes the RX filters; the source MAC is zeroed.
	 */
	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	/* Open the MAC up to the full test frame size plus FCS. */
	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

	if (tso_loopback) {
		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
			      TG3_TSO_TCP_OPT_LEN;

		/* Lay the canned IPv4/TCP header template right after
		 * the two MAC addresses.
		 */
		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
		       sizeof(tg3_tso_header));
		mss = TG3_TSO_MSS;

		/* Number of on-wire segments the TSO engine must emit
		 * for the remaining payload.
		 */
		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

		/* Set the total length field in the IP header */
		iph->tot_len = htons((u16)(mss + hdr_len));

		base_flags = (TXD_FLAG_CPU_PRE_DMA |
			      TXD_FLAG_CPU_POST_DMA);

		/* Newer TSO engines want the TCP checksum pre-cleared;
		 * older ones take the checksum-offload flag instead.
		 */
		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			struct tcphdr *th;
			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
			th = (struct tcphdr *)&tx_data[val];
			th->check = 0;
		} else
			base_flags |= TXD_FLAG_TCPUDP_CSUM;

		/* Encode the header length into mss/base_flags the way
		 * each TSO hardware generation expects it.
		 */
		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 tg3_asic_rev(tp) == ASIC_REV_5705) {
			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
		} else {
			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
		}

		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
	} else {
		num_pkts = 1;
		data_off = ETH_HLEN;

		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
		    tx_len > VLAN_ETH_FRAME_LEN)
			base_flags |= TXD_FLAG_JMB_PKT;
	}

	/* Fill the payload with a predictable byte pattern that the
	 * receive side can verify.
	 */
	for (i = data_off; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	val = tnapi->tx_prod;
	tnapi->tx_buffers[val].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       rnapi->coal_now);

	udelay(10);

	/* Snapshot the rx producer so we can tell when our packets land. */
	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

	budget = tg3_tx_avail(tnapi);
	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
			    base_flags | TXD_FLAG_END, mss, 0)) {
		tnapi->tx_buffers[val].skb = NULL;
		dev_kfree_skb(skb);
		return -EIO;
	}

	tnapi->tx_prod++;

	/* Sync BD data before updating mailbox */
	wmb();

	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
	tr32_mailbox(tnapi->prodmbox);

	udelay(10);

	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 35; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       coal_now);

		udelay(10);

		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
		rx_idx = rnapi->hw_status->idx[0].rx_producer;
		if ((tx_idx == tnapi->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
	dev_kfree_skb(skb);

	/* Transmit never completed. */
	if (tx_idx != tnapi->tx_prod)
		goto out;

	/* Not all expected packets came back. */
	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Walk every received completion, checking descriptor state and
	 * the payload pattern.  @val tracks the expected pattern byte
	 * across successive segments.
	 */
	val = data_off;
	while (rx_idx != rx_start_idx) {
		desc = &rnapi->rx_rcb[rx_start_idx++];
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
			goto out;

		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
			 - ETH_FCS_LEN;

		if (!tso_loopback) {
			if (rx_len != tx_len)
				goto out;

			/* The frame must have landed on the ring its
			 * size dictates (standard vs jumbo).
			 */
			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
				if (opaque_key != RXD_OPAQUE_RING_STD)
					goto out;
			} else {
				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
					goto out;
			}
		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
			goto out;
		}

		if (opaque_key == RXD_OPAQUE_RING_STD) {
			rx_data = tpr->rx_std_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
					     mapping);
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
					     mapping);
		} else
			goto out;

		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
					    PCI_DMA_FROMDEVICE);

		rx_data += TG3_RX_OFFSET(tp);
		for (i = data_off; i < rx_len; i++, val++) {
			if (*(rx_data + i) != (u8) (val & 0xff))
				goto out;
		}
	}

	err = 0;

	/* tg3_free_rings will unmap and free the rx_data */
out:
	return err;
}
12965
/* Per-loopback-mode failure bits reported in the ethtool data array. */
#define TG3_STD_LOOPBACK_FAILED         1
#define TG3_JMB_LOOPBACK_FAILED         2
#define TG3_TSO_LOOPBACK_FAILED         4
#define TG3_LOOPBACK_FAILED \
	(TG3_STD_LOOPBACK_FAILED | \
	 TG3_JMB_LOOPBACK_FAILED | \
	 TG3_TSO_LOOPBACK_FAILED)

/* Run the MAC, PHY and (optionally) external loopback tests, storing
 * TG3_*_LOOPBACK_FAILED bits into data[TG3_{MAC,PHY,EXT}_LOOPB_TEST].
 * EEE capability is masked off for the duration of the tests and
 * restored before returning.
 *
 * Returns 0 when all executed tests passed, a negative errno otherwise.
 */
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;
	u32 jmb_pkt_sz = 9000;

	if (tp->dma_limit)
		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;

	/* Save and clear the EEE capability flag; restored at "done". */
	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	/* A downed interface cannot be tested at all. */
	if (!netif_running(tp->dev)) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	err = tg3_reset_hw(tp, 1);
	if (err) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;

		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	/* PHY (and optional external) loopback only when we drive the
	 * PHY ourselves.
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec.  Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	/* Any recorded failure bit makes the whole run fail. */
	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;

done:
	tp->phy_flags |= eee_cap;

	return err;
}
13088
/* ethtool self-test entry point.  The nvram and link tests always run;
 * when ETH_TEST_FL_OFFLINE is set the device is halted and the
 * register, memory, loopback and interrupt tests run too, after which
 * the hardware is restarted.  Per-test results go into @data
 * (non-zero = failed) and ETH_TEST_FL_FAILED is set on any failure.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	/* Power the device up first if it was sleeping; mark everything
	 * failed if that doesn't work.
	 */
	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
	    tg3_power_up(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
		return;
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_NVRAM_TEST] = 1;
	}
	/* The link test is skipped on external-loopback runs. */
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_LINK_TEST] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		/* Quiesce the interface before taking the device down. */
		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_REGISTER_TEST] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_MEMORY_TEST] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		if (tg3_test_loopback(tp, data, doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		/* Interrupt test runs with the full lock released. */
		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_INTERRUPT_TEST] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Put the hardware back into its normal operating state. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);

}
13175
/* SIOCSHWTSTAMP handler: configure hardware TX timestamping and the RX
 * PTP packet filter from a userspace struct hwtstamp_config.
 *
 * Returns -EINVAL on non-PTP-capable hardware or non-zero reserved
 * flags, -ERANGE for an unsupported tx_type/rx_filter, -EFAULT on bad
 * user pointers, 0 on success.
 */
static int tg3_hwtstamp_ioctl(struct net_device *dev,
			      struct ifreq *ifr, int cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	struct hwtstamp_config stmpconf;

	if (!tg3_flag(tp, PTP_CAPABLE))
		return -EINVAL;

	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
		return -EFAULT;

	/* Reserved for future extensions; must be zero. */
	if (stmpconf.flags)
		return -EINVAL;

	switch (stmpconf.tx_type) {
	case HWTSTAMP_TX_ON:
		tg3_flag_set(tp, TX_TSTAMP_EN);
		break;
	case HWTSTAMP_TX_OFF:
		tg3_flag_clear(tp, TX_TSTAMP_EN);
		break;
	default:
		return -ERANGE;
	}

	/* Translate the requested filter into RX PTP control register
	 * bits, cached in tp->rxptpctl.
	 */
	switch (stmpconf.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tp->rxptpctl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	default:
		return -ERANGE;
	}

	/* Program the filter right away if the interface is up. */
	if (netif_running(dev) && tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

	/* Hand the accepted config back to userspace. */
	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
		-EFAULT : 0;
}
13265
/* ndo_do_ioctl dispatcher.  When phylib manages the PHY, all requests
 * are forwarded to phy_mii_ioctl(); otherwise the MII register ioctls
 * (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG) and SIOCSHWTSTAMP are handled
 * here.  Unhandled commands return -EOPNOTSUPP.
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		/* tp->lock serializes PHY register access. */
		spin_lock_bh(&tp->lock);
		err = __tg3_readphy(tp, data->phy_id & 0x1f,
				    data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = __tg3_writephy(tp, data->phy_id & 0x1f,
				     data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	case SIOCSHWTSTAMP:
		return tg3_hwtstamp_ioctl(dev, ifr, cmd);

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
13327
13328 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13329 {
13330         struct tg3 *tp = netdev_priv(dev);
13331
13332         memcpy(ec, &tp->coal, sizeof(*ec));
13333         return 0;
13334 }
13335
/* ethtool set_coalesce: validate the requested interrupt coalescing
 * parameters against the chip's limits, cache the relevant fields in
 * tp->coal and, when the interface is up, program them into the
 * hardware under the full lock.  Returns 0 or -EINVAL.
 */
static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	/* The "during irq" and stats-block knobs are only honoured on
	 * pre-5705 parts; elsewhere their limits stay 0 so any non-zero
	 * request is rejected by the range checks below.
	 */
	if (!tg3_flag(tp, 5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}

	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* No rx interrupts will be generated if both are zero */
	if ((ec->rx_coalesce_usecs == 0) &&
	    (ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	/* No tx interrupts will be generated if both are zero */
	if ((ec->tx_coalesce_usecs == 0) &&
	    (ec->tx_max_coalesced_frames == 0))
		return -EINVAL;

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	/* Apply immediately if the device is up. */
	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}
	return 0;
}
13389
/* ethtool operations exported by the driver (hooked up via
 * dev->ethtool_ops at probe time).
 */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
	.get_rxfh_indir		= tg3_get_rxfh_indir,
	.set_rxfh_indir		= tg3_set_rxfh_indir,
	.get_channels		= tg3_get_channels,
	.set_channels		= tg3_set_channels,
	.get_ts_info		= tg3_get_ts_info,
};
13424
13425 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13426                                                 struct rtnl_link_stats64 *stats)
13427 {
13428         struct tg3 *tp = netdev_priv(dev);
13429
13430         spin_lock_bh(&tp->lock);
13431         if (!tp->hw_stats) {
13432                 spin_unlock_bh(&tp->lock);
13433                 return &tp->net_stats_prev;
13434         }
13435
13436         tg3_get_nstats(tp, stats);
13437         spin_unlock_bh(&tp->lock);
13438
13439         return stats;
13440 }
13441
/* ndo_set_rx_mode: re-program the receive filters under the full lock.
 * Nothing to do while the interface is down.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
13453
/* Record a new MTU and adjust the jumbo-frame/TSO flag state.
 *
 * On 5780-class parts jumbo frames and TSO are mutually exclusive, so
 * the TSO_CAPABLE flag is toggled instead of JUMBO_RING_ENABLE.  Note
 * the ordering: when entering jumbo mode, features are recomputed
 * before TSO_CAPABLE is cleared; when leaving, TSO_CAPABLE is set
 * before the recompute — preserve this order.
 */
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
                               int new_mtu)
{
        dev->mtu = new_mtu;

        if (new_mtu > ETH_DATA_LEN) {
                if (tg3_flag(tp, 5780_CLASS)) {
                        netdev_update_features(dev);
                        tg3_flag_clear(tp, TSO_CAPABLE);
                } else {
                        /* Non-5780 parts use a dedicated jumbo rx ring. */
                        tg3_flag_set(tp, JUMBO_RING_ENABLE);
                }
        } else {
                if (tg3_flag(tp, 5780_CLASS)) {
                        tg3_flag_set(tp, TSO_CAPABLE);
                        netdev_update_features(dev);
                }
                tg3_flag_clear(tp, JUMBO_RING_ENABLE);
        }
}
13474
/* ndo_change_mtu hook.  Validates the requested MTU and, if the device
 * is up, performs a full stop/halt/reprogram/restart cycle so the rx
 * rings and DMA engine are resized for the new frame length.
 *
 * Returns 0 on success or a negative errno (-EINVAL for an
 * out-of-range MTU, or whatever tg3_restart_hw() reports).
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
        struct tg3 *tp = netdev_priv(dev);
        int err, reset_phy = 0;

        if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
                return -EINVAL;

        if (!netif_running(dev)) {
                /* We'll just catch it later when the
                 * device is up'd.
                 */
                tg3_set_mtu(dev, tp, new_mtu);
                return 0;
        }

        /* Quiesce the PHY and the data path before halting the chip;
         * this ordering mirrors the teardown path and must be kept.
         */
        tg3_phy_stop(tp);

        tg3_netif_stop(tp);

        tg3_full_lock(tp, 1);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

        tg3_set_mtu(dev, tp, new_mtu);

        /* Reset PHY, otherwise the read DMA engine will be in a mode that
         * breaks all requests to 256 bytes.
         */
        if (tg3_asic_rev(tp) == ASIC_REV_57766)
                reset_phy = 1;

        err = tg3_restart_hw(tp, reset_phy);

        if (!err)
                tg3_netif_start(tp);

        tg3_full_unlock(tp);

        /* Restart the PHY state machine outside the full lock. */
        if (!err)
                tg3_phy_start(tp);

        return err;
}
13519
/* Netdevice callbacks for the tg3 driver. */
static const struct net_device_ops tg3_netdev_ops = {
        .ndo_open               = tg3_open,
        .ndo_stop               = tg3_close,
        .ndo_start_xmit         = tg3_start_xmit,
        .ndo_get_stats64        = tg3_get_stats64,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_rx_mode        = tg3_set_rx_mode,
        .ndo_set_mac_address    = tg3_set_mac_addr,
        .ndo_do_ioctl           = tg3_ioctl,
        .ndo_tx_timeout         = tg3_tx_timeout,
        .ndo_change_mtu         = tg3_change_mtu,
        .ndo_fix_features       = tg3_fix_features,
        .ndo_set_features       = tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = tg3_poll_controller,
#endif
};
13537
13538 static void tg3_get_eeprom_size(struct tg3 *tp)
13539 {
13540         u32 cursize, val, magic;
13541
13542         tp->nvram_size = EEPROM_CHIP_SIZE;
13543
13544         if (tg3_nvram_read(tp, 0, &magic) != 0)
13545                 return;
13546
13547         if ((magic != TG3_EEPROM_MAGIC) &&
13548             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13549             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13550                 return;
13551
13552         /*
13553          * Size the chip by reading offsets at increasing powers of two.
13554          * When we encounter our validation signature, we know the addressing
13555          * has wrapped around, and thus have our chip size.
13556          */
13557         cursize = 0x10;
13558
13559         while (cursize < tp->nvram_size) {
13560                 if (tg3_nvram_read(tp, cursize, &val) != 0)
13561                         return;
13562
13563                 if (val == magic)
13564                         break;
13565
13566                 cursize <<= 1;
13567         }
13568
13569         tp->nvram_size = cursize;
13570 }
13571
13572 static void tg3_get_nvram_size(struct tg3 *tp)
13573 {
13574         u32 val;
13575
13576         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13577                 return;
13578
13579         /* Selfboot format */
13580         if (val != TG3_EEPROM_MAGIC) {
13581                 tg3_get_eeprom_size(tp);
13582                 return;
13583         }
13584
13585         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13586                 if (val != 0) {
13587                         /* This is confusing.  We want to operate on the
13588                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
13589                          * call will read from NVRAM and byteswap the data
13590                          * according to the byteswapping settings for all
13591                          * other register accesses.  This ensures the data we
13592                          * want will always reside in the lower 16-bits.
13593                          * However, the data in NVRAM is in LE format, which
13594                          * means the data from the NVRAM read will always be
13595                          * opposite the endianness of the CPU.  The 16-bit
13596                          * byteswap then brings the data to CPU endianness.
13597                          */
13598                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13599                         return;
13600                 }
13601         }
13602         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13603 }
13604
/* Decode NVRAM_CFG1 strapping for legacy (5750/5780-class) parts into
 * tp->nvram_jedecnum, tp->nvram_pagesize and the FLASH/NVRAM_BUFFERED
 * flags.  Other parts fall through to the default buffered-Atmel
 * assumption at the bottom.
 */
static void tg3_get_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);
        if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
                tg3_flag_set(tp, FLASH);
        } else {
                /* No flash interface: clear the compatibility bypass so
                 * EEPROM accesses go through the normal NVRAM path.
                 */
                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
            tg3_flag(tp, 5780_CLASS)) {
                switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
                case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
                        tg3_flag_set(tp, NVRAM_BUFFERED);
                        break;
                case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
                        break;
                case FLASH_VENDOR_ATMEL_EEPROM:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
                        tg3_flag_set(tp, NVRAM_BUFFERED);
                        break;
                case FLASH_VENDOR_ST:
                        tp->nvram_jedecnum = JEDEC_ST;
                        tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
                        tg3_flag_set(tp, NVRAM_BUFFERED);
                        break;
                case FLASH_VENDOR_SAIFUN:
                        tp->nvram_jedecnum = JEDEC_SAIFUN;
                        tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
                        break;
                case FLASH_VENDOR_SST_SMALL:
                case FLASH_VENDOR_SST_LARGE:
                        tp->nvram_jedecnum = JEDEC_SST;
                        tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
                        break;
                }
        } else {
                /* Pre-5750 parts: assume a buffered Atmel AT45DB0X1B. */
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
                tg3_flag_set(tp, NVRAM_BUFFERED);
        }
}
13655
13656 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13657 {
13658         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13659         case FLASH_5752PAGE_SIZE_256:
13660                 tp->nvram_pagesize = 256;
13661                 break;
13662         case FLASH_5752PAGE_SIZE_512:
13663                 tp->nvram_pagesize = 512;
13664                 break;
13665         case FLASH_5752PAGE_SIZE_1K:
13666                 tp->nvram_pagesize = 1024;
13667                 break;
13668         case FLASH_5752PAGE_SIZE_2K:
13669                 tp->nvram_pagesize = 2048;
13670                 break;
13671         case FLASH_5752PAGE_SIZE_4K:
13672                 tp->nvram_pagesize = 4096;
13673                 break;
13674         case FLASH_5752PAGE_SIZE_264:
13675                 tp->nvram_pagesize = 264;
13676                 break;
13677         case FLASH_5752PAGE_SIZE_528:
13678                 tp->nvram_pagesize = 528;
13679                 break;
13680         }
13681 }
13682
/* Decode NVRAM_CFG1 strapping for 5752-class parts: vendor, buffering,
 * flash-vs-EEPROM, and page size.
 */
static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27))
                tg3_flag_set(tp, PROTECTED_NVRAM);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
        case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
        case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                break;
        case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                break;
        case FLASH_5752VENDOR_ST_M45PE10:
        case FLASH_5752VENDOR_ST_M45PE20:
        case FLASH_5752VENDOR_ST_M45PE40:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                break;
        }

        if (tg3_flag(tp, FLASH)) {
                tg3_nvram_get_pagesize(tp, nvcfg1);
        } else {
                /* For eeprom, set pagesize to maximum eeprom size */
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                /* EEPROM path: clear the compatibility bypass bit. */
                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
        }
}
13723
/* Decode NVRAM_CFG1 strapping for 5755-class parts.  When the TPM
 * protection bit is set, only part of the device is usable, so a
 * smaller effective nvram_size is reported for protected parts.
 */
static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1, protect = 0;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27)) {
                tg3_flag_set(tp, PROTECTED_NVRAM);
                protect = 1;
        }

        nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
        switch (nvcfg1) {
        case FLASH_5755VENDOR_ATMEL_FLASH_1:
        case FLASH_5755VENDOR_ATMEL_FLASH_2:
        case FLASH_5755VENDOR_ATMEL_FLASH_3:
        case FLASH_5755VENDOR_ATMEL_FLASH_5:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 264;
                if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
                    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
                        tp->nvram_size = (protect ? 0x3e200 :
                                          TG3_NVRAM_SIZE_512KB);
                else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
                        tp->nvram_size = (protect ? 0x1f200 :
                                          TG3_NVRAM_SIZE_256KB);
                else
                        tp->nvram_size = (protect ? 0x1f200 :
                                          TG3_NVRAM_SIZE_128KB);
                break;
        case FLASH_5752VENDOR_ST_M45PE10:
        case FLASH_5752VENDOR_ST_M45PE20:
        case FLASH_5752VENDOR_ST_M45PE40:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 256;
                if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
                        tp->nvram_size = (protect ?
                                          TG3_NVRAM_SIZE_64KB :
                                          TG3_NVRAM_SIZE_128KB);
                else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
                        tp->nvram_size = (protect ?
                                          TG3_NVRAM_SIZE_64KB :
                                          TG3_NVRAM_SIZE_256KB);
                else
                        tp->nvram_size = (protect ?
                                          TG3_NVRAM_SIZE_128KB :
                                          TG3_NVRAM_SIZE_512KB);
                break;
        }
}
13779
/* Decode NVRAM_CFG1 strapping for 5787-class parts into vendor,
 * buffering, flash-vs-EEPROM, and page size.
 */
static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
        case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
        case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
        case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
        case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                /* EEPROM path: clear the compatibility bypass bit. */
                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
                break;
        case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
        case FLASH_5755VENDOR_ATMEL_FLASH_1:
        case FLASH_5755VENDOR_ATMEL_FLASH_2:
        case FLASH_5755VENDOR_ATMEL_FLASH_3:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 264;
                break;
        case FLASH_5752VENDOR_ST_M45PE10:
        case FLASH_5752VENDOR_ST_M45PE20:
        case FLASH_5752VENDOR_ST_M45PE40:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 256;
                break;
        }
}
13817
/* Decode NVRAM_CFG1 strapping for 5761-class parts.  On TPM-protected
 * devices the usable size comes from the NVRAM_ADDR_LOCKOUT register;
 * otherwise it is derived from the specific flash part strapped.
 */
static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1, protect = 0;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27)) {
                tg3_flag_set(tp, PROTECTED_NVRAM);
                protect = 1;
        }

        nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
        switch (nvcfg1) {
        case FLASH_5761VENDOR_ATMEL_ADB021D:
        case FLASH_5761VENDOR_ATMEL_ADB041D:
        case FLASH_5761VENDOR_ATMEL_ADB081D:
        case FLASH_5761VENDOR_ATMEL_ADB161D:
        case FLASH_5761VENDOR_ATMEL_MDB021D:
        case FLASH_5761VENDOR_ATMEL_MDB041D:
        case FLASH_5761VENDOR_ATMEL_MDB081D:
        case FLASH_5761VENDOR_ATMEL_MDB161D:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
                tp->nvram_pagesize = 256;
                break;
        case FLASH_5761VENDOR_ST_A_M45PE20:
        case FLASH_5761VENDOR_ST_A_M45PE40:
        case FLASH_5761VENDOR_ST_A_M45PE80:
        case FLASH_5761VENDOR_ST_A_M45PE16:
        case FLASH_5761VENDOR_ST_M_M45PE20:
        case FLASH_5761VENDOR_ST_M_M45PE40:
        case FLASH_5761VENDOR_ST_M_M45PE80:
        case FLASH_5761VENDOR_ST_M_M45PE16:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 256;
                break;
        }

        if (protect) {
                /* Protected parts: hardware reports the accessible size. */
                tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
        } else {
                switch (nvcfg1) {
                case FLASH_5761VENDOR_ATMEL_ADB161D:
                case FLASH_5761VENDOR_ATMEL_MDB161D:
                case FLASH_5761VENDOR_ST_A_M45PE16:
                case FLASH_5761VENDOR_ST_M_M45PE16:
                        tp->nvram_size = TG3_NVRAM_SIZE_2MB;
                        break;
                case FLASH_5761VENDOR_ATMEL_ADB081D:
                case FLASH_5761VENDOR_ATMEL_MDB081D:
                case FLASH_5761VENDOR_ST_A_M45PE80:
                case FLASH_5761VENDOR_ST_M_M45PE80:
                        tp->nvram_size = TG3_NVRAM_SIZE_1MB;
                        break;
                case FLASH_5761VENDOR_ATMEL_ADB041D:
                case FLASH_5761VENDOR_ATMEL_MDB041D:
                case FLASH_5761VENDOR_ST_A_M45PE40:
                case FLASH_5761VENDOR_ST_M_M45PE40:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                case FLASH_5761VENDOR_ATMEL_ADB021D:
                case FLASH_5761VENDOR_ATMEL_MDB021D:
                case FLASH_5761VENDOR_ST_A_M45PE20:
                case FLASH_5761VENDOR_ST_M_M45PE20:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                }
        }
}
13892
13893 static void tg3_get_5906_nvram_info(struct tg3 *tp)
13894 {
13895         tp->nvram_jedecnum = JEDEC_ATMEL;
13896         tg3_flag_set(tp, NVRAM_BUFFERED);
13897         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13898 }
13899
/* Decode NVRAM_CFG1 strapping for 57780-class parts.  Sets vendor,
 * buffering, flash size and page size; unknown strappings mark the
 * device as having no usable NVRAM.
 */
static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
        case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
        case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                /* EEPROM path: clear the compatibility bypass bit and
                 * skip the flash pagesize probing below.
                 */
                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
                return;
        case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
        case FLASH_57780VENDOR_ATMEL_AT45DB011D:
        case FLASH_57780VENDOR_ATMEL_AT45DB011B:
        case FLASH_57780VENDOR_ATMEL_AT45DB021D:
        case FLASH_57780VENDOR_ATMEL_AT45DB021B:
        case FLASH_57780VENDOR_ATMEL_AT45DB041D:
        case FLASH_57780VENDOR_ATMEL_AT45DB041B:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
                case FLASH_57780VENDOR_ATMEL_AT45DB011D:
                case FLASH_57780VENDOR_ATMEL_AT45DB011B:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                case FLASH_57780VENDOR_ATMEL_AT45DB021D:
                case FLASH_57780VENDOR_ATMEL_AT45DB021B:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                case FLASH_57780VENDOR_ATMEL_AT45DB041D:
                case FLASH_57780VENDOR_ATMEL_AT45DB041B:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                }
                break;
        case FLASH_5752VENDOR_ST_M45PE10:
        case FLASH_5752VENDOR_ST_M45PE20:
        case FLASH_5752VENDOR_ST_M45PE40:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5752VENDOR_ST_M45PE10:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                case FLASH_5752VENDOR_ST_M45PE20:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                case FLASH_5752VENDOR_ST_M45PE40:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                }
                break;
        default:
                tg3_flag_set(tp, NO_NVRAM);
                return;
        }

        tg3_nvram_get_pagesize(tp, nvcfg1);
        /* Only 264/528-byte pages use the chip's address translation. */
        if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
                tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
13971
13972
/* Decode NVRAM_CFG1 strapping for 5717-class parts.  Sets vendor,
 * buffering, flash size and page size; unknown strappings mark the
 * device as having no usable NVRAM.
 */
static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
        case FLASH_5717VENDOR_ATMEL_EEPROM:
        case FLASH_5717VENDOR_MICRO_EEPROM:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                /* EEPROM path: clear the compatibility bypass bit and
                 * skip the flash pagesize probing below.
                 */
                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
                return;
        case FLASH_5717VENDOR_ATMEL_MDB011D:
        case FLASH_5717VENDOR_ATMEL_ADB011B:
        case FLASH_5717VENDOR_ATMEL_ADB011D:
        case FLASH_5717VENDOR_ATMEL_MDB021D:
        case FLASH_5717VENDOR_ATMEL_ADB021B:
        case FLASH_5717VENDOR_ATMEL_ADB021D:
        case FLASH_5717VENDOR_ATMEL_45USPT:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5717VENDOR_ATMEL_MDB021D:
                        /* Detect size with tg3_nvram_get_size() */
                        break;
                case FLASH_5717VENDOR_ATMEL_ADB021B:
                case FLASH_5717VENDOR_ATMEL_ADB021D:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                default:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
        case FLASH_5717VENDOR_ST_M_M25PE10:
        case FLASH_5717VENDOR_ST_A_M25PE10:
        case FLASH_5717VENDOR_ST_M_M45PE10:
        case FLASH_5717VENDOR_ST_A_M45PE10:
        case FLASH_5717VENDOR_ST_M_M25PE20:
        case FLASH_5717VENDOR_ST_A_M25PE20:
        case FLASH_5717VENDOR_ST_M_M45PE20:
        case FLASH_5717VENDOR_ST_A_M45PE20:
        case FLASH_5717VENDOR_ST_25USPT:
        case FLASH_5717VENDOR_ST_45USPT:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5717VENDOR_ST_M_M25PE20:
                case FLASH_5717VENDOR_ST_M_M45PE20:
                        /* Detect size with tg3_nvram_get_size() */
                        break;
                case FLASH_5717VENDOR_ST_A_M25PE20:
                case FLASH_5717VENDOR_ST_A_M45PE20:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                default:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
        default:
                tg3_flag_set(tp, NO_NVRAM);
                return;
        }

        tg3_nvram_get_pagesize(tp, nvcfg1);
        /* Only 264/528-byte pages use the chip's address translation. */
        if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
                tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
14050
/* Decode NVRAM_CFG1 strapping for 5720-class parts (including 5762,
 * whose pinstraps are first remapped onto the 5720 encodings).  Sets
 * vendor, buffering, flash size and page size; unknown strappings mark
 * the device as having no usable NVRAM.
 */
static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1, nvmpinstrp;

        nvcfg1 = tr32(NVRAM_CFG1);
        nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

        if (tg3_asic_rev(tp) == ASIC_REV_5762) {
                if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
                        tg3_flag_set(tp, NO_NVRAM);
                        return;
                }

                /* Map 5762-specific pinstraps to 5720 equivalents. */
                switch (nvmpinstrp) {
                case FLASH_5762_EEPROM_HD:
                        nvmpinstrp = FLASH_5720_EEPROM_HD;
                        break;
                case FLASH_5762_EEPROM_LD:
                        nvmpinstrp = FLASH_5720_EEPROM_LD;
                        break;
                case FLASH_5720VENDOR_M_ST_M45PE20:
                        /* This pinstrap supports multiple sizes, so force it
                         * to read the actual size from location 0xf0.
                         */
                        nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
                        break;
                }
        }

        switch (nvmpinstrp) {
        case FLASH_5720_EEPROM_HD:
        case FLASH_5720_EEPROM_LD:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);

                /* EEPROM path: clear the compatibility bypass bit and
                 * skip the flash pagesize probing below.
                 */
                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
                if (nvmpinstrp == FLASH_5720_EEPROM_HD)
                        tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
                else
                        tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
                return;
        case FLASH_5720VENDOR_M_ATMEL_DB011D:
        case FLASH_5720VENDOR_A_ATMEL_DB011B:
        case FLASH_5720VENDOR_A_ATMEL_DB011D:
        case FLASH_5720VENDOR_M_ATMEL_DB021D:
        case FLASH_5720VENDOR_A_ATMEL_DB021B:
        case FLASH_5720VENDOR_A_ATMEL_DB021D:
        case FLASH_5720VENDOR_M_ATMEL_DB041D:
        case FLASH_5720VENDOR_A_ATMEL_DB041B:
        case FLASH_5720VENDOR_A_ATMEL_DB041D:
        case FLASH_5720VENDOR_M_ATMEL_DB081D:
        case FLASH_5720VENDOR_A_ATMEL_DB081D:
        case FLASH_5720VENDOR_ATMEL_45USPT:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvmpinstrp) {
                case FLASH_5720VENDOR_M_ATMEL_DB021D:
                case FLASH_5720VENDOR_A_ATMEL_DB021B:
                case FLASH_5720VENDOR_A_ATMEL_DB021D:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                case FLASH_5720VENDOR_M_ATMEL_DB041D:
                case FLASH_5720VENDOR_A_ATMEL_DB041B:
                case FLASH_5720VENDOR_A_ATMEL_DB041D:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                case FLASH_5720VENDOR_M_ATMEL_DB081D:
                case FLASH_5720VENDOR_A_ATMEL_DB081D:
                        tp->nvram_size = TG3_NVRAM_SIZE_1MB;
                        break;
                default:
                        /* 5762 sizes itself from location 0xf0 instead. */
                        if (tg3_asic_rev(tp) != ASIC_REV_5762)
                                tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
        case FLASH_5720VENDOR_M_ST_M25PE10:
        case FLASH_5720VENDOR_M_ST_M45PE10:
        case FLASH_5720VENDOR_A_ST_M25PE10:
        case FLASH_5720VENDOR_A_ST_M45PE10:
        case FLASH_5720VENDOR_M_ST_M25PE20:
        case FLASH_5720VENDOR_M_ST_M45PE20:
        case FLASH_5720VENDOR_A_ST_M25PE20:
        case FLASH_5720VENDOR_A_ST_M45PE20:
        case FLASH_5720VENDOR_M_ST_M25PE40:
        case FLASH_5720VENDOR_M_ST_M45PE40:
        case FLASH_5720VENDOR_A_ST_M25PE40:
        case FLASH_5720VENDOR_A_ST_M45PE40:
        case FLASH_5720VENDOR_M_ST_M25PE80:
        case FLASH_5720VENDOR_M_ST_M45PE80:
        case FLASH_5720VENDOR_A_ST_M25PE80:
        case FLASH_5720VENDOR_A_ST_M45PE80:
        case FLASH_5720VENDOR_ST_25USPT:
        case FLASH_5720VENDOR_ST_45USPT:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvmpinstrp) {
                case FLASH_5720VENDOR_M_ST_M25PE20:
                case FLASH_5720VENDOR_M_ST_M45PE20:
                case FLASH_5720VENDOR_A_ST_M25PE20:
                case FLASH_5720VENDOR_A_ST_M45PE20:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                case FLASH_5720VENDOR_M_ST_M25PE40:
                case FLASH_5720VENDOR_M_ST_M45PE40:
                case FLASH_5720VENDOR_A_ST_M25PE40:
                case FLASH_5720VENDOR_A_ST_M45PE40:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                case FLASH_5720VENDOR_M_ST_M25PE80:
                case FLASH_5720VENDOR_M_ST_M45PE80:
                case FLASH_5720VENDOR_A_ST_M25PE80:
                case FLASH_5720VENDOR_A_ST_M45PE80:
                        tp->nvram_size = TG3_NVRAM_SIZE_1MB;
                        break;
                default:
                        /* 5762 sizes itself from location 0xf0 instead. */
                        if (tg3_asic_rev(tp) != ASIC_REV_5762)
                                tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
        default:
                tg3_flag_set(tp, NO_NVRAM);
                return;
        }

        tg3_nvram_get_pagesize(tp, nvcfg1);
        /* Only 264/528-byte pages use the chip's address translation. */
        if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
                tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

        if (tg3_asic_rev(tp) == ASIC_REV_5762) {
                u32 val;

                if (tg3_nvram_read(tp, 0, &val))
                        return;

                /* 5762: a missing/unrecognized image means no NVRAM. */
                if (val != TG3_EEPROM_MAGIC &&
                    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
                        tg3_flag_set(tp, NO_NVRAM);
        }
}
14197
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void tg3_nvram_init(struct tg3 *tp)
{
	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	/* Reset the EEPROM access state machine and program the default
	 * serial clock period before touching the device.
	 */
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		/* NVRAM is shared with bootcode/APE firmware; serialize
		 * access via the hardware arbitration lock.
		 */
		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		/* Dispatch to the ASIC-specific NVRAM layout parser; each
		 * helper sets jedecnum, page size, and (usually) nvram_size.
		 */
		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
			 tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			 tg3_asic_rev(tp) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
			 tg3_asic_rev(tp) == ASIC_REV_5762)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		/* If the ASIC helper did not determine a size, probe it. */
		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		/* 5700/5701: no NVRAM interface, use the raw EEPROM path. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
14272
/* Maps a board's PCI subsystem (vendor, device) pair to the PHY ID
 * expected on that board.  A phy_id of 0 causes tg3_phy_probe() to
 * treat the link as serdes (no copper PHY).
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

/* Hardcoded fallback table consulted by tg3_lookup_by_subsys() when the
 * PHY ID can be read neither from the PHY itself nor from NVRAM/EEPROM.
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
14341
14342 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14343 {
14344         int i;
14345
14346         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14347                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14348                      tp->pdev->subsystem_vendor) &&
14349                     (subsys_id_to_phy_id[i].subsys_devid ==
14350                      tp->pdev->subsystem_device))
14351                         return &subsys_id_to_phy_id[i];
14352         }
14353         return NULL;
14354 }
14355
/* Read the bootcode-provided hardware configuration from NIC SRAM.
 *
 * Parses the NIC_SRAM_DATA_CFG* words to determine the PHY ID, serdes
 * vs copper, LED mode, WOL/ASF/APE capabilities, and various PHY
 * workaround flags.  When no valid SRAM signature is present, the
 * conservative defaults set at the top (onboard device, WOL-capable,
 * PHY_1 LED mode, invalid PHY ID) are left in place.
 */
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* 5906 exposes its config via the VCPU shadow register
		 * rather than the NIC SRAM area used below.
		 */
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		/* CFG_2 only exists on newer ASICs with a sane bootcode
		 * version; otherwise cfg2 stays 0.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Repack the SRAM PHY ID halves into the driver's
		 * internal PHY ID layout (see tg3_phy_probe()).
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		/* Board-specific LED mode overrides. */
		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			/* Specific Arima boards are writable despite the
			 * write-protect bit being set.
			 */
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		/* Serdes links lose WOL capability unless the config
		 * word explicitly grants it.
		 */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
			    !tg3_flag(tp, 57765_PLUS) &&
			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
				tg3_flag_set(tp, ASPM_WORKAROUND);
			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	}
done:
	/* Propagate the final WOL capability/enable state to the PM core. */
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
14557
/* Read one 32-bit word from the APE OTP region.
 *
 * @offset: OTP word index; scaled by 8 to form the OTP address.
 * @val:    output for the word read.
 *
 * Holds the NVRAM arbitration lock for the duration of the access.
 * Returns 0 on success, the tg3_nvram_lock() error if the lock cannot
 * be taken, or -EBUSY if the OTP controller does not signal completion
 * within ~1 ms of polling.
 */
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;

	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	/* Program the address, start a read command, then flush the
	 * write with a readback before polling.
	 */
	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
	udelay(10);

	/* Poll for completion: up to 100 x 10 us. */
	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	/* Disable the OTP controller again before releasing the lock. */
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}
14590
14591 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14592 {
14593         int i;
14594         u32 val;
14595
14596         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14597         tw32(OTP_CTRL, cmd);
14598
14599         /* Wait for up to 1 ms for command to execute. */
14600         for (i = 0; i < 100; i++) {
14601                 val = tr32(OTP_STATUS);
14602                 if (val & OTP_STATUS_CMD_DONE)
14603                         break;
14604                 udelay(10);
14605         }
14606
14607         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14608 }
14609
14610 /* Read the gphy configuration from the OTP region of the chip.  The gphy
14611  * configuration is a 32-bit value that straddles the alignment boundary.
14612  * We do two 32-bit reads and then shift and merge the results.
14613  */
14614 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
14615 {
14616         u32 bhalf_otp, thalf_otp;
14617
14618         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
14619
14620         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
14621                 return 0;
14622
14623         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
14624
14625         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14626                 return 0;
14627
14628         thalf_otp = tr32(OTP_READ_DATA);
14629
14630         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
14631
14632         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14633                 return 0;
14634
14635         bhalf_otp = tr32(OTP_READ_DATA);
14636
14637         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
14638 }
14639
14640 static void tg3_phy_init_link_config(struct tg3 *tp)
14641 {
14642         u32 adv = ADVERTISED_Autoneg;
14643
14644         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14645                 adv |= ADVERTISED_1000baseT_Half |
14646                        ADVERTISED_1000baseT_Full;
14647
14648         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14649                 adv |= ADVERTISED_100baseT_Half |
14650                        ADVERTISED_100baseT_Full |
14651                        ADVERTISED_10baseT_Half |
14652                        ADVERTISED_10baseT_Full |
14653                        ADVERTISED_TP;
14654         else
14655                 adv |= ADVERTISED_FIBRE;
14656
14657         tp->link_config.advertising = adv;
14658         tp->link_config.speed = SPEED_UNKNOWN;
14659         tp->link_config.duplex = DUPLEX_UNKNOWN;
14660         tp->link_config.autoneg = AUTONEG_ENABLE;
14661         tp->link_config.active_speed = SPEED_UNKNOWN;
14662         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
14663
14664         tp->old_link = -1;
14665 }
14666
/* Discover and minimally configure the PHY.
 *
 * Determines the PHY ID — from the MII registers when safe, otherwise
 * from the value tg3_get_eeprom_hw_cfg() stored, otherwise from the
 * hardcoded subsystem-ID table — sets serdes/EEE/flow-control flags,
 * installs the default link config, and (when no firmware owns the
 * PHY) resets it and kicks off autonegotiation if needed.
 *
 * Returns 0 on success, -ENODEV if no usable PHY ID can be found, or
 * an error from PHY reset/DSP init.
 */
static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	/* On APE-enabled devices, each PCI function arbitrates for its
	 * own PHY via a dedicated APE lock.
	 */
	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	/* Link-flap-avoidance flags only apply when management firmware
	 * needs the link kept alive; drop them otherwise.
	 */
	if (!tg3_flag(tp, ENABLE_ASF) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (p) {
				tp->phy_id = p->phy_id;
			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* For now we saw the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to an BCM4785 and there are
				 * probably more. Just assume that the phy is
				 * supported when it is connected to a SSB core
				 * for now.
				 */
				return -ENODEV;
			}

			/* A zero table entry means serdes on this board. */
			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	/* EEE is only supported on these copper-PHY ASIC revisions. */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	tg3_phy_init_link_config(tp);

	/* Only touch the PHY (reset + autoneg restart) when no firmware
	 * owns it and the link is not already up.
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		/* BMSR is read twice: link status is latched-low, so the
		 * second read reflects the current state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
14812
/* Extract the board part number (and, on matching boards, the firmware
 * version string) from the PCI VPD block.
 *
 * Walks the VPD read-only section for the MFR_ID/VENDOR0 keywords to
 * seed tp->fw_ver, then the PARTNO keyword for tp->board_part_number.
 * If no usable VPD exists, falls back to a part number derived from
 * the PCI device ID, or "none".
 */
static void tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i = 0;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	/* Locate the read-only VPD resource and bound it. */
	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		/* Only read the VENDOR0 fw version when the manufacturer
		 * field is "1028" (presumably Dell, PCI vendor 0x1028 —
		 * matches the PCI_VENDOR_ID_DELL handling elsewhere).
		 */
		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		/* Clamp so the "%.*s bc " suffix always fits. */
		if (len >= sizeof(tp->fw_ver))
			len = sizeof(tp->fw_ver) - 1;
		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
			 &vpd_data[j]);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	/* No VPD part number: synthesize one from the PCI device ID. */
	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
14936
14937 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
14938 {
14939         u32 val;
14940
14941         if (tg3_nvram_read(tp, offset, &val) ||
14942             (val & 0xfc000000) != 0x0c000000 ||
14943             tg3_nvram_read(tp, offset + 4, &val) ||
14944             val != 0)
14945                 return 0;
14946
14947         return 1;
14948 }
14949
/* Extract the bootcode firmware version from NVRAM and append it to
 * tp->fw_ver.  Newer bootcode images embed a 16-byte version string;
 * older images encode a major.minor pair in a directory word.
 */
static void tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	/* Word 0xc holds the bootcode image offset, word 0x4 its start
	 * (load) address.
	 */
	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	/* New-format images carry the 0x0c000000 signature followed by a
	 * zero word (same check tg3_fw_img_is_valid() performs).
	 */
	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	/* Append after whatever (e.g. VPD) version text is already there. */
	dst_off = strlen(tp->fw_ver);

	if (newver) {
		/* Need room for the full 16-byte version string. */
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		/* ver_offset is relative to the image start address. */
		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
15001
15002 static void tg3_read_hwsb_ver(struct tg3 *tp)
15003 {
15004         u32 val, major, minor;
15005
15006         /* Use native endian representation */
15007         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15008                 return;
15009
15010         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15011                 TG3_NVM_HWSB_CFG1_MAJSFT;
15012         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15013                 TG3_NVM_HWSB_CFG1_MINSFT;
15014
15015         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15016 }
15017
/* Decode a self-boot (EEPROM format 1) firmware version from NVRAM and
 * append it to tp->fw_ver as "sb v<major>.<minor>[<build letter>]".
 * @val is the NVRAM signature word already read by the caller.
 */
static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	/* Each format-1 revision keeps its version (EDH) word at a
	 * different offset; unknown revisions get no version suffix.
	 */
	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;

	/* Sanity limits: two decimal digits for minor, builds 'a'..'z'. */
	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	/* A nonzero build number becomes a trailing letter (1 -> 'a'). */
	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
15072
15073 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15074 {
15075         u32 val, offset, start;
15076         int i, vlen;
15077
15078         for (offset = TG3_NVM_DIR_START;
15079              offset < TG3_NVM_DIR_END;
15080              offset += TG3_NVM_DIRENT_SIZE) {
15081                 if (tg3_nvram_read(tp, offset, &val))
15082                         return;
15083
15084                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15085                         break;
15086         }
15087
15088         if (offset == TG3_NVM_DIR_END)
15089                 return;
15090
15091         if (!tg3_flag(tp, 5705_PLUS))
15092                 start = 0x08000000;
15093         else if (tg3_nvram_read(tp, offset - 4, &start))
15094                 return;
15095
15096         if (tg3_nvram_read(tp, offset + 4, &offset) ||
15097             !tg3_fw_img_is_valid(tp, offset) ||
15098             tg3_nvram_read(tp, offset + 8, &val))
15099                 return;
15100
15101         offset += val - start;
15102
15103         vlen = strlen(tp->fw_ver);
15104
15105         tp->fw_ver[vlen++] = ',';
15106         tp->fw_ver[vlen++] = ' ';
15107
15108         for (i = 0; i < 4; i++) {
15109                 __be32 v;
15110                 if (tg3_nvram_read_be32(tp, offset, &v))
15111                         return;
15112
15113                 offset += sizeof(v);
15114
15115                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15116                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15117                         break;
15118                 }
15119
15120                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15121                 vlen += sizeof(v);
15122         }
15123 }
15124
15125 static void tg3_probe_ncsi(struct tg3 *tp)
15126 {
15127         u32 apedata;
15128
15129         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15130         if (apedata != APE_SEG_SIG_MAGIC)
15131                 return;
15132
15133         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15134         if (!(apedata & APE_FW_STATUS_READY))
15135                 return;
15136
15137         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15138                 tg3_flag_set(tp, APE_HAS_NCSI);
15139 }
15140
15141 static void tg3_read_dash_ver(struct tg3 *tp)
15142 {
15143         int vlen;
15144         u32 apedata;
15145         char *fwtype;
15146
15147         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15148
15149         if (tg3_flag(tp, APE_HAS_NCSI))
15150                 fwtype = "NCSI";
15151         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15152                 fwtype = "SMASH";
15153         else
15154                 fwtype = "DASH";
15155
15156         vlen = strlen(tp->fw_ver);
15157
15158         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15159                  fwtype,
15160                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15161                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15162                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15163                  (apedata & APE_FW_VERSION_BLDMSK));
15164 }
15165
15166 static void tg3_read_otp_ver(struct tg3 *tp)
15167 {
15168         u32 val, val2;
15169
15170         if (tg3_asic_rev(tp) != ASIC_REV_5762)
15171                 return;
15172
15173         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15174             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15175             TG3_OTP_MAGIC0_VALID(val)) {
15176                 u64 val64 = (u64) val << 32 | val2;
15177                 u32 ver = 0;
15178                 int i, vlen;
15179
15180                 for (i = 0; i < 7; i++) {
15181                         if ((val64 & 0xff) == 0)
15182                                 break;
15183                         ver = val64 & 0xff;
15184                         val64 >>= 8;
15185                 }
15186                 vlen = strlen(tp->fw_ver);
15187                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15188         }
15189 }
15190
/* Populate tp->fw_ver with every firmware version source available:
 * VPD text (already in the buffer on entry), NVRAM bootcode/self-boot
 * versions, and ASF/APE management firmware versions.
 */
static void tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	/* A non-empty fw_ver means VPD already supplied version text;
	 * in that case skip the management firmware version suffix.
	 */
	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		tg3_read_otp_ver(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	/* Dispatch on the NVRAM signature word. */
	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);

	if (tg3_flag(tp, ENABLE_ASF)) {
		if (tg3_flag(tp, ENABLE_APE)) {
			tg3_probe_ncsi(tp);
			if (!vpd_vers)
				tg3_read_dash_ver(tp);
		} else if (!vpd_vers) {
			tg3_read_mgmtfw_ver(tp);
		}
	}

	/* Guarantee NUL termination after all the appends above. */
	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
15227
15228 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15229 {
15230         if (tg3_flag(tp, LRG_PROD_RING_CAP))
15231                 return TG3_RX_RET_MAX_SIZE_5717;
15232         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15233                 return TG3_RX_RET_MAX_SIZE_5700;
15234         else
15235                 return TG3_RX_RET_MAX_SIZE_5705;
15236 }
15237
15238 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15239         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15240         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15241         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15242         { },
15243 };
15244
/* Find the other PCI function of a dual-port device (5704/5714) in the
 * same slot.  Returns tp->pdev itself when no distinct peer exists
 * (single-port configuration).  The returned device's refcount is NOT
 * elevated -- see the comment below.
 */
static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		/* pci_dev_put(NULL) is a no-op, so no NULL check needed. */
		pci_dev_put(peer);
	}
	/* NOTE(review): if the loop completes without a break, peer still
	 * holds the func-7 result whose reference was already dropped
	 * above; if that result was tp->pdev, the pci_dev_put() below is
	 * unbalanced.  In practice a peer or NULL appears to be found
	 * first -- confirm no supported config hits this path.
	 */
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
15272
/* Derive tp->pci_chip_rev_id from the misc host control register (or,
 * for newer parts, from an alternate product ID config register), fix
 * up known-wrong IDs, and set the chip-family flags.  The family flags
 * cascade: each newer family implies membership in the older ones.
 */
static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		/* Pick the config-space register that holds the real
		 * ASIC revision for this device family.
		 */
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	/* 5717 C0 reports the 5720 A0 chip ID. */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
	    tg3_asic_rev(tp) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
	     tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}
15357
15358 static bool tg3_10_100_only_device(struct tg3 *tp,
15359                                    const struct pci_device_id *ent)
15360 {
15361         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15362
15363         if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15364              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15365             (tp->phy_flags & TG3_PHYFLG_IS_FET))
15366                 return true;
15367
15368         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15369                 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15370                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15371                                 return true;
15372                 } else {
15373                         return true;
15374                 }
15375         }
15376
15377         return false;
15378 }
15379
15380 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15381 {
15382         u32 misc_ctrl_reg;
15383         u32 pci_state_reg, grc_misc_cfg;
15384         u32 val;
15385         u16 pci_cmd;
15386         int err;
15387
15388         /* Force memory write invalidate off.  If we leave it on,
15389          * then on 5700_BX chips we have to enable a workaround.
15390          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15391          * to match the cacheline size.  The Broadcom driver have this
15392          * workaround but turns MWI off all the times so never uses
15393          * it.  This seems to suggest that the workaround is insufficient.
15394          */
15395         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15396         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15397         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15398
15399         /* Important! -- Make sure register accesses are byteswapped
15400          * correctly.  Also, for those chips that require it, make
15401          * sure that indirect register accesses are enabled before
15402          * the first operation.
15403          */
15404         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15405                               &misc_ctrl_reg);
15406         tp->misc_host_ctrl |= (misc_ctrl_reg &
15407                                MISC_HOST_CTRL_CHIPREV);
15408         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15409                                tp->misc_host_ctrl);
15410
15411         tg3_detect_asic_rev(tp, misc_ctrl_reg);
15412
15413         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15414          * we need to disable memory and use config. cycles
15415          * only to access all registers. The 5702/03 chips
15416          * can mistakenly decode the special cycles from the
15417          * ICH chipsets as memory write cycles, causing corruption
15418          * of register and memory space. Only certain ICH bridges
15419          * will drive special cycles with non-zero data during the
15420          * address phase which can fall within the 5703's address
15421          * range. This is not an ICH bug as the PCI spec allows
15422          * non-zero address during special cycles. However, only
15423          * these ICH bridges are known to drive non-zero addresses
15424          * during special cycles.
15425          *
15426          * Since special cycles do not cross PCI bridges, we only
15427          * enable this workaround if the 5703 is on the secondary
15428          * bus of these ICH bridges.
15429          */
15430         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15431             (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15432                 static struct tg3_dev_id {
15433                         u32     vendor;
15434                         u32     device;
15435                         u32     rev;
15436                 } ich_chipsets[] = {
15437                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15438                           PCI_ANY_ID },
15439                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15440                           PCI_ANY_ID },
15441                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15442                           0xa },
15443                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15444                           PCI_ANY_ID },
15445                         { },
15446                 };
15447                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15448                 struct pci_dev *bridge = NULL;
15449
15450                 while (pci_id->vendor != 0) {
15451                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
15452                                                 bridge);
15453                         if (!bridge) {
15454                                 pci_id++;
15455                                 continue;
15456                         }
15457                         if (pci_id->rev != PCI_ANY_ID) {
15458                                 if (bridge->revision > pci_id->rev)
15459                                         continue;
15460                         }
15461                         if (bridge->subordinate &&
15462                             (bridge->subordinate->number ==
15463                              tp->pdev->bus->number)) {
15464                                 tg3_flag_set(tp, ICH_WORKAROUND);
15465                                 pci_dev_put(bridge);
15466                                 break;
15467                         }
15468                 }
15469         }
15470
15471         if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15472                 static struct tg3_dev_id {
15473                         u32     vendor;
15474                         u32     device;
15475                 } bridge_chipsets[] = {
15476                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15477                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15478                         { },
15479                 };
15480                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15481                 struct pci_dev *bridge = NULL;
15482
15483                 while (pci_id->vendor != 0) {
15484                         bridge = pci_get_device(pci_id->vendor,
15485                                                 pci_id->device,
15486                                                 bridge);
15487                         if (!bridge) {
15488                                 pci_id++;
15489                                 continue;
15490                         }
15491                         if (bridge->subordinate &&
15492                             (bridge->subordinate->number <=
15493                              tp->pdev->bus->number) &&
15494                             (bridge->subordinate->busn_res.end >=
15495                              tp->pdev->bus->number)) {
15496                                 tg3_flag_set(tp, 5701_DMA_BUG);
15497                                 pci_dev_put(bridge);
15498                                 break;
15499                         }
15500                 }
15501         }
15502
15503         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15504          * DMA addresses > 40-bit. This bridge may have other additional
15505          * 57xx devices behind it in some 4-port NIC designs for example.
15506          * Any tg3 device found behind the bridge will also need the 40-bit
15507          * DMA workaround.
15508          */
15509         if (tg3_flag(tp, 5780_CLASS)) {
15510                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15511                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15512         } else {
15513                 struct pci_dev *bridge = NULL;
15514
15515                 do {
15516                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15517                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
15518                                                 bridge);
15519                         if (bridge && bridge->subordinate &&
15520                             (bridge->subordinate->number <=
15521                              tp->pdev->bus->number) &&
15522                             (bridge->subordinate->busn_res.end >=
15523                              tp->pdev->bus->number)) {
15524                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15525                                 pci_dev_put(bridge);
15526                                 break;
15527                         }
15528                 } while (bridge);
15529         }
15530
15531         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15532             tg3_asic_rev(tp) == ASIC_REV_5714)
15533                 tp->pdev_peer = tg3_find_peer(tp);
15534
15535         /* Determine TSO capabilities */
15536         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
15537                 ; /* Do nothing. HW bug. */
15538         else if (tg3_flag(tp, 57765_PLUS))
15539                 tg3_flag_set(tp, HW_TSO_3);
15540         else if (tg3_flag(tp, 5755_PLUS) ||
15541                  tg3_asic_rev(tp) == ASIC_REV_5906)
15542                 tg3_flag_set(tp, HW_TSO_2);
15543         else if (tg3_flag(tp, 5750_PLUS)) {
15544                 tg3_flag_set(tp, HW_TSO_1);
15545                 tg3_flag_set(tp, TSO_BUG);
15546                 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
15547                     tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
15548                         tg3_flag_clear(tp, TSO_BUG);
15549         } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15550                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
15551                    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
15552                 tg3_flag_set(tp, FW_TSO);
15553                 tg3_flag_set(tp, TSO_BUG);
15554                 if (tg3_asic_rev(tp) == ASIC_REV_5705)
15555                         tp->fw_needed = FIRMWARE_TG3TSO5;
15556                 else
15557                         tp->fw_needed = FIRMWARE_TG3TSO;
15558         }
15559
15560         /* Selectively allow TSO based on operating conditions */
15561         if (tg3_flag(tp, HW_TSO_1) ||
15562             tg3_flag(tp, HW_TSO_2) ||
15563             tg3_flag(tp, HW_TSO_3) ||
15564             tg3_flag(tp, FW_TSO)) {
15565                 /* For firmware TSO, assume ASF is disabled.
15566                  * We'll disable TSO later if we discover ASF
15567                  * is enabled in tg3_get_eeprom_hw_cfg().
15568                  */
15569                 tg3_flag_set(tp, TSO_CAPABLE);
15570         } else {
15571                 tg3_flag_clear(tp, TSO_CAPABLE);
15572                 tg3_flag_clear(tp, TSO_BUG);
15573                 tp->fw_needed = NULL;
15574         }
15575
15576         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
15577                 tp->fw_needed = FIRMWARE_TG3;
15578
15579         if (tg3_asic_rev(tp) == ASIC_REV_57766)
15580                 tp->fw_needed = FIRMWARE_TG357766;
15581
15582         tp->irq_max = 1;
15583
15584         if (tg3_flag(tp, 5750_PLUS)) {
15585                 tg3_flag_set(tp, SUPPORT_MSI);
15586                 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
15587                     tg3_chip_rev(tp) == CHIPREV_5750_BX ||
15588                     (tg3_asic_rev(tp) == ASIC_REV_5714 &&
15589                      tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
15590                      tp->pdev_peer == tp->pdev))
15591                         tg3_flag_clear(tp, SUPPORT_MSI);
15592
15593                 if (tg3_flag(tp, 5755_PLUS) ||
15594                     tg3_asic_rev(tp) == ASIC_REV_5906) {
15595                         tg3_flag_set(tp, 1SHOT_MSI);
15596                 }
15597
15598                 if (tg3_flag(tp, 57765_PLUS)) {
15599                         tg3_flag_set(tp, SUPPORT_MSIX);
15600                         tp->irq_max = TG3_IRQ_MAX_VECS;
15601                 }
15602         }
15603
15604         tp->txq_max = 1;
15605         tp->rxq_max = 1;
15606         if (tp->irq_max > 1) {
15607                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15608                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15609
15610                 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15611                     tg3_asic_rev(tp) == ASIC_REV_5720)
15612                         tp->txq_max = tp->irq_max - 1;
15613         }
15614
15615         if (tg3_flag(tp, 5755_PLUS) ||
15616             tg3_asic_rev(tp) == ASIC_REV_5906)
15617                 tg3_flag_set(tp, SHORT_DMA_BUG);
15618
15619         if (tg3_asic_rev(tp) == ASIC_REV_5719)
15620                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15621
15622         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15623             tg3_asic_rev(tp) == ASIC_REV_5719 ||
15624             tg3_asic_rev(tp) == ASIC_REV_5720 ||
15625             tg3_asic_rev(tp) == ASIC_REV_5762)
15626                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15627
15628         if (tg3_flag(tp, 57765_PLUS) &&
15629             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
15630                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15631
15632         if (!tg3_flag(tp, 5705_PLUS) ||
15633             tg3_flag(tp, 5780_CLASS) ||
15634             tg3_flag(tp, USE_JUMBO_BDFLAG))
15635                 tg3_flag_set(tp, JUMBO_CAPABLE);
15636
15637         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15638                               &pci_state_reg);
15639
15640         if (pci_is_pcie(tp->pdev)) {
15641                 u16 lnkctl;
15642
15643                 tg3_flag_set(tp, PCI_EXPRESS);
15644
15645                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15646                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15647                         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15648                                 tg3_flag_clear(tp, HW_TSO_2);
15649                                 tg3_flag_clear(tp, TSO_CAPABLE);
15650                         }
15651                         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
15652                             tg3_asic_rev(tp) == ASIC_REV_5761 ||
15653                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
15654                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
15655                                 tg3_flag_set(tp, CLKREQ_BUG);
15656                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
15657                         tg3_flag_set(tp, L1PLLPD_EN);
15658                 }
15659         } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
15660                 /* BCM5785 devices are effectively PCIe devices, and should
15661                  * follow PCIe codepaths, but do not have a PCIe capabilities
15662                  * section.
15663                  */
15664                 tg3_flag_set(tp, PCI_EXPRESS);
15665         } else if (!tg3_flag(tp, 5705_PLUS) ||
15666                    tg3_flag(tp, 5780_CLASS)) {
15667                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15668                 if (!tp->pcix_cap) {
15669                         dev_err(&tp->pdev->dev,
15670                                 "Cannot find PCI-X capability, aborting\n");
15671                         return -EIO;
15672                 }
15673
15674                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15675                         tg3_flag_set(tp, PCIX_MODE);
15676         }
15677
15678         /* If we have an AMD 762 or VIA K8T800 chipset, write
15679          * reordering to the mailbox registers done by the host
15680          * controller can cause major troubles.  We read back from
15681          * every mailbox register write to force the writes to be
15682          * posted to the chip in order.
15683          */
15684         if (pci_dev_present(tg3_write_reorder_chipsets) &&
15685             !tg3_flag(tp, PCI_EXPRESS))
15686                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
15687
15688         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15689                              &tp->pci_cacheline_sz);
15690         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15691                              &tp->pci_lat_timer);
15692         if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
15693             tp->pci_lat_timer < 64) {
15694                 tp->pci_lat_timer = 64;
15695                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15696                                       tp->pci_lat_timer);
15697         }
15698
15699         /* Important! -- It is critical that the PCI-X hw workaround
15700          * situation is decided before the first MMIO register access.
15701          */
15702         if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
15703                 /* 5700 BX chips need to have their TX producer index
15704                  * mailboxes written twice to workaround a bug.
15705                  */
15706                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
15707
15708                 /* If we are in PCI-X mode, enable register write workaround.
15709                  *
15710                  * The workaround is to use indirect register accesses
15711                  * for all chip writes not to mailbox registers.
15712                  */
15713                 if (tg3_flag(tp, PCIX_MODE)) {
15714                         u32 pm_reg;
15715
15716                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15717
15718                         /* The chip can have it's power management PCI config
15719                          * space registers clobbered due to this bug.
15720                          * So explicitly force the chip into D0 here.
15721                          */
15722                         pci_read_config_dword(tp->pdev,
15723                                               tp->pm_cap + PCI_PM_CTRL,
15724                                               &pm_reg);
15725                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
15726                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
15727                         pci_write_config_dword(tp->pdev,
15728                                                tp->pm_cap + PCI_PM_CTRL,
15729                                                pm_reg);
15730
15731                         /* Also, force SERR#/PERR# in PCI command. */
15732                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15733                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
15734                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15735                 }
15736         }
15737
15738         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
15739                 tg3_flag_set(tp, PCI_HIGH_SPEED);
15740         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
15741                 tg3_flag_set(tp, PCI_32BIT);
15742
15743         /* Chip-specific fixup from Broadcom driver */
15744         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
15745             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
15746                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
15747                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
15748         }
15749
15750         /* Default fast path register access methods */
15751         tp->read32 = tg3_read32;
15752         tp->write32 = tg3_write32;
15753         tp->read32_mbox = tg3_read32;
15754         tp->write32_mbox = tg3_write32;
15755         tp->write32_tx_mbox = tg3_write32;
15756         tp->write32_rx_mbox = tg3_write32;
15757
15758         /* Various workaround register access methods */
15759         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
15760                 tp->write32 = tg3_write_indirect_reg32;
15761         else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
15762                  (tg3_flag(tp, PCI_EXPRESS) &&
15763                   tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
15764                 /*
15765                  * Back to back register writes can cause problems on these
15766                  * chips, the workaround is to read back all reg writes
15767                  * except those to mailbox regs.
15768                  *
15769                  * See tg3_write_indirect_reg32().
15770                  */
15771                 tp->write32 = tg3_write_flush_reg32;
15772         }
15773
15774         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
15775                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
15776                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
15777                         tp->write32_rx_mbox = tg3_write_flush_reg32;
15778         }
15779
15780         if (tg3_flag(tp, ICH_WORKAROUND)) {
15781                 tp->read32 = tg3_read_indirect_reg32;
15782                 tp->write32 = tg3_write_indirect_reg32;
15783                 tp->read32_mbox = tg3_read_indirect_mbox;
15784                 tp->write32_mbox = tg3_write_indirect_mbox;
15785                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
15786                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
15787
15788                 iounmap(tp->regs);
15789                 tp->regs = NULL;
15790
15791                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15792                 pci_cmd &= ~PCI_COMMAND_MEMORY;
15793                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15794         }
15795         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15796                 tp->read32_mbox = tg3_read32_mbox_5906;
15797                 tp->write32_mbox = tg3_write32_mbox_5906;
15798                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
15799                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
15800         }
15801
15802         if (tp->write32 == tg3_write_indirect_reg32 ||
15803             (tg3_flag(tp, PCIX_MODE) &&
15804              (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15805               tg3_asic_rev(tp) == ASIC_REV_5701)))
15806                 tg3_flag_set(tp, SRAM_USE_CONFIG);
15807
15808         /* The memory arbiter has to be enabled in order for SRAM accesses
15809          * to succeed.  Normally on powerup the tg3 chip firmware will make
15810          * sure it is enabled, but other entities such as system netboot
15811          * code might disable it.
15812          */
15813         val = tr32(MEMARB_MODE);
15814         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
15815
15816         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
15817         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15818             tg3_flag(tp, 5780_CLASS)) {
15819                 if (tg3_flag(tp, PCIX_MODE)) {
15820                         pci_read_config_dword(tp->pdev,
15821                                               tp->pcix_cap + PCI_X_STATUS,
15822                                               &val);
15823                         tp->pci_fn = val & 0x7;
15824                 }
15825         } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15826                    tg3_asic_rev(tp) == ASIC_REV_5719 ||
15827                    tg3_asic_rev(tp) == ASIC_REV_5720) {
15828                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
15829                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
15830                         val = tr32(TG3_CPMU_STATUS);
15831
15832                 if (tg3_asic_rev(tp) == ASIC_REV_5717)
15833                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
15834                 else
15835                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
15836                                      TG3_CPMU_STATUS_FSHFT_5719;
15837         }
15838
15839         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
15840                 tp->write32_tx_mbox = tg3_write_flush_reg32;
15841                 tp->write32_rx_mbox = tg3_write_flush_reg32;
15842         }
15843
15844         /* Get eeprom hw config before calling tg3_set_power_state().
15845          * In particular, the TG3_FLAG_IS_NIC flag must be
15846          * determined before calling tg3_set_power_state() so that
15847          * we know whether or not to switch out of Vaux power.
15848          * When the flag is set, it means that GPIO1 is used for eeprom
15849          * write protect and also implies that it is a LOM where GPIOs
15850          * are not used to switch power.
15851          */
15852         tg3_get_eeprom_hw_cfg(tp);
15853
15854         if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
15855                 tg3_flag_clear(tp, TSO_CAPABLE);
15856                 tg3_flag_clear(tp, TSO_BUG);
15857                 tp->fw_needed = NULL;
15858         }
15859
15860         if (tg3_flag(tp, ENABLE_APE)) {
15861                 /* Allow reads and writes to the
15862                  * APE register and memory space.
15863                  */
15864                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
15865                                  PCISTATE_ALLOW_APE_SHMEM_WR |
15866                                  PCISTATE_ALLOW_APE_PSPACE_WR;
15867                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
15868                                        pci_state_reg);
15869
15870                 tg3_ape_lock_init(tp);
15871         }
15872
15873         /* Set up tp->grc_local_ctrl before calling
15874          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
15875          * will bring 5700's external PHY out of reset.
15876          * It is also used as eeprom write protect on LOMs.
15877          */
15878         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
15879         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15880             tg3_flag(tp, EEPROM_WRITE_PROT))
15881                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
15882                                        GRC_LCLCTRL_GPIO_OUTPUT1);
15883         /* Unused GPIO3 must be driven as output on 5752 because there
15884          * are no pull-up resistors on unused GPIO pins.
15885          */
15886         else if (tg3_asic_rev(tp) == ASIC_REV_5752)
15887                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
15888
15889         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15890             tg3_asic_rev(tp) == ASIC_REV_57780 ||
15891             tg3_flag(tp, 57765_CLASS))
15892                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15893
15894         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15895             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
15896                 /* Turn off the debug UART. */
15897                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15898                 if (tg3_flag(tp, IS_NIC))
15899                         /* Keep VMain power. */
15900                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
15901                                               GRC_LCLCTRL_GPIO_OUTPUT0;
15902         }
15903
15904         if (tg3_asic_rev(tp) == ASIC_REV_5762)
15905                 tp->grc_local_ctrl |=
15906                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
15907
15908         /* Switch out of Vaux if it is a NIC */
15909         tg3_pwrsrc_switch_to_vmain(tp);
15910
15911         /* Derive initial jumbo mode from MTU assigned in
15912          * ether_setup() via the alloc_etherdev() call
15913          */
15914         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
15915                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
15916
15917         /* Determine WakeOnLan speed to use. */
15918         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15919             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
15920             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
15921             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
15922                 tg3_flag_clear(tp, WOL_SPEED_100MB);
15923         } else {
15924                 tg3_flag_set(tp, WOL_SPEED_100MB);
15925         }
15926
15927         if (tg3_asic_rev(tp) == ASIC_REV_5906)
15928                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
15929
15930         /* A few boards don't want Ethernet@WireSpeed phy feature */
15931         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15932             (tg3_asic_rev(tp) == ASIC_REV_5705 &&
15933              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
15934              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
15935             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
15936             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15937                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
15938
15939         if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
15940             tg3_chip_rev(tp) == CHIPREV_5704_AX)
15941                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
15942         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
15943                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
15944
15945         if (tg3_flag(tp, 5705_PLUS) &&
15946             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
15947             tg3_asic_rev(tp) != ASIC_REV_5785 &&
15948             tg3_asic_rev(tp) != ASIC_REV_57780 &&
15949             !tg3_flag(tp, 57765_PLUS)) {
15950                 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15951                     tg3_asic_rev(tp) == ASIC_REV_5787 ||
15952                     tg3_asic_rev(tp) == ASIC_REV_5784 ||
15953                     tg3_asic_rev(tp) == ASIC_REV_5761) {
15954                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
15955                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
15956                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
15957                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
15958                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
15959                 } else
15960                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
15961         }
15962
15963         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15964             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
15965                 tp->phy_otp = tg3_read_otp_phycfg(tp);
15966                 if (tp->phy_otp == 0)
15967                         tp->phy_otp = TG3_OTP_DEFAULT;
15968         }
15969
15970         if (tg3_flag(tp, CPMU_PRESENT))
15971                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
15972         else
15973                 tp->mi_mode = MAC_MI_MODE_BASE;
15974
15975         tp->coalesce_mode = 0;
15976         if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
15977             tg3_chip_rev(tp) != CHIPREV_5700_BX)
15978                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
15979
15980         /* Set these bits to enable statistics workaround. */
15981         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15982             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
15983             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
15984                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
15985                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
15986         }
15987
15988         if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
15989             tg3_asic_rev(tp) == ASIC_REV_57780)
15990                 tg3_flag_set(tp, USE_PHYLIB);
15991
15992         err = tg3_mdio_init(tp);
15993         if (err)
15994                 return err;
15995
15996         /* Initialize data/descriptor byte/word swapping. */
15997         val = tr32(GRC_MODE);
15998         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15999             tg3_asic_rev(tp) == ASIC_REV_5762)
16000                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16001                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
16002                         GRC_MODE_B2HRX_ENABLE |
16003                         GRC_MODE_HTX2B_ENABLE |
16004                         GRC_MODE_HOST_STACKUP);
16005         else
16006                 val &= GRC_MODE_HOST_STACKUP;
16007
16008         tw32(GRC_MODE, val | tp->grc_mode);
16009
16010         tg3_switch_clocks(tp);
16011
16012         /* Clear this out for sanity. */
16013         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16014
16015         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16016                               &pci_state_reg);
16017         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16018             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16019                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16020                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16021                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16022                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16023                         void __iomem *sram_base;
16024
16025                         /* Write some dummy words into the SRAM status block
16026                          * area, see if it reads back correctly.  If the return
16027                          * value is bad, force enable the PCIX workaround.
16028                          */
16029                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16030
16031                         writel(0x00000000, sram_base);
16032                         writel(0x00000000, sram_base + 4);
16033                         writel(0xffffffff, sram_base + 4);
16034                         if (readl(sram_base) != 0x00000000)
16035                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16036                 }
16037         }
16038
16039         udelay(50);
16040         tg3_nvram_init(tp);
16041
16042         /* If the device has an NVRAM, no need to load patch firmware */
16043         if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16044             !tg3_flag(tp, NO_NVRAM))
16045                 tp->fw_needed = NULL;
16046
16047         grc_misc_cfg = tr32(GRC_MISC_CFG);
16048         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16049
16050         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16051             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16052              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16053                 tg3_flag_set(tp, IS_5788);
16054
16055         if (!tg3_flag(tp, IS_5788) &&
16056             tg3_asic_rev(tp) != ASIC_REV_5700)
16057                 tg3_flag_set(tp, TAGGED_STATUS);
16058         if (tg3_flag(tp, TAGGED_STATUS)) {
16059                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16060                                       HOSTCC_MODE_CLRTICK_TXBD);
16061
16062                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16063                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16064                                        tp->misc_host_ctrl);
16065         }
16066
16067         /* Preserve the APE MAC_MODE bits */
16068         if (tg3_flag(tp, ENABLE_APE))
16069                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16070         else
16071                 tp->mac_mode = 0;
16072
16073         if (tg3_10_100_only_device(tp, ent))
16074                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16075
16076         err = tg3_phy_probe(tp);
16077         if (err) {
16078                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16079                 /* ... but do not return immediately ... */
16080                 tg3_mdio_fini(tp);
16081         }
16082
16083         tg3_read_vpd(tp);
16084         tg3_read_fw_ver(tp);
16085
16086         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16087                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16088         } else {
16089                 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16090                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16091                 else
16092                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16093         }
16094
16095         /* 5700 {AX,BX} chips have a broken status block link
16096          * change bit implementation, so we must use the
16097          * status register in those cases.
16098          */
16099         if (tg3_asic_rev(tp) == ASIC_REV_5700)
16100                 tg3_flag_set(tp, USE_LINKCHG_REG);
16101         else
16102                 tg3_flag_clear(tp, USE_LINKCHG_REG);
16103
16104         /* The led_ctrl is set during tg3_phy_probe, here we might
16105          * have to force the link status polling mechanism based
16106          * upon subsystem IDs.
16107          */
16108         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16109             tg3_asic_rev(tp) == ASIC_REV_5701 &&
16110             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16111                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16112                 tg3_flag_set(tp, USE_LINKCHG_REG);
16113         }
16114
16115         /* For all SERDES we poll the MAC status register. */
16116         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16117                 tg3_flag_set(tp, POLL_SERDES);
16118         else
16119                 tg3_flag_clear(tp, POLL_SERDES);
16120
16121         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16122         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16123         if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16124             tg3_flag(tp, PCIX_MODE)) {
16125                 tp->rx_offset = NET_SKB_PAD;
16126 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16127                 tp->rx_copy_thresh = ~(u16)0;
16128 #endif
16129         }
16130
16131         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16132         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16133         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16134
16135         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16136
16137         /* Increment the rx prod index on the rx std ring by at most
16138          * 8 for these chips to workaround hw errata.
16139          */
16140         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16141             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16142             tg3_asic_rev(tp) == ASIC_REV_5755)
16143                 tp->rx_std_max_post = 8;
16144
16145         if (tg3_flag(tp, ASPM_WORKAROUND))
16146                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16147                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
16148
16149         return err;
16150 }
16151
16152 #ifdef CONFIG_SPARC
16153 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16154 {
16155         struct net_device *dev = tp->dev;
16156         struct pci_dev *pdev = tp->pdev;
16157         struct device_node *dp = pci_device_to_OF_node(pdev);
16158         const unsigned char *addr;
16159         int len;
16160
16161         addr = of_get_property(dp, "local-mac-address", &len);
16162         if (addr && len == 6) {
16163                 memcpy(dev->dev_addr, addr, 6);
16164                 return 0;
16165         }
16166         return -ENODEV;
16167 }
16168
16169 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16170 {
16171         struct net_device *dev = tp->dev;
16172
16173         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
16174         return 0;
16175 }
16176 #endif
16177
/* Discover the device's permanent MAC address, trying the available
 * sources in order of preference:
 *   1. SPARC only: the OF device-tree "local-mac-address" property.
 *   2. SSB-attached cores: the SSB sprom (ssb_gige_get_macaddr).
 *   3. The NIC SRAM MAC address mailbox written by bootcode.
 *   4. NVRAM at a chip/function-specific offset.
 *   5. The live MAC_ADDR_0_HIGH/LOW hardware registers.
 *
 * Fills tp->dev->dev_addr and returns 0 on success, -EINVAL if no
 * source produced a valid ethernet address.
 */
static int tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;
	int err;

#ifdef CONFIG_SPARC
	/* Device tree wins outright when it has an address. */
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	if (tg3_flag(tp, IS_SSB_CORE)) {
		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
			return 0;
	}

	/* Pick the NVRAM offset holding this function's MAC address.
	 * 0x7c is the default; dual-MAC and multi-function parts keep
	 * the second/later functions' addresses at other offsets.
	 */
	mac_offset = 0x7c;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		/* NOTE(review): if the NVRAM arbitration lock cannot be
		 * taken, the NVRAM state machine is reset instead of
		 * unlocked — presumably to recover a wedged arbiter;
		 * confirm against Broadcom errata before restructuring.
		 */
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		/* Odd functions use 0xcc; functions 2/3 sit a further
		 * 0x18c beyond their even/odd base offset.
		 */
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	/* 0x484b is ASCII "HK", the bootcode's signature indicating the
	 * mailbox holds a valid address.  The address is big-endian
	 * across the two words: 2 bytes in 'hi', 4 bytes in 'lo'.
	 */
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			/* NVRAM words are big-endian (__be32); the MAC
			 * occupies the low 2 bytes of 'hi' plus all of
			 * 'lo', so skip the first 2 bytes of 'hi'.
			 */
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			/* Register layout is little-end-first within the
			 * 32-bit words, hence the reversed byte order
			 * relative to the SRAM mailbox path above.
			 */
			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		/* On SPARC, fall back to the IDPROM system address. */
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	return 0;
}
16259
16260 #define BOUNDARY_SINGLE_CACHELINE       1
16261 #define BOUNDARY_MULTI_CACHELINE        2
16262
16263 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16264 {
16265         int cacheline_size;
16266         u8 byte;
16267         int goal;
16268
16269         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16270         if (byte == 0)
16271                 cacheline_size = 1024;
16272         else
16273                 cacheline_size = (int) byte * 4;
16274
16275         /* On 5703 and later chips, the boundary bits have no
16276          * effect.
16277          */
16278         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16279             tg3_asic_rev(tp) != ASIC_REV_5701 &&
16280             !tg3_flag(tp, PCI_EXPRESS))
16281                 goto out;
16282
16283 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16284         goal = BOUNDARY_MULTI_CACHELINE;
16285 #else
16286 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16287         goal = BOUNDARY_SINGLE_CACHELINE;
16288 #else
16289         goal = 0;
16290 #endif
16291 #endif
16292
16293         if (tg3_flag(tp, 57765_PLUS)) {
16294                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16295                 goto out;
16296         }
16297
16298         if (!goal)
16299                 goto out;
16300
16301         /* PCI controllers on most RISC systems tend to disconnect
16302          * when a device tries to burst across a cache-line boundary.
16303          * Therefore, letting tg3 do so just wastes PCI bandwidth.
16304          *
16305          * Unfortunately, for PCI-E there are only limited
16306          * write-side controls for this, and thus for reads
16307          * we will still get the disconnects.  We'll also waste
16308          * these PCI cycles for both read and write for chips
16309          * other than 5700 and 5701 which do not implement the
16310          * boundary bits.
16311          */
16312         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16313                 switch (cacheline_size) {
16314                 case 16:
16315                 case 32:
16316                 case 64:
16317                 case 128:
16318                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16319                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16320                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16321                         } else {
16322                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16323                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16324                         }
16325                         break;
16326
16327                 case 256:
16328                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16329                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16330                         break;
16331
16332                 default:
16333                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16334                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16335                         break;
16336                 }
16337         } else if (tg3_flag(tp, PCI_EXPRESS)) {
16338                 switch (cacheline_size) {
16339                 case 16:
16340                 case 32:
16341                 case 64:
16342                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16343                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16344                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16345                                 break;
16346                         }
16347                         /* fallthrough */
16348                 case 128:
16349                 default:
16350                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16351                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16352                         break;
16353                 }
16354         } else {
16355                 switch (cacheline_size) {
16356                 case 16:
16357                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16358                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
16359                                         DMA_RWCTRL_WRITE_BNDRY_16);
16360                                 break;
16361                         }
16362                         /* fallthrough */
16363                 case 32:
16364                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16365                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
16366                                         DMA_RWCTRL_WRITE_BNDRY_32);
16367                                 break;
16368                         }
16369                         /* fallthrough */
16370                 case 64:
16371                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16372                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
16373                                         DMA_RWCTRL_WRITE_BNDRY_64);
16374                                 break;
16375                         }
16376                         /* fallthrough */
16377                 case 128:
16378                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16379                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
16380                                         DMA_RWCTRL_WRITE_BNDRY_128);
16381                                 break;
16382                         }
16383                         /* fallthrough */
16384                 case 256:
16385                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
16386                                 DMA_RWCTRL_WRITE_BNDRY_256);
16387                         break;
16388                 case 512:
16389                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
16390                                 DMA_RWCTRL_WRITE_BNDRY_512);
16391                         break;
16392                 case 1024:
16393                 default:
16394                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16395                                 DMA_RWCTRL_WRITE_BNDRY_1024);
16396                         break;
16397                 }
16398         }
16399
16400 out:
16401         return val;
16402 }
16403
/* Perform a single host<->NIC DMA transfer of @size bytes using a
 * hand-built internal buffer descriptor written into NIC SRAM, then
 * poll the relevant completion FIFO for the descriptor address.
 *
 * @tp:        device instance
 * @buf:       host buffer (coherent DMA memory)
 * @buf_dma:   DMA address of @buf
 * @size:      transfer length in bytes
 * @to_device: non-zero = read DMA (host -> NIC), zero = write DMA
 *             (NIC -> host)
 *
 * Returns 0 when the completion FIFO reports the descriptor, -ENODEV
 * if it does not show up within the polling window (40 * 100us).
 */
static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
                           int size, int to_device)
{
        struct tg3_internal_buffer_desc test_desc;
        u32 sram_dma_descs;
        int i, ret;

        sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

        /* Quiesce the completion FIFOs and DMA status before the test. */
        tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
        tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
        tw32(RDMAC_STATUS, 0);
        tw32(WDMAC_STATUS, 0);

        tw32(BUFMGR_MODE, 0);
        tw32(FTQ_RESET, 0);

        /* Point the descriptor at the host buffer; 0x2100 is the NIC
         * internal mbuf address used as the on-chip end of the transfer.
         */
        test_desc.addr_hi = ((u64) buf_dma) >> 32;
        test_desc.addr_lo = buf_dma & 0xffffffff;
        test_desc.nic_mbuf = 0x00002100;
        test_desc.len = size;

        /*
         * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
         * the *second* time the tg3 driver was getting loaded after an
         * initial scan.
         *
         * Broadcom tells me:
         *   ...the DMA engine is connected to the GRC block and a DMA
         *   reset may affect the GRC block in some unpredictable way...
         *   The behavior of resets to individual blocks has not been tested.
         *
         * Broadcom noted the GRC reset will also reset all sub-components.
         */
        if (to_device) {
                test_desc.cqid_sqid = (13 << 8) | 2;

                tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
                udelay(40);
        } else {
                test_desc.cqid_sqid = (16 << 8) | 7;

                tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
                udelay(40);
        }
        test_desc.flags = 0x00000005;

        /* Copy the descriptor word-by-word into NIC SRAM through the
         * PCI memory window, then restore the window base.
         */
        for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
                u32 val;

                val = *(((u32 *)&test_desc) + i);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
                                       sram_dma_descs + (i * sizeof(u32)));
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
        }
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

        /* Kick off the transfer by enqueueing the descriptor address. */
        if (to_device)
                tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
        else
                tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

        /* Poll the completion FIFO until our descriptor shows up. */
        ret = -ENODEV;
        for (i = 0; i < 40; i++) {
                u32 val;

                if (to_device)
                        val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
                else
                        val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
                if ((val & 0xffff) == sram_dma_descs) {
                        ret = 0;
                        break;
                }

                udelay(100);
        }

        return ret;
}
16484
16485 #define TEST_BUFFER_SIZE        0x2000
16486
/* Host bridges on which tg3_test_dma() forces the 16-byte write DMA
 * boundary workaround even when the loopback test itself passes.
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
        { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
        { },
};
16491
/* Compute the chip- and bus-specific value for TG3PCI_DMA_RW_CTRL and,
 * on 5700/5701 parts, run a write/read DMA loopback against a host
 * buffer to detect the write DMA bug.  If corruption is observed (or a
 * known-problematic host bridge is present) the DMA write boundary is
 * tightened to 16 bytes; otherwise the calculated value is kept.
 *
 * Returns 0 on success, -ENOMEM if the test buffer cannot be
 * allocated, or -ENODEV if DMA fails/corrupts even with the
 * workaround applied.
 */
static int tg3_test_dma(struct tg3 *tp)
{
        dma_addr_t buf_dma;
        u32 *buf, saved_dma_rwctrl;
        int ret = 0;

        buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
                                 &buf_dma, GFP_KERNEL);
        if (!buf) {
                ret = -ENOMEM;
                goto out_nofree;
        }

        /* Start from the default PCI read/write command codes, then let
         * tg3_calc_dma_bndry() fold in the cacheline boundary settings.
         */
        tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
                          (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

        tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

        if (tg3_flag(tp, 57765_PLUS))
                goto out;

        /* Per-bus / per-ASIC DMA watermark tuning. */
        if (tg3_flag(tp, PCI_EXPRESS)) {
                /* DMA read watermark not used on PCIE */
                tp->dma_rwctrl |= 0x00180000;
        } else if (!tg3_flag(tp, PCIX_MODE)) {
                if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
                    tg3_asic_rev(tp) == ASIC_REV_5750)
                        tp->dma_rwctrl |= 0x003f0000;
                else
                        tp->dma_rwctrl |= 0x003f000f;
        } else {
                if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
                    tg3_asic_rev(tp) == ASIC_REV_5704) {
                        u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
                        u32 read_water = 0x7;

                        /* If the 5704 is behind the EPB bridge, we can
                         * do the less restrictive ONE_DMA workaround for
                         * better performance.
                         */
                        if (tg3_flag(tp, 40BIT_DMA_BUG) &&
                            tg3_asic_rev(tp) == ASIC_REV_5704)
                                tp->dma_rwctrl |= 0x8000;
                        else if (ccval == 0x6 || ccval == 0x7)
                                tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

                        if (tg3_asic_rev(tp) == ASIC_REV_5703)
                                read_water = 4;
                        /* Set bit 23 to enable PCIX hw bug fix */
                        tp->dma_rwctrl |=
                                (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
                                (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
                                (1 << 23);
                } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
                        /* 5780 always in PCIX mode */
                        tp->dma_rwctrl |= 0x00144000;
                } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
                        /* 5714 always in PCIX mode */
                        tp->dma_rwctrl |= 0x00148000;
                } else {
                        tp->dma_rwctrl |= 0x001b000f;
                }
        }
        if (tg3_flag(tp, ONE_DMA_AT_ONCE))
                tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

        /* 5703/5704: clear the low nibble of the boundary bits. */
        if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
            tg3_asic_rev(tp) == ASIC_REV_5704)
                tp->dma_rwctrl &= 0xfffffff0;

        if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
            tg3_asic_rev(tp) == ASIC_REV_5701) {
                /* Remove this if it causes problems for some boards. */
                tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

                /* On 5700/5701 chips, we need to set this bit.
                 * Otherwise the chip will issue cacheline transactions
                 * to streamable DMA memory with not all the byte
                 * enables turned on.  This is an error on several
                 * RISC PCI controllers, in particular sparc64.
                 *
                 * On 5703/5704 chips, this bit has been reassigned
                 * a different meaning.  In particular, it is used
                 * on those chips to enable a PCI-X workaround.
                 */
                tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
        }

        tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
        /* Unneeded, already done by tg3_get_invariants.  */
        tg3_switch_clocks(tp);
#endif

        /* The loopback test below is only needed for 5700/5701. */
        if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
            tg3_asic_rev(tp) != ASIC_REV_5701)
                goto out;

        /* It is best to perform DMA test with maximum write burst size
         * to expose the 5700/5701 write DMA bug.
         */
        saved_dma_rwctrl = tp->dma_rwctrl;
        tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
        tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

        /* Loop: write the pattern to the chip, read it back, and verify.
         * On first corruption, retry once with the 16-byte write
         * boundary workaround; a second corruption is fatal.
         */
        while (1) {
                u32 *p = buf, i;

                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
                        p[i] = i;

                /* Send the buffer to the chip. */
                ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
                if (ret) {
                        dev_err(&tp->pdev->dev,
                                "%s: Buffer write failed. err = %d\n",
                                __func__, ret);
                        break;
                }

#if 0
                /* validate data reached card RAM correctly. */
                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
                        u32 val;
                        tg3_read_mem(tp, 0x2100 + (i*4), &val);
                        if (le32_to_cpu(val) != p[i]) {
                                dev_err(&tp->pdev->dev,
                                        "%s: Buffer corrupted on device! "
                                        "(%d != %d)\n", __func__, val, i);
                                /* ret = -ENODEV here? */
                        }
                        p[i] = 0;
                }
#endif
                /* Now read it back. */
                ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
                if (ret) {
                        dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
                                "err = %d\n", __func__, ret);
                        break;
                }

                /* Verify it. */
                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
                        if (p[i] == i)
                                continue;

                        if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
                            DMA_RWCTRL_WRITE_BNDRY_16) {
                                tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
                                tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
                                tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
                                break;
                        } else {
                                dev_err(&tp->pdev->dev,
                                        "%s: Buffer corrupted on read back! "
                                        "(%d != %d)\n", __func__, p[i], i);
                                ret = -ENODEV;
                                goto out;
                        }
                }

                if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
                        /* Success. */
                        ret = 0;
                        break;
                }
        }
        if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
            DMA_RWCTRL_WRITE_BNDRY_16) {
                /* DMA test passed without adjusting DMA boundary,
                 * now look for chipsets that are known to expose the
                 * DMA bug without failing the test.
                 */
                if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
                        tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
                        tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
                } else {
                        /* Safe to use the calculated DMA boundary. */
                        tp->dma_rwctrl = saved_dma_rwctrl;
                }

                tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
        }

out:
        dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
        return ret;
}
16683
16684 static void tg3_init_bufmgr_config(struct tg3 *tp)
16685 {
16686         if (tg3_flag(tp, 57765_PLUS)) {
16687                 tp->bufmgr_config.mbuf_read_dma_low_water =
16688                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16689                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16690                         DEFAULT_MB_MACRX_LOW_WATER_57765;
16691                 tp->bufmgr_config.mbuf_high_water =
16692                         DEFAULT_MB_HIGH_WATER_57765;
16693
16694                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16695                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16696                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16697                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16698                 tp->bufmgr_config.mbuf_high_water_jumbo =
16699                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
16700         } else if (tg3_flag(tp, 5705_PLUS)) {
16701                 tp->bufmgr_config.mbuf_read_dma_low_water =
16702                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16703                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16704                         DEFAULT_MB_MACRX_LOW_WATER_5705;
16705                 tp->bufmgr_config.mbuf_high_water =
16706                         DEFAULT_MB_HIGH_WATER_5705;
16707                 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16708                         tp->bufmgr_config.mbuf_mac_rx_low_water =
16709                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
16710                         tp->bufmgr_config.mbuf_high_water =
16711                                 DEFAULT_MB_HIGH_WATER_5906;
16712                 }
16713
16714                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16715                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
16716                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16717                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
16718                 tp->bufmgr_config.mbuf_high_water_jumbo =
16719                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
16720         } else {
16721                 tp->bufmgr_config.mbuf_read_dma_low_water =
16722                         DEFAULT_MB_RDMA_LOW_WATER;
16723                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16724                         DEFAULT_MB_MACRX_LOW_WATER;
16725                 tp->bufmgr_config.mbuf_high_water =
16726                         DEFAULT_MB_HIGH_WATER;
16727
16728                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16729                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
16730                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16731                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
16732                 tp->bufmgr_config.mbuf_high_water_jumbo =
16733                         DEFAULT_MB_HIGH_WATER_JUMBO;
16734         }
16735
16736         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
16737         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
16738 }
16739
16740 static char *tg3_phy_string(struct tg3 *tp)
16741 {
16742         switch (tp->phy_id & TG3_PHY_ID_MASK) {
16743         case TG3_PHY_ID_BCM5400:        return "5400";
16744         case TG3_PHY_ID_BCM5401:        return "5401";
16745         case TG3_PHY_ID_BCM5411:        return "5411";
16746         case TG3_PHY_ID_BCM5701:        return "5701";
16747         case TG3_PHY_ID_BCM5703:        return "5703";
16748         case TG3_PHY_ID_BCM5704:        return "5704";
16749         case TG3_PHY_ID_BCM5705:        return "5705";
16750         case TG3_PHY_ID_BCM5750:        return "5750";
16751         case TG3_PHY_ID_BCM5752:        return "5752";
16752         case TG3_PHY_ID_BCM5714:        return "5714";
16753         case TG3_PHY_ID_BCM5780:        return "5780";
16754         case TG3_PHY_ID_BCM5755:        return "5755";
16755         case TG3_PHY_ID_BCM5787:        return "5787";
16756         case TG3_PHY_ID_BCM5784:        return "5784";
16757         case TG3_PHY_ID_BCM5756:        return "5722/5756";
16758         case TG3_PHY_ID_BCM5906:        return "5906";
16759         case TG3_PHY_ID_BCM5761:        return "5761";
16760         case TG3_PHY_ID_BCM5718C:       return "5718C";
16761         case TG3_PHY_ID_BCM5718S:       return "5718S";
16762         case TG3_PHY_ID_BCM57765:       return "57765";
16763         case TG3_PHY_ID_BCM5719C:       return "5719C";
16764         case TG3_PHY_ID_BCM5720C:       return "5720C";
16765         case TG3_PHY_ID_BCM5762:        return "5762C";
16766         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
16767         case 0:                 return "serdes";
16768         default:                return "unknown";
16769         }
16770 }
16771
16772 static char *tg3_bus_string(struct tg3 *tp, char *str)
16773 {
16774         if (tg3_flag(tp, PCI_EXPRESS)) {
16775                 strcpy(str, "PCI Express");
16776                 return str;
16777         } else if (tg3_flag(tp, PCIX_MODE)) {
16778                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
16779
16780                 strcpy(str, "PCIX:");
16781
16782                 if ((clock_ctrl == 7) ||
16783                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
16784                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
16785                         strcat(str, "133MHz");
16786                 else if (clock_ctrl == 0)
16787                         strcat(str, "33MHz");
16788                 else if (clock_ctrl == 2)
16789                         strcat(str, "50MHz");
16790                 else if (clock_ctrl == 4)
16791                         strcat(str, "66MHz");
16792                 else if (clock_ctrl == 6)
16793                         strcat(str, "100MHz");
16794         } else {
16795                 strcpy(str, "PCI:");
16796                 if (tg3_flag(tp, PCI_HIGH_SPEED))
16797                         strcat(str, "66MHz");
16798                 else
16799                         strcat(str, "33MHz");
16800         }
16801         if (tg3_flag(tp, PCI_32BIT))
16802                 strcat(str, ":32-bit");
16803         else
16804                 strcat(str, ":64-bit");
16805         return str;
16806 }
16807
16808 static void tg3_init_coal(struct tg3 *tp)
16809 {
16810         struct ethtool_coalesce *ec = &tp->coal;
16811
16812         memset(ec, 0, sizeof(*ec));
16813         ec->cmd = ETHTOOL_GCOALESCE;
16814         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
16815         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
16816         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
16817         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
16818         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
16819         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
16820         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
16821         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
16822         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
16823
16824         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
16825                                  HOSTCC_MODE_CLRTICK_TXBD)) {
16826                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
16827                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
16828                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
16829                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
16830         }
16831
16832         if (tg3_flag(tp, 5705_PLUS)) {
16833                 ec->rx_coalesce_usecs_irq = 0;
16834                 ec->tx_coalesce_usecs_irq = 0;
16835                 ec->stats_block_coalesce_usecs = 0;
16836         }
16837 }
16838
16839 static int tg3_init_one(struct pci_dev *pdev,
16840                                   const struct pci_device_id *ent)
16841 {
16842         struct net_device *dev;
16843         struct tg3 *tp;
16844         int i, err, pm_cap;
16845         u32 sndmbx, rcvmbx, intmbx;
16846         char str[40];
16847         u64 dma_mask, persist_dma_mask;
16848         netdev_features_t features = 0;
16849
16850         printk_once(KERN_INFO "%s\n", version);
16851
16852         err = pci_enable_device(pdev);
16853         if (err) {
16854                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
16855                 return err;
16856         }
16857
16858         err = pci_request_regions(pdev, DRV_MODULE_NAME);
16859         if (err) {
16860                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
16861                 goto err_out_disable_pdev;
16862         }
16863
16864         pci_set_master(pdev);
16865
16866         /* Find power-management capability. */
16867         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
16868         if (pm_cap == 0) {
16869                 dev_err(&pdev->dev,
16870                         "Cannot find Power Management capability, aborting\n");
16871                 err = -EIO;
16872                 goto err_out_free_res;
16873         }
16874
16875         err = pci_set_power_state(pdev, PCI_D0);
16876         if (err) {
16877                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
16878                 goto err_out_free_res;
16879         }
16880
16881         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
16882         if (!dev) {
16883                 err = -ENOMEM;
16884                 goto err_out_power_down;
16885         }
16886
16887         SET_NETDEV_DEV(dev, &pdev->dev);
16888
16889         tp = netdev_priv(dev);
16890         tp->pdev = pdev;
16891         tp->dev = dev;
16892         tp->pm_cap = pm_cap;
16893         tp->rx_mode = TG3_DEF_RX_MODE;
16894         tp->tx_mode = TG3_DEF_TX_MODE;
16895         tp->irq_sync = 1;
16896
16897         if (tg3_debug > 0)
16898                 tp->msg_enable = tg3_debug;
16899         else
16900                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
16901
16902         if (pdev_is_ssb_gige_core(pdev)) {
16903                 tg3_flag_set(tp, IS_SSB_CORE);
16904                 if (ssb_gige_must_flush_posted_writes(pdev))
16905                         tg3_flag_set(tp, FLUSH_POSTED_WRITES);
16906                 if (ssb_gige_one_dma_at_once(pdev))
16907                         tg3_flag_set(tp, ONE_DMA_AT_ONCE);
16908                 if (ssb_gige_have_roboswitch(pdev))
16909                         tg3_flag_set(tp, ROBOSWITCH);
16910                 if (ssb_gige_is_rgmii(pdev))
16911                         tg3_flag_set(tp, RGMII_MODE);
16912         }
16913
16914         /* The word/byte swap controls here control register access byte
16915          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
16916          * setting below.
16917          */
16918         tp->misc_host_ctrl =
16919                 MISC_HOST_CTRL_MASK_PCI_INT |
16920                 MISC_HOST_CTRL_WORD_SWAP |
16921                 MISC_HOST_CTRL_INDIR_ACCESS |
16922                 MISC_HOST_CTRL_PCISTATE_RW;
16923
16924         /* The NONFRM (non-frame) byte/word swap controls take effect
16925          * on descriptor entries, anything which isn't packet data.
16926          *
16927          * The StrongARM chips on the board (one for tx, one for rx)
16928          * are running in big-endian mode.
16929          */
16930         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
16931                         GRC_MODE_WSWAP_NONFRM_DATA);
16932 #ifdef __BIG_ENDIAN
16933         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
16934 #endif
16935         spin_lock_init(&tp->lock);
16936         spin_lock_init(&tp->indirect_lock);
16937         INIT_WORK(&tp->reset_task, tg3_reset_task);
16938
16939         tp->regs = pci_ioremap_bar(pdev, BAR_0);
16940         if (!tp->regs) {
16941                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
16942                 err = -ENOMEM;
16943                 goto err_out_free_dev;
16944         }
16945
16946         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16947             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
16948             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
16949             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
16950             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16951             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16952             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16953             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16954             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16955             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16956             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16957             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
16958                 tg3_flag_set(tp, ENABLE_APE);
16959                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
16960                 if (!tp->aperegs) {
16961                         dev_err(&pdev->dev,
16962                                 "Cannot map APE registers, aborting\n");
16963                         err = -ENOMEM;
16964                         goto err_out_iounmap;
16965                 }
16966         }
16967
16968         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
16969         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
16970
16971         dev->ethtool_ops = &tg3_ethtool_ops;
16972         dev->watchdog_timeo = TG3_TX_TIMEOUT;
16973         dev->netdev_ops = &tg3_netdev_ops;
16974         dev->irq = pdev->irq;
16975
16976         err = tg3_get_invariants(tp, ent);
16977         if (err) {
16978                 dev_err(&pdev->dev,
16979                         "Problem fetching invariants of chip, aborting\n");
16980                 goto err_out_apeunmap;
16981         }
16982
16983         /* The EPB bridge inside 5714, 5715, and 5780 and any
16984          * device behind the EPB cannot support DMA addresses > 40-bit.
16985          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
16986          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
16987          * do DMA address check in tg3_start_xmit().
16988          */
16989         if (tg3_flag(tp, IS_5788))
16990                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
16991         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
16992                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
16993 #ifdef CONFIG_HIGHMEM
16994                 dma_mask = DMA_BIT_MASK(64);
16995 #endif
16996         } else
16997                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
16998
16999         /* Configure DMA attributes. */
17000         if (dma_mask > DMA_BIT_MASK(32)) {
17001                 err = pci_set_dma_mask(pdev, dma_mask);
17002                 if (!err) {
17003                         features |= NETIF_F_HIGHDMA;
17004                         err = pci_set_consistent_dma_mask(pdev,
17005                                                           persist_dma_mask);
17006                         if (err < 0) {
17007                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17008                                         "DMA for consistent allocations\n");
17009                                 goto err_out_apeunmap;
17010                         }
17011                 }
17012         }
17013         if (err || dma_mask == DMA_BIT_MASK(32)) {
17014                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17015                 if (err) {
17016                         dev_err(&pdev->dev,
17017                                 "No usable DMA configuration, aborting\n");
17018                         goto err_out_apeunmap;
17019                 }
17020         }
17021
17022         tg3_init_bufmgr_config(tp);
17023
17024         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
17025
17026         /* 5700 B0 chips do not support checksumming correctly due
17027          * to hardware bugs.
17028          */
17029         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17030                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17031
17032                 if (tg3_flag(tp, 5755_PLUS))
17033                         features |= NETIF_F_IPV6_CSUM;
17034         }
17035
17036         /* TSO is on by default on chips that support hardware TSO.
17037          * Firmware TSO on older chips gives lower performance, so it
17038          * is off by default, but can be enabled using ethtool.
17039          */
17040         if ((tg3_flag(tp, HW_TSO_1) ||
17041              tg3_flag(tp, HW_TSO_2) ||
17042              tg3_flag(tp, HW_TSO_3)) &&
17043             (features & NETIF_F_IP_CSUM))
17044                 features |= NETIF_F_TSO;
17045         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17046                 if (features & NETIF_F_IPV6_CSUM)
17047                         features |= NETIF_F_TSO6;
17048                 if (tg3_flag(tp, HW_TSO_3) ||
17049                     tg3_asic_rev(tp) == ASIC_REV_5761 ||
17050                     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17051                      tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17052                     tg3_asic_rev(tp) == ASIC_REV_5785 ||
17053                     tg3_asic_rev(tp) == ASIC_REV_57780)
17054                         features |= NETIF_F_TSO_ECN;
17055         }
17056
17057         dev->features |= features;
17058         dev->vlan_features |= features;
17059
17060         /*
17061          * Add loopback capability only for a subset of devices that support
17062          * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
17063          * loopback for the remaining devices.
17064          */
17065         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17066             !tg3_flag(tp, CPMU_PRESENT))
17067                 /* Add the loopback capability */
17068                 features |= NETIF_F_LOOPBACK;
17069
17070         dev->hw_features |= features;
17071
17072         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17073             !tg3_flag(tp, TSO_CAPABLE) &&
17074             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17075                 tg3_flag_set(tp, MAX_RXPEND_64);
17076                 tp->rx_pending = 63;
17077         }
17078
17079         err = tg3_get_device_address(tp);
17080         if (err) {
17081                 dev_err(&pdev->dev,
17082                         "Could not obtain valid ethernet address, aborting\n");
17083                 goto err_out_apeunmap;
17084         }
17085
17086         /*
17087          * Reset chip in case UNDI or EFI driver did not shutdown
17088          * DMA self test will enable WDMAC and we'll see (spurious)
17089          * pending DMA on the PCI bus at that point.
17090          */
17091         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17092             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17093                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17094                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17095         }
17096
17097         err = tg3_test_dma(tp);
17098         if (err) {
17099                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17100                 goto err_out_apeunmap;
17101         }
17102
17103         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17104         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17105         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17106         for (i = 0; i < tp->irq_max; i++) {
17107                 struct tg3_napi *tnapi = &tp->napi[i];
17108
17109                 tnapi->tp = tp;
17110                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17111
17112                 tnapi->int_mbox = intmbx;
17113                 if (i <= 4)
17114                         intmbx += 0x8;
17115                 else
17116                         intmbx += 0x4;
17117
17118                 tnapi->consmbox = rcvmbx;
17119                 tnapi->prodmbox = sndmbx;
17120
17121                 if (i)
17122                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17123                 else
17124                         tnapi->coal_now = HOSTCC_MODE_NOW;
17125
17126                 if (!tg3_flag(tp, SUPPORT_MSIX))
17127                         break;
17128
17129                 /*
17130                  * If we support MSIX, we'll be using RSS.  If we're using
17131                  * RSS, the first vector only handles link interrupts and the
17132                  * remaining vectors handle rx and tx interrupts.  Reuse the
17133                  * mailbox values for the next iteration.  The values we setup
17134                  * above are still useful for the single vectored mode.
17135                  */
17136                 if (!i)
17137                         continue;
17138
17139                 rcvmbx += 0x8;
17140
17141                 if (sndmbx & 0x4)
17142                         sndmbx -= 0x4;
17143                 else
17144                         sndmbx += 0xc;
17145         }
17146
17147         tg3_init_coal(tp);
17148
17149         pci_set_drvdata(pdev, dev);
17150
17151         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17152             tg3_asic_rev(tp) == ASIC_REV_5720 ||
17153             tg3_asic_rev(tp) == ASIC_REV_5762)
17154                 tg3_flag_set(tp, PTP_CAPABLE);
17155
17156         if (tg3_flag(tp, 5717_PLUS)) {
17157                 /* Resume a low-power mode */
17158                 tg3_frob_aux_power(tp, false);
17159         }
17160
17161         tg3_timer_init(tp);
17162
17163         tg3_carrier_off(tp);
17164
17165         err = register_netdev(dev);
17166         if (err) {
17167                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17168                 goto err_out_apeunmap;
17169         }
17170
17171         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17172                     tp->board_part_number,
17173                     tg3_chip_rev_id(tp),
17174                     tg3_bus_string(tp, str),
17175                     dev->dev_addr);
17176
17177         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17178                 struct phy_device *phydev;
17179                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
17180                 netdev_info(dev,
17181                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17182                             phydev->drv->name, dev_name(&phydev->dev));
17183         } else {
17184                 char *ethtype;
17185
17186                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17187                         ethtype = "10/100Base-TX";
17188                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17189                         ethtype = "1000Base-SX";
17190                 else
17191                         ethtype = "10/100/1000Base-T";
17192
17193                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17194                             "(WireSpeed[%d], EEE[%d])\n",
17195                             tg3_phy_string(tp), ethtype,
17196                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17197                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17198         }
17199
17200         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17201                     (dev->features & NETIF_F_RXCSUM) != 0,
17202                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
17203                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17204                     tg3_flag(tp, ENABLE_ASF) != 0,
17205                     tg3_flag(tp, TSO_CAPABLE) != 0);
17206         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17207                     tp->dma_rwctrl,
17208                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17209                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17210
17211         pci_save_state(pdev);
17212
17213         return 0;
17214
17215 err_out_apeunmap:
17216         if (tp->aperegs) {
17217                 iounmap(tp->aperegs);
17218                 tp->aperegs = NULL;
17219         }
17220
17221 err_out_iounmap:
17222         if (tp->regs) {
17223                 iounmap(tp->regs);
17224                 tp->regs = NULL;
17225         }
17226
17227 err_out_free_dev:
17228         free_netdev(dev);
17229
17230 err_out_power_down:
17231         pci_set_power_state(pdev, PCI_D3hot);
17232
17233 err_out_free_res:
17234         pci_release_regions(pdev);
17235
17236 err_out_disable_pdev:
17237         pci_disable_device(pdev);
17238         pci_set_drvdata(pdev, NULL);
17239         return err;
17240 }
17241
17242 static void tg3_remove_one(struct pci_dev *pdev)
17243 {
17244         struct net_device *dev = pci_get_drvdata(pdev);
17245
17246         if (dev) {
17247                 struct tg3 *tp = netdev_priv(dev);
17248
17249                 release_firmware(tp->fw);
17250
17251                 tg3_reset_task_cancel(tp);
17252
17253                 if (tg3_flag(tp, USE_PHYLIB)) {
17254                         tg3_phy_fini(tp);
17255                         tg3_mdio_fini(tp);
17256                 }
17257
17258                 unregister_netdev(dev);
17259                 if (tp->aperegs) {
17260                         iounmap(tp->aperegs);
17261                         tp->aperegs = NULL;
17262                 }
17263                 if (tp->regs) {
17264                         iounmap(tp->regs);
17265                         tp->regs = NULL;
17266                 }
17267                 free_netdev(dev);
17268                 pci_release_regions(pdev);
17269                 pci_disable_device(pdev);
17270                 pci_set_drvdata(pdev, NULL);
17271         }
17272 }
17273
17274 #ifdef CONFIG_PM_SLEEP
/* PM sleep callback: quiesce traffic, halt the chip and prepare it for
 * power-down.  If power-down preparation fails, the hardware is restarted
 * so the device is left usable and the failed suspend is reported.
 */
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Interface down: nothing to quiesce. */
	if (!netif_running(dev))
		return 0;

	/* Stop deferred work, the PHY state machine and the data path
	 * before touching the hardware.
	 */
	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Disable interrupts under the full lock (irq-safe variant). */
	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	/* Halt the chip; INIT_COMPLETE is cleared so resume knows a full
	 * re-init is required.
	 */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		/* Suspend failed: undo the shutdown and bring the
		 * device back into its previous running state.
		 */
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		/* PHY restart must happen without the full lock held. */
		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
17327
17328 static int tg3_resume(struct device *device)
17329 {
17330         struct pci_dev *pdev = to_pci_dev(device);
17331         struct net_device *dev = pci_get_drvdata(pdev);
17332         struct tg3 *tp = netdev_priv(dev);
17333         int err;
17334
17335         if (!netif_running(dev))
17336                 return 0;
17337
17338         netif_device_attach(dev);
17339
17340         tg3_full_lock(tp, 0);
17341
17342         tg3_flag_set(tp, INIT_COMPLETE);
17343         err = tg3_restart_hw(tp,
17344                              !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
17345         if (err)
17346                 goto out;
17347
17348         tg3_timer_start(tp);
17349
17350         tg3_netif_start(tp);
17351
17352 out:
17353         tg3_full_unlock(tp);
17354
17355         if (!err)
17356                 tg3_phy_start(tp);
17357
17358         return err;
17359 }
17360
/* Wire suspend/resume into the PM core; tg3_driver.driver.pm points here. */
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

/* Sleep support not configured: register no PM callbacks. */
#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
17369
17370 /**
17371  * tg3_io_error_detected - called when PCI error is detected
17372  * @pdev: Pointer to PCI device
17373  * @state: The current pci connection state
17374  *
17375  * This function is called after a PCI bus error affecting
17376  * this device has been detected.
17377  */
17378 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17379                                               pci_channel_state_t state)
17380 {
17381         struct net_device *netdev = pci_get_drvdata(pdev);
17382         struct tg3 *tp = netdev_priv(netdev);
17383         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17384
17385         netdev_info(netdev, "PCI I/O error detected\n");
17386
17387         rtnl_lock();
17388
17389         if (!netif_running(netdev))
17390                 goto done;
17391
17392         tg3_phy_stop(tp);
17393
17394         tg3_netif_stop(tp);
17395
17396         tg3_timer_stop(tp);
17397
17398         /* Want to make sure that the reset task doesn't run */
17399         tg3_reset_task_cancel(tp);
17400
17401         netif_device_detach(netdev);
17402
17403         /* Clean up software state, even if MMIO is blocked */
17404         tg3_full_lock(tp, 0);
17405         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17406         tg3_full_unlock(tp);
17407
17408 done:
17409         if (state == pci_channel_io_perm_failure)
17410                 err = PCI_ERS_RESULT_DISCONNECT;
17411         else
17412                 pci_disable_device(pdev);
17413
17414         rtnl_unlock();
17415
17416         return err;
17417 }
17418
17419 /**
17420  * tg3_io_slot_reset - called after the pci bus has been reset.
17421  * @pdev: Pointer to PCI device
17422  *
17423  * Restart the card from scratch, as if from a cold-boot.
17424  * At this point, the card has exprienced a hard reset,
17425  * followed by fixups by BIOS, and has its config space
17426  * set up identically to what it was at cold boot.
17427  */
17428 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17429 {
17430         struct net_device *netdev = pci_get_drvdata(pdev);
17431         struct tg3 *tp = netdev_priv(netdev);
17432         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17433         int err;
17434
17435         rtnl_lock();
17436
17437         if (pci_enable_device(pdev)) {
17438                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
17439                 goto done;
17440         }
17441
17442         pci_set_master(pdev);
17443         pci_restore_state(pdev);
17444         pci_save_state(pdev);
17445
17446         if (!netif_running(netdev)) {
17447                 rc = PCI_ERS_RESULT_RECOVERED;
17448                 goto done;
17449         }
17450
17451         err = tg3_power_up(tp);
17452         if (err)
17453                 goto done;
17454
17455         rc = PCI_ERS_RESULT_RECOVERED;
17456
17457 done:
17458         rtnl_unlock();
17459
17460         return rc;
17461 }
17462
17463 /**
17464  * tg3_io_resume - called when traffic can start flowing again.
17465  * @pdev: Pointer to PCI device
17466  *
17467  * This callback is called when the error recovery driver tells
17468  * us that its OK to resume normal operation.
17469  */
17470 static void tg3_io_resume(struct pci_dev *pdev)
17471 {
17472         struct net_device *netdev = pci_get_drvdata(pdev);
17473         struct tg3 *tp = netdev_priv(netdev);
17474         int err;
17475
17476         rtnl_lock();
17477
17478         if (!netif_running(netdev))
17479                 goto done;
17480
17481         tg3_full_lock(tp, 0);
17482         tg3_flag_set(tp, INIT_COMPLETE);
17483         err = tg3_restart_hw(tp, 1);
17484         if (err) {
17485                 tg3_full_unlock(tp);
17486                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17487                 goto done;
17488         }
17489
17490         netif_device_attach(netdev);
17491
17492         tg3_timer_start(tp);
17493
17494         tg3_netif_start(tp);
17495
17496         tg3_full_unlock(tp);
17497
17498         tg3_phy_start(tp);
17499
17500 done:
17501         rtnl_unlock();
17502 }
17503
/* PCI error recovery (AER) callbacks registered via tg3_driver. */
static const struct pci_error_handlers tg3_err_handler = {
	.error_detected = tg3_io_error_detected,
	.slot_reset     = tg3_io_slot_reset,
	.resume         = tg3_io_resume
};
17509
/* PCI driver descriptor: device ID table, probe/remove entry points,
 * error recovery handlers and PM callbacks.
 */
static struct pci_driver tg3_driver = {
	.name           = DRV_MODULE_NAME,
	.id_table       = tg3_pci_tbl,
	.probe          = tg3_init_one,
	.remove         = tg3_remove_one,
	.err_handler    = &tg3_err_handler,
	.driver.pm      = TG3_PM_OPS,
};
17518
/* Module entry point: hand the driver to the PCI core, which will call
 * tg3_init_one() for every matching device.
 */
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}
17523
/* Module exit point: unregistering triggers tg3_remove_one() per device. */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);