Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wirel...
[firefly-linux-kernel-4.4.55.git] / drivers / net / ethernet / broadcom / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2013 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47 #include <linux/ssb/ssb_driver_gige.h>
48 #include <linux/hwmon.h>
49 #include <linux/hwmon-sysfs.h>
50
51 #include <net/checksum.h>
52 #include <net/ip.h>
53
54 #include <linux/io.h>
55 #include <asm/byteorder.h>
56 #include <linux/uaccess.h>
57
58 #include <uapi/linux/net_tstamp.h>
59 #include <linux/ptp_clock_kernel.h>
60
61 #ifdef CONFIG_SPARC
62 #include <asm/idprom.h>
63 #include <asm/prom.h>
64 #endif
65
66 #define BAR_0   0
67 #define BAR_2   2
68
69 #include "tg3.h"
70
71 /* Functions & macros to verify TG3_FLAGS types */
72
/* Test whether @flag is set in the tg3 flag bitmap @bits. */
static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}
77
/* Atomically set @flag in the tg3 flag bitmap @bits. */
static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}
82
/* Atomically clear @flag in the tg3 flag bitmap @bits. */
static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}
87
/* Convenience wrappers: expand a short flag name (e.g. ENABLE_APE) into
 * the full TG3_FLAG_* enumerator and operate on tp->tg3_flags.  Going
 * through the _tg3_flag*() inlines type-checks the flag argument.
 */
#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
94
#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     133
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "Jul 29, 2013"

/* Reset contexts: the reason a chip reset is being performed. */
#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
/* Default NETIF_MSG_* bitmap used when tg3_debug is left at -1. */
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)
/* usec delay presumably needed around GRC local-control power switch
 * toggles — NOTE(review): confirm against tw32_wait_f() call sites.
 */
#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

/* Byte sizes of the descriptor rings, derived from the entry counts. */
#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
/* Advance a TX ring index with wraparound (ring size is a power of two). */
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

/* Firmware image file names. */
#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG357766       "tigon/tg357766.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
218
219 static char version[] =
220         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
221
222 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
223 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
224 MODULE_LICENSE("GPL");
225 MODULE_VERSION(DRV_MODULE_VERSION);
226 MODULE_FIRMWARE(FIRMWARE_TG3);
227 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
228 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
229
230 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
231 module_param(tg3_debug, int, 0);
232 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
233
234 #define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
235 #define TG3_DRV_DATA_FLAG_5705_10_100   0x0002
236
237 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
238         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
239         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
240         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
241         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
242         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
243         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
244         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
245         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
246         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
247         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
248         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
249         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
250         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
251         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
252         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
253         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
254         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
255         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
256         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
257          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
258                         TG3_DRV_DATA_FLAG_5705_10_100},
259         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
260          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
261                         TG3_DRV_DATA_FLAG_5705_10_100},
262         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
263         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
264          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
265                         TG3_DRV_DATA_FLAG_5705_10_100},
266         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
267         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
268         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
269         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
270         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
271         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
272          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
273         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
274         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
275         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
276         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
277         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
278          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
279         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
280         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
281         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
282         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
283         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
284         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
285         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
286         {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
287                         PCI_VENDOR_ID_LENOVO,
288                         TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
289          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
290         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
291         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
292          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
293         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
294         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
295         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
296         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
297         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
298         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
299         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
300         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
301         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
302         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
303         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
304         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
305         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
306         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
307         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
308         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
309         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
310         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
311         {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
312                         PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
313          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
314         {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
315                         PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
316          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
317         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
318         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
319         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
320          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
321         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
322         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
323         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
324         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
325         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
326         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
327         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
328         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
329         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
330          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
331         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
332          .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
333         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
334         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
335         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
336         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
337         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
338         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
339         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
340         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
341         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
342         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
343         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
344         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
345         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
346         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
347         {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
348         {}
349 };
350
351 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
352
/* Statistic names reported for 'ethtool -S'.  NOTE(review): the ordering
 * appears to mirror the hardware statistics block; confirm against the
 * code that fills the stats before reordering or inserting entries.
 */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
/* Indices of the ethtool self-tests; must match ethtool_test_keys[]. */
#define TG3_NVRAM_TEST          0
#define TG3_LINK_TEST           1
#define TG3_REGISTER_TEST       2
#define TG3_MEMORY_TEST         3
#define TG3_MAC_LOOPB_TEST      4
#define TG3_PHY_LOOPB_TEST      5
#define TG3_EXT_LOOPB_TEST      6
#define TG3_INTERRUPT_TEST      7


/* Names reported by ethtool for the self-tests above. */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        [TG3_NVRAM_TEST]        = { "nvram test        (online) " },
        [TG3_LINK_TEST]         = { "link test         (online) " },
        [TG3_REGISTER_TEST]     = { "register test     (offline)" },
        [TG3_MEMORY_TEST]       = { "memory test       (offline)" },
        [TG3_MAC_LOOPB_TEST]    = { "mac loopback test (offline)" },
        [TG3_PHY_LOOPB_TEST]    = { "phy loopback test (offline)" },
        [TG3_EXT_LOOPB_TEST]    = { "ext loopback test (offline)" },
        [TG3_INTERRUPT_TEST]    = { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
461
462
/* Write @val to the register at offset @off through the memory-mapped
 * register window (posted write; no read-back flush).
 */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}
467
/* Read the register at offset @off through the memory-mapped window. */
static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}
472
/* Write @val to APE register @off via the mapped APE register space. */
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}
477
/* Read APE register @off via the mapped APE register space. */
static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}
482
/* Write a device register through PCI config space (indirect mode): latch
 * the target offset into TG3PCI_REG_BASE_ADDR, then write the value to
 * TG3PCI_REG_DATA.  indirect_lock serializes the two-step sequence
 * against concurrent indirect accesses.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
492
/* Write @val to register @off and immediately read it back so the posted
 * PCI write is flushed to the device before returning.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}
498
/* Read a device register through PCI config space (indirect mode);
 * counterpart of tg3_write_indirect_reg32().
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
510
/* Write a mailbox register while in indirect mode.  Two mailboxes have
 * dedicated PCI config-space aliases and are written directly; all other
 * mailboxes go through the indirect register window at offset +0x5600.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}
540
/* Read a mailbox register while in indirect mode, through the indirect
 * register window at offset +0x5600 (see tg3_write_indirect_mbox()).
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
552
553 /* usec_wait specifies the wait time in usec when writing to certain registers
554  * where it is unsafe to read back the register without some delay.
555  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
556  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
557  */
558 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
559 {
560         if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
561                 /* Non-posted methods */
562                 tp->write32(tp, off, val);
563         else {
564                 /* Posted method */
565                 tg3_write32(tp, off, val);
566                 if (usec_wait)
567                         udelay(usec_wait);
568                 tp->read32(tp, off);
569         }
570         /* Wait again after the read for the posted method to guarantee that
571          * the wait time is met.
572          */
573         if (usec_wait)
574                 udelay(usec_wait);
575 }
576
/* Write a mailbox register and, when the chip/bus quirk flags require it,
 * read it back so the posted write reaches the device before returning.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
            (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
             !tg3_flag(tp, ICH_WORKAROUND)))
                tp->read32_mbox(tp, off);
}
585
/* Write a TX mailbox.  On chips with the TXD mailbox hardware bug the value
 * is written twice; a read-back flush follows when write reordering or
 * posted writes must be avoided.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
            tg3_flag(tp, FLUSH_POSTED_WRITES))
                readl(mbox);
}
596
/* 5906 variant: mailboxes are accessed at an offset of GRCMBOX_BASE. */
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}
601
/* 5906 variant: mailboxes are accessed at an offset of GRCMBOX_BASE. */
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}
606
/* Register access shorthands; a local 'struct tg3 *tp' must be in scope
 * at every call site.  The _f / _wait_f variants flush the posted write.
 */
#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
617
/* Write @val into NIC on-chip SRAM at offset @off through the memory
 * window.  On the 5906, offsets in [NIC_SRAM_STATS_BLK,
 * NIC_SRAM_TX_BUFFER_DESC) are silently skipped (early return below).
 * The window is programmed via PCI config space when SRAM_USE_CONFIG is
 * set, otherwise via the memory-mapped window registers; indirect_lock
 * serializes the base/data sequence.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
642
/* Read a word of NIC on-chip SRAM at offset @off into *@val; counterpart
 * of tg3_write_mem().  On the 5906, offsets in [NIC_SRAM_STATS_BLK,
 * NIC_SRAM_TX_BUFFER_DESC) always report 0.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
669
670 static void tg3_ape_lock_init(struct tg3 *tp)
671 {
672         int i;
673         u32 regbase, bit;
674
675         if (tg3_asic_rev(tp) == ASIC_REV_5761)
676                 regbase = TG3_APE_LOCK_GRANT;
677         else
678                 regbase = TG3_APE_PER_LOCK_GRANT;
679
680         /* Make sure the driver hasn't any stale locks. */
681         for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
682                 switch (i) {
683                 case TG3_APE_LOCK_PHY0:
684                 case TG3_APE_LOCK_PHY1:
685                 case TG3_APE_LOCK_PHY2:
686                 case TG3_APE_LOCK_PHY3:
687                         bit = APE_LOCK_GRANT_DRIVER;
688                         break;
689                 default:
690                         if (!tp->pci_fn)
691                                 bit = APE_LOCK_GRANT_DRIVER;
692                         else
693                                 bit = 1 << tp->pci_fn;
694                 }
695                 tg3_ape_write32(tp, regbase + 4 * i, bit);
696         }
697
698 }
699
/* Acquire an APE semaphore lock.
 *
 * Returns 0 on success (or when APE support is absent / lock not needed),
 * -EBUSY if the lock is not granted within ~1 ms, -EINVAL for an unknown
 * lock number.
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                /* 5761 does not arbitrate the GPIO lock. */
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return 0;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_REQ_DRIVER;
                break;
        default:
                return -EINVAL;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                if (pci_channel_offline(tp->pdev))
                        break;

                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}
761
/* Release an APE hardware mutex previously taken with tg3_ape_lock().
 * Writing the owner's bit to the grant register drops the lock.
 */
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                /* 5761 takes no GPIO lock, so there is nothing to release. */
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                /* Must mirror the bit selection made in tg3_ape_lock(). */
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_GRANT_DRIVER;
                break;
        default:
                return;
        }

        /* 5761 uses the legacy grant register block. */
        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
797
/* Repeatedly take the APE MEM lock until the APE has consumed any
 * previously posted event, or @timeout_us expires.  On success (return
 * 0) the MEM lock is still held; the caller must release it after
 * posting its own event.  Returns -EBUSY on lock failure or timeout.
 */
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
        u32 apedata;

        while (timeout_us) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return -EBUSY;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                /* Event still pending: release the lock and retry. */
                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                udelay(10);
                timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
        }

        return timeout_us ? 0 : -EBUSY;
}
818
/* Poll until the APE clears EVENT_PENDING or @timeout_us expires.
 * Returns 0 when the event was serviced and nonzero (1) on timeout;
 * callers treat any nonzero result as failure.
 */
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
        u32 i, apedata;

        for (i = 0; i < timeout_us / 10; i++) {
                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(10);
        }

        /* i only reaches timeout_us / 10 when the loop never broke out. */
        return i == timeout_us / 10;
}
834
/* Read @len bytes of APE scratchpad memory starting at @base_off into
 * @data, bounced through the NCSI shared-memory message buffer.  Each
 * chunk is requested by posting a SCRTCHPD_READ driver event and then
 * waiting for the APE to fill the buffer.
 * NOTE(review): the final copy loop decrements length by 4 per pass,
 * so @len appears to be assumed a multiple of 4 — confirm at callers.
 * Returns 0 on success or a negative errno.
 */
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
                                   u32 len)
{
        int err;
        u32 i, bufoff, msgoff, maxlen, apedata;

        /* Scratchpad access is only supported via the NCSI interface. */
        if (!tg3_flag(tp, APE_HAS_NCSI))
                return 0;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -ENODEV;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        /* Locate the shared message buffer and its size limit. */
        bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
                 TG3_APE_SHMEM_BASE;
        msgoff = bufoff + 2 * sizeof(u32);
        maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

        while (len) {
                u32 length;

                /* Cap xfer sizes to scratchpad limits. */
                length = (len > maxlen) ? maxlen : len;
                len -= length;

                apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
                if (!(apedata & APE_FW_STATUS_READY))
                        return -EAGAIN;

                /* Wait for up to 1 msec for APE to service previous event. */
                err = tg3_ape_event_lock(tp, 1000);
                if (err)
                        return err;

                /* Describe the request: event type plus offset/length. */
                apedata = APE_EVENT_STATUS_DRIVER_EVNT |
                          APE_EVENT_STATUS_SCRTCHPD_READ |
                          APE_EVENT_STATUS_EVENT_PENDING;
                tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

                tg3_ape_write32(tp, bufoff, base_off);
                tg3_ape_write32(tp, bufoff + sizeof(u32), length);

                /* Drop the MEM lock held by tg3_ape_event_lock() and
                 * ring the APE doorbell.
                 */
                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

                base_off += length;

                if (tg3_ape_wait_for_event(tp, 30000))
                        return -EAGAIN;

                /* Copy the filled message buffer out word by word. */
                for (i = 0; length; i += 4, length -= 4) {
                        u32 val = tg3_ape_read32(tp, msgoff + i);
                        memcpy(data, &val, sizeof(u32));
                        data++;
                }
        }

        return 0;
}
898
/* Post @event to the APE firmware.  Checks the shared-memory segment
 * signature and firmware-ready status, waits up to 1 ms for any
 * previous event to drain, then latches the new event and rings the
 * APE doorbell.  Returns 0 on success or a negative errno.
 */
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int err;
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -EAGAIN;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        err = tg3_ape_event_lock(tp, 1000);
        if (err)
                return err;

        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                        event | APE_EVENT_STATUS_EVENT_PENDING);

        /* tg3_ape_event_lock() left the MEM lock held: drop it, then
         * notify the APE.
         */
        tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
        tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

        return 0;
}
925
/* Tell the APE firmware about a driver state transition (@kind is
 * RESET_KIND_INIT or RESET_KIND_SHUTDOWN; other kinds are ignored).
 * Updates the host segment of APE shared memory, then posts a
 * state-change event.
 */
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                /* Publish a valid host segment and bump the init count. */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                /* Advertise the WoL link speed when wake-up is enabled. */
                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}
979
980 static void tg3_disable_ints(struct tg3 *tp)
981 {
982         int i;
983
984         tw32(TG3PCI_MISC_HOST_CTRL,
985              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
986         for (i = 0; i < tp->irq_max; i++)
987                 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
988 }
989
/* Re-enable chip interrupts after tg3_disable_ints(): unmask the PCI
 * interrupt, restore each vector's tag mailbox, and force an initial
 * interrupt (or a coalesce-now) so a status update posted while
 * interrupts were off is not lost.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        /* Make the irq_sync clear visible before the unmask below. */
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                /* In 1-shot MSI mode the mailbox is written a second time. */
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
1020
1021 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1022 {
1023         struct tg3 *tp = tnapi->tp;
1024         struct tg3_hw_status *sblk = tnapi->hw_status;
1025         unsigned int work_exists = 0;
1026
1027         /* check for phy events */
1028         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1029                 if (sblk->status & SD_STATUS_LINK_CHG)
1030                         work_exists = 1;
1031         }
1032
1033         /* check for TX work to do */
1034         if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1035                 work_exists = 1;
1036
1037         /* check for RX work to do */
1038         if (tnapi->rx_rcb_prod_idx &&
1039             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1040                 work_exists = 1;
1041
1042         return work_exists;
1043 }
1044
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        /* Ack work up to and including last_tag; this re-arms the vector. */
        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        /* Order the mailbox write before any subsequent MMIO. */
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
1065
/* Step the core clock down to the base configuration kept in
 * tp->pci_clock_ctrl, going through intermediate ALTCLK settings
 * where the chip family requires it.  No-op on CPMU-equipped and
 * 5780-class devices.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        /* Preserve only the CLKRUN controls and the low 5 bits. */
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                /* Drop 44MHZ_CORE in two steps, keeping ALTCLK asserted. */
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
1098
/* Max poll iterations (10 us each) to wait for the MI interface to go idle. */
#define PHY_BUSY_LOOPS  5000
1100
/* Read 16-bit register @reg of the PHY at @phy_addr over the MAC's MI
 * (MDIO) interface.  MI auto-polling is paused for the duration of
 * the transaction and restored afterwards.  On success stores the
 * result in *@val and returns 0; returns -EBUSY on timeout.
 */
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
                         u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        /* Pause MI auto-polling so it cannot race our frame. */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        /* Serialize PHY access with the APE firmware. */
        tg3_ape_lock(tp, tp->phy_ape_lock);

        *val = 0x0;

        /* Build the MI frame: PHY address, register, READ | START. */
        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        /* Poll for completion, 10 us per iteration. */
        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        /* Re-read to pick up the returned data bits. */
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        /* Restore MI auto-polling if it was enabled on entry. */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}
1154
1155 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1156 {
1157         return __tg3_readphy(tp, tp->phy_addr, reg, val);
1158 }
1159
/* Write 16-bit @val to register @reg of the PHY at @phy_addr over the
 * MAC's MI (MDIO) interface.  Writes to MII_CTRL1000 and
 * MII_TG3_AUX_CTRL are silently skipped on FET-style PHYs.  Returns 0
 * on success or -EBUSY on timeout.
 */
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
                          u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        /* FET PHYs: pretend these registers were written successfully. */
        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        /* Pause MI auto-polling so it cannot race our frame. */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        /* Serialize PHY access with the APE firmware. */
        tg3_ape_lock(tp, tp->phy_ape_lock);

        /* Build the MI frame: PHY address, register, data, WRITE | START. */
        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        /* Poll for completion, 10 us per iteration. */
        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        /* Restore MI auto-polling if it was enabled on entry. */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}
1213
1214 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1215 {
1216         return __tg3_writephy(tp, tp->phy_addr, reg, val);
1217 }
1218
1219 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1220 {
1221         int err;
1222
1223         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1224         if (err)
1225                 goto done;
1226
1227         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1228         if (err)
1229                 goto done;
1230
1231         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1232                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1233         if (err)
1234                 goto done;
1235
1236         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1237
1238 done:
1239         return err;
1240 }
1241
1242 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1243 {
1244         int err;
1245
1246         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1247         if (err)
1248                 goto done;
1249
1250         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1251         if (err)
1252                 goto done;
1253
1254         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1255                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1256         if (err)
1257                 goto done;
1258
1259         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1260
1261 done:
1262         return err;
1263 }
1264
1265 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1266 {
1267         int err;
1268
1269         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1270         if (!err)
1271                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1272
1273         return err;
1274 }
1275
1276 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1277 {
1278         int err;
1279
1280         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1281         if (!err)
1282                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1283
1284         return err;
1285 }
1286
1287 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1288 {
1289         int err;
1290
1291         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1292                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1293                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1294         if (!err)
1295                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1296
1297         return err;
1298 }
1299
1300 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1301 {
1302         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1303                 set |= MII_TG3_AUXCTL_MISC_WREN;
1304
1305         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1306 }
1307
1308 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1309 {
1310         u32 val;
1311         int err;
1312
1313         err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1314
1315         if (err)
1316                 return err;
1317
1318         if (enable)
1319                 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1320         else
1321                 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1322
1323         err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1324                                    val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1325
1326         return err;
1327 }
1328
/* Reset the PHY via BMCR_RESET and poll until the self-clearing
 * reset bit drops (up to 5000 polls of 10 us).  Returns 0 on success,
 * -EBUSY if a PHY access fails or the reset never completes.
 */
static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        /* limit is -1 only when the loop ran to exhaustion. */
        if (limit < 0)
                return -EBUSY;

        return 0;
}
1359
1360 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1361 {
1362         struct tg3 *tp = bp->priv;
1363         u32 val;
1364
1365         spin_lock_bh(&tp->lock);
1366
1367         if (tg3_readphy(tp, reg, &val))
1368                 val = -EIO;
1369
1370         spin_unlock_bh(&tp->lock);
1371
1372         return val;
1373 }
1374
1375 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1376 {
1377         struct tg3 *tp = bp->priv;
1378         u32 ret = 0;
1379
1380         spin_lock_bh(&tp->lock);
1381
1382         if (tg3_writephy(tp, reg, val))
1383                 ret = -EIO;
1384
1385         spin_unlock_bh(&tp->lock);
1386
1387         return ret;
1388 }
1389
/* phylib MDIO bus reset callback; nothing to do for tg3, so report
 * success unconditionally.
 */
static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}
1394
/* Program the MAC's PHY configuration registers (MAC_PHYCFG1/2 and
 * MAC_EXT_RGMII_MODE) to match the PHY model found on the MDIO bus of
 * a 5785.  Known PHY IDs select their LED modes; unknown IDs leave
 * the registers untouched.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                /* Unrecognized PHY: leave the MAC PHY config alone. */
                return;
        }

        /* Non-RGMII attachments only need LED modes and clock timeouts. */
        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        /* RGMII with in-band status enabled gets the full mask set. */
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        /* Reprogram the RGMII in-band signalling mode bits. */
        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}
1475
/* Turn off MI auto-polling; if the MDIO bus is already registered on
 * a 5785, reapply the PHY-dependent MAC configuration.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            tg3_asic_rev(tp) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}
1486
/* Determine the PHY address and, when phylib is in use, allocate and
 * register an MDIO bus, then configure the attached PHY device's
 * interface mode and workaround flags.  Returns 0 on success or a
 * negative errno.
 */
static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                /* On 5717+ the PHY address depends on the PCI function. */
                tp->phy_addr = tp->pci_fn + 1;

                if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                /* SerDes PHYs sit 7 addresses above the copper ones. */
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        /* Nothing more to do without phylib, or if already registered. */
        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state..
         */
        if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
                tg3_bmcr_reset(tp);

        i = mdiobus_register(tp->mdio_bus);
        if (i) {
                dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
                mdiobus_free(tp->mdio_bus);
                return i;
        }

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        if (!phydev || !phydev->drv) {
                dev_warn(&tp->pdev->dev, "No PHY devices\n");
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
                return -ENODEV;
        }

        /* Apply PHY-model-specific interface mode and workaround flags. */
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM57780:
                phydev->interface = PHY_INTERFACE_MODE_GMII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                break;
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
                                     PHY_BRCM_RX_REFCLK_UNUSED |
                                     PHY_BRCM_DIS_TXCRXC_NOENRGY |
                                     PHY_BRCM_AUTO_PWRDWN_ENABLE;
                if (tg3_flag(tp, RGMII_INBAND_DISABLE))
                        phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
                /* fallthru */
        case PHY_ID_RTL8211C:
                phydev->interface = PHY_INTERFACE_MODE_RGMII;
                break;
        case PHY_ID_RTL8201E:
        case PHY_ID_BCMAC131:
                phydev->interface = PHY_INTERFACE_MODE_MII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                tp->phy_flags |= TG3_PHYFLG_IS_FET;
                break;
        }

        tg3_flag_set(tp, MDIOBUS_INITED);

        if (tg3_asic_rev(tp) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);

        return 0;
}
1591
1592 static void tg3_mdio_fini(struct tg3 *tp)
1593 {
1594         if (tg3_flag(tp, MDIOBUS_INITED)) {
1595                 tg3_flag_clear(tp, MDIOBUS_INITED);
1596                 mdiobus_unregister(tp->mdio_bus);
1597                 mdiobus_free(tp->mdio_bus);
1598         }
1599 }
1600
1601 /* tp->lock is held. */
1602 static inline void tg3_generate_fw_event(struct tg3 *tp)
1603 {
1604         u32 val;
1605
1606         val = tr32(GRC_RX_CPU_EVENT);
1607         val |= GRC_RX_CPU_DRIVER_EVENT;
1608         tw32_f(GRC_RX_CPU_EVENT, val);
1609
1610         tp->last_event_jiffies = jiffies;
1611 }
1612
/* Max time (usecs) to wait for firmware to ack a posted driver event. */
#define TG3_FW_EVENT_TIMEOUT_USEC 2500
1614
/* tp->lock is held. */
/* Wait (up to TG3_FW_EVENT_TIMEOUT_USEC after the last posted event)
 * for the RX CPU firmware to acknowledge the previous driver event by
 * clearing GRC_RX_CPU_DRIVER_EVENT.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
        int i;
        unsigned int delay_cnt;
        long time_remain;

        /* If enough time has passed, no wait is necessary. */
        time_remain = (long)(tp->last_event_jiffies + 1 +
                      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
                      (long)jiffies;
        if (time_remain < 0)
                return;

        /* Check if we can shorten the wait time. */
        delay_cnt = jiffies_to_usecs(time_remain);
        if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
                delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
        /* Poll in 8 us steps. */
        delay_cnt = (delay_cnt >> 3) + 1;

        for (i = 0; i < delay_cnt; i++) {
                if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
                        break;
                /* Stop waiting if the device has dropped off the bus. */
                if (pci_channel_offline(tp->pdev))
                        break;

                udelay(8);
        }
}
1644
/* tp->lock is held. */
/* Collect four words of PHY state (BMCR/BMSR, ADVERTISE/LPA,
 * CTRL1000/STAT1000, PHYADDR) into @data for the firmware link
 * report.  Registers that fail to read contribute zero bits.
 */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
        u32 reg, val;

        val = 0;
        if (!tg3_readphy(tp, MII_BMCR, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_BMSR, &reg))
                val |= (reg & 0xffff);
        *data++ = val;

        val = 0;
        if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_LPA, &reg))
                val |= (reg & 0xffff);
        *data++ = val;

        val = 0;
        /* Skip the 1000BASE-T registers on MII-serdes PHYs. */
        if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
                if (!tg3_readphy(tp, MII_CTRL1000, &reg))
                        val = reg << 16;
                if (!tg3_readphy(tp, MII_STAT1000, &reg))
                        val |= (reg & 0xffff);
        }
        *data++ = val;

        if (!tg3_readphy(tp, MII_PHYADDR, &reg))
                val = reg << 16;
        else
                val = 0;
        *data++ = val;
}
1679
1680 /* tp->lock is held. */
1681 static void tg3_ump_link_report(struct tg3 *tp)
1682 {
1683         u32 data[4];
1684
1685         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1686                 return;
1687
1688         tg3_phy_gather_ump_data(tp, data);
1689
1690         tg3_wait_for_event_ack(tp);
1691
1692         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1693         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1694         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1695         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1696         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1697         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1698
1699         tg3_generate_fw_event(tp);
1700 }
1701
/* tp->lock is held. */
/* Ask the ASF firmware to pause via the mailbox event handshake.
 * Only applies when ASF is enabled and the APE is not managing the
 * device.  The surrounding waits keep the two driver<->firmware
 * events strictly ordered.
 */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
1717
1718 /* tp->lock is held. */
1719 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1720 {
1721         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1722                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1723
1724         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1725                 switch (kind) {
1726                 case RESET_KIND_INIT:
1727                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1728                                       DRV_STATE_START);
1729                         break;
1730
1731                 case RESET_KIND_SHUTDOWN:
1732                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1733                                       DRV_STATE_UNLOAD);
1734                         break;
1735
1736                 case RESET_KIND_SUSPEND:
1737                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1738                                       DRV_STATE_SUSPEND);
1739                         break;
1740
1741                 default:
1742                         break;
1743                 }
1744         }
1745 }
1746
1747 /* tp->lock is held. */
1748 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1749 {
1750         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1751                 switch (kind) {
1752                 case RESET_KIND_INIT:
1753                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1754                                       DRV_STATE_START_DONE);
1755                         break;
1756
1757                 case RESET_KIND_SHUTDOWN:
1758                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1759                                       DRV_STATE_UNLOAD_DONE);
1760                         break;
1761
1762                 default:
1763                         break;
1764                 }
1765         }
1766 }
1767
1768 /* tp->lock is held. */
1769 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1770 {
1771         if (tg3_flag(tp, ENABLE_ASF)) {
1772                 switch (kind) {
1773                 case RESET_KIND_INIT:
1774                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1775                                       DRV_STATE_START);
1776                         break;
1777
1778                 case RESET_KIND_SHUTDOWN:
1779                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1780                                       DRV_STATE_UNLOAD);
1781                         break;
1782
1783                 case RESET_KIND_SUSPEND:
1784                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1785                                       DRV_STATE_SUSPEND);
1786                         break;
1787
1788                 default:
1789                         break;
1790                 }
1791         }
1792 }
1793
/* Poll until the boot firmware signals that its initialization is
 * complete, or the bounded wait expires.
 *
 * Returns 0 on success or when no firmware is expected; -ENODEV only
 * when a 5906 times out or the device drops off the PCI bus.  A
 * timeout on other chips is deliberately not an error (see the Sun
 * onboard-parts comment below).
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	/* An earlier poll already concluded no firmware is running. */
	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	/* Firmware writes back the one's complement of the magic value
	 * when it has finished initializing; poll up to ~1 second.
	 */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
1857
/* Log the current link state to the kernel log (speed/duplex, flow
 * control and EEE status when the link is up), notify the management
 * firmware via tg3_ump_link_report(), and cache the carrier state in
 * tp->link_up.
 */
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		/* EEE status is only meaningful on EEE-capable PHYs. */
		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}

	tp->link_up = netif_carrier_ok(tp->dev);
}
1887
1888 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1889 {
1890         u32 flowctrl = 0;
1891
1892         if (adv & ADVERTISE_PAUSE_CAP) {
1893                 flowctrl |= FLOW_CTRL_RX;
1894                 if (!(adv & ADVERTISE_PAUSE_ASYM))
1895                         flowctrl |= FLOW_CTRL_TX;
1896         } else if (adv & ADVERTISE_PAUSE_ASYM)
1897                 flowctrl |= FLOW_CTRL_TX;
1898
1899         return flowctrl;
1900 }
1901
1902 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1903 {
1904         u16 miireg;
1905
1906         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1907                 miireg = ADVERTISE_1000XPAUSE;
1908         else if (flow_ctrl & FLOW_CTRL_TX)
1909                 miireg = ADVERTISE_1000XPSE_ASYM;
1910         else if (flow_ctrl & FLOW_CTRL_RX)
1911                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1912         else
1913                 miireg = 0;
1914
1915         return miireg;
1916 }
1917
1918 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1919 {
1920         u32 flowctrl = 0;
1921
1922         if (adv & ADVERTISE_1000XPAUSE) {
1923                 flowctrl |= FLOW_CTRL_RX;
1924                 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1925                         flowctrl |= FLOW_CTRL_TX;
1926         } else if (adv & ADVERTISE_1000XPSE_ASYM)
1927                 flowctrl |= FLOW_CTRL_TX;
1928
1929         return flowctrl;
1930 }
1931
1932 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1933 {
1934         u8 cap = 0;
1935
1936         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1937                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1938         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1939                 if (lcladv & ADVERTISE_1000XPAUSE)
1940                         cap = FLOW_CTRL_RX;
1941                 if (rmtadv & ADVERTISE_1000XPAUSE)
1942                         cap = FLOW_CTRL_TX;
1943         }
1944
1945         return cap;
1946 }
1947
/* Resolve the active flow-control configuration from the local and
 * remote advertisements (or the fixed configuration when pause
 * autonegotiation is off) and program the MAC RX/TX mode registers
 * accordingly.  Registers are only rewritten when the mode actually
 * changed.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	/* With phylib, the autoneg setting lives in the phy_device. */
	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		/* Serdes links use 1000BASE-X pause bits; copper uses
		 * the generic MII resolution helper.
		 */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
1986
/* phylib link-change callback (registered via phy_connect() in
 * tg3_phy_init()).  Under tp->lock, reconcile the MAC with the PHY's
 * current state: port mode (MII/GMII) and duplex in MAC_MODE, flow
 * control, MI status polling mode (5785), and TX slot-time/IPG
 * lengths for 1000/half.  Emits a link report outside the lock when
 * anything user-visible changed.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		/* Select the MAC port mode for the negotiated speed.
		 * On 5785, gigabit is the only speed that uses GMII.
		 */
		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 tg3_asic_rev(tp) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Flow control is only resolved for full duplex. */
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	/* 1000/half needs a larger slot time than the default. */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Report only when something user-visible changed. */
	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
2070
/* Connect the MAC to its PHY through phylib.
 *
 * Resets the PHY to a known state, attaches tg3_adjust_link() as the
 * link-change callback, and masks the PHY's supported/advertised
 * feature set down to what the MAC can do for the bus interface in
 * use.  Returns 0 on success, a negative errno if the PHY cannot be
 * attached or the interface mode is unsupported.  Idempotent: returns
 * immediately if already connected.
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* 10/100-only devices fall back to basic features. */
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
2118
2119 static void tg3_phy_start(struct tg3 *tp)
2120 {
2121         struct phy_device *phydev;
2122
2123         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2124                 return;
2125
2126         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2127
2128         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2129                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2130                 phydev->speed = tp->link_config.speed;
2131                 phydev->duplex = tp->link_config.duplex;
2132                 phydev->autoneg = tp->link_config.autoneg;
2133                 phydev->advertising = tp->link_config.advertising;
2134         }
2135
2136         phy_start(phydev);
2137
2138         phy_start_aneg(phydev);
2139 }
2140
2141 static void tg3_phy_stop(struct tg3 *tp)
2142 {
2143         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2144                 return;
2145
2146         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2147 }
2148
2149 static void tg3_phy_fini(struct tg3 *tp)
2150 {
2151         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2152                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2153                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2154         }
2155 }
2156
/* Enable external loopback via the PHY auxiliary control register.
 *
 * Returns 0 on success or a tg3_phy_auxctl_* error code.  FET PHYs
 * are skipped entirely.  The 5401 cannot do a read-modify-write on
 * this register, so a fixed value with the loopback bit is written
 * instead.
 */
static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	/* Set the loopback bit, preserving the rest of the register. */
	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}
2186
2187 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2188 {
2189         u32 phytest;
2190
2191         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2192                 u32 phy;
2193
2194                 tg3_writephy(tp, MII_TG3_FET_TEST,
2195                              phytest | MII_TG3_FET_SHADOW_EN);
2196                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2197                         if (enable)
2198                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2199                         else
2200                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2201                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2202                 }
2203                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2204         }
2205 }
2206
/* Enable/disable the PHY's APD (auto power-down) feature via the MISC
 * shadow registers.  Only applies to 5705-plus devices; 5717-plus MII
 * serdes PHYs and non-eligible chips return early.  FET PHYs use
 * their own shadow-register sequence.
 */
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	/* First shadow write: SCR5 power-saving controls. */
	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	/* DLLAPD stays set except when enabling APD on a 5784. */
	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	/* Second shadow write: APD select with an 84ms wake timer. */
	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
2241
/* Enable/disable automatic MDI crossover (auto-MDIX) on the PHY.
 * Not applicable before 5705 or on serdes links.  FET PHYs use the
 * MISCCTRL shadow register (guarded by SHADOW_EN in the TEST
 * register); other PHYs use the auxctl MISC shadow register.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			/* Enter shadow mode for the MISCCTRL access. */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			/* Restore the TEST register to leave shadow mode. */
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
2282
2283 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2284 {
2285         int ret;
2286         u32 val;
2287
2288         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2289                 return;
2290
2291         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2292         if (!ret)
2293                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2294                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2295 }
2296
/* Program PHY DSP coefficients from the chip's OTP (one-time
 * programmable) word, tp->phy_otp.  Each DSP register receives one or
 * two fields extracted from the OTP value.  No-op when no OTP data
 * was read, or when the auxctl SMDSP gate cannot be opened.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* DSP writes require SMDSP access to be toggled on first. */
	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Close the SMDSP gate again. */
	tg3_phy_toggle_auxctl_smdsp(tp, false);
}
2333
/* Read the current EEE state from the PHY (clause-45 registers) and
 * CPMU registers into @eee, or into tp->eee when @eee is NULL.
 * No-op for non-EEE-capable PHYs.  Bails out quietly if any clause-45
 * read fails, leaving later fields unpopulated.
 */
static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
{
	u32 val;
	struct ethtool_eee *dest = &tp->eee;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	if (eee)
		dest = eee;

	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
		return;

	/* Pull eee_active */
	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
		dest->eee_active = 1;
	} else
		dest->eee_active = 0;

	/* Pull lp advertised settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
		return;
	dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull advertised and eee_enabled settings */
	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
		return;
	dest->eee_enabled = !!val;
	dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Pull tx_lpi_enabled */
	val = tr32(TG3_CPMU_EEE_MODE);
	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);

	/* Pull lpi timer value */
	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
}
2373
/* Re-evaluate EEE after a link change.  When autoneg produced a
 * full-duplex 100/1000 link, program the LPI exit timer, refresh the
 * EEE config and arm tp->setlpicnt (a countdown used elsewhere to
 * enter LPI).  Otherwise clear the DSP TAP26 register and disable LPI
 * in the CPMU.  No-op for non-EEE-capable PHYs.
 */
static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		/* LPI exit time depends on link speed. */
		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_eee_pull_config(tp, NULL);
		if (tp->eee.eee_active)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		/* EEE not active: clear TAP26 (link up only) and turn
		 * LPI off in the CPMU.
		 */
		if (current_link_up &&
		   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
2413
/* Turn on EEE LPI in the CPMU.  On 5717/5719/57765-class chips with a
 * gigabit link, also program the DSP TAP26 ALNOKO/RMRXSTO bits first
 * (guarded by the auxctl SMDSP gate).
 */
static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_flag(tp, 57765_CLASS)) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
2432
/* Poll the PHY DSP control register until its busy bit (0x1000)
 * clears.  Returns 0 on success, -EBUSY after 100 attempts.
 *
 * Note the post-decrement: when the loop exhausts without a break,
 * limit ends up at -1, which is exactly what the check below tests
 * for; a break always leaves limit >= 0.
 */
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
2450
/* Write a fixed test pattern into each of the four PHY DSP channels
 * and read it back to verify.  On any mismatch or macro timeout,
 * returns -EBUSY; a timeout additionally sets *resetp so the caller
 * knows a full PHY reset is needed (a pattern mismatch alone writes a
 * recovery sequence to DSP address 0x000b instead).  Returns 0 when
 * all four channels verify.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	/* Per-channel test pattern: six words, read back as
	 * (low, high) pairs with masks 0x7fff / 0x000f.
	 */
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select the channel's block and enter write mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Commit the writes and wait for the macro to finish. */
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the channel and switch to read-back mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: issue the recovery writes
				 * rather than requesting a reset.
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
2516
/* Clear the DSP test pattern (six zero words) in each of the four
 * PHY channels.  Returns -EBUSY if the DSP macro fails to complete
 * for any channel, 0 otherwise.
 */
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Address this channel's test block (0x2000 per channel). */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
2536
2537 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2538 {
2539         u32 reg32, phy9_orig;
2540         int retries, do_phy_reset, err;
2541
2542         retries = 10;
2543         do_phy_reset = 1;
2544         do {
2545                 if (do_phy_reset) {
2546                         err = tg3_bmcr_reset(tp);
2547                         if (err)
2548                                 return err;
2549                         do_phy_reset = 0;
2550                 }
2551
2552                 /* Disable transmitter and interrupt.  */
2553                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2554                         continue;
2555
2556                 reg32 |= 0x3000;
2557                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2558
2559                 /* Set full-duplex, 1000 mbps.  */
2560                 tg3_writephy(tp, MII_BMCR,
2561                              BMCR_FULLDPLX | BMCR_SPEED1000);
2562
2563                 /* Set to master mode.  */
2564                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2565                         continue;
2566
2567                 tg3_writephy(tp, MII_CTRL1000,
2568                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2569
2570                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2571                 if (err)
2572                         return err;
2573
2574                 /* Block the PHY control access.  */
2575                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2576
2577                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2578                 if (!err)
2579                         break;
2580         } while (--retries);
2581
2582         err = tg3_phy_reset_chanpat(tp);
2583         if (err)
2584                 return err;
2585
2586         tg3_phydsp_write(tp, 0x8005, 0x0000);
2587
2588         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2589         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2590
2591         tg3_phy_toggle_auxctl_smdsp(tp, false);
2592
2593         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2594
2595         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2596                 reg32 &= ~0x3000;
2597                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2598         } else if (!err)
2599                 err = -EBUSY;
2600
2601         return err;
2602 }
2603
2604 static void tg3_carrier_off(struct tg3 *tp)
2605 {
2606         netif_carrier_off(tp->dev);
2607         tp->link_up = false;
2608 }
2609
2610 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2611 {
2612         if (tg3_flag(tp, ENABLE_ASF))
2613                 netdev_warn(tp->dev,
2614                             "Management side-band traffic will be interrupted during phy settings change\n");
2615 }
2616
/* Reset the tigon3 PHY and apply all chip-specific post-reset
 * workarounds (CPMU fixups, DSP erratum patches, jumbo-frame bits,
 * wirespeed).  Reports link loss to the stack first if the link was
 * up.  Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Bring the internal EPHY out of IDDQ (low-power) mode
		 * before touching it.
		 */
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* Read BMSR twice to clear latched status bits. */
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	/* Report the link going down before we reset the PHY. */
	if (netif_running(tp->dev) && tp->link_up) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		/* These chips need the DSP test-pattern reset sequence. */
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		/* Temporarily clear the 10MB-RX-only CPMU mode around
		 * the BMCR reset; restored below.
		 */
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		/* Undo the 12.5MHz MAC clock selection made by
		 * tg3_power_down_phy().
		 */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	/* No further fixups apply to 5717+ MII serdes devices. */
	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	/* DSP register patches for known PHY errata, keyed by phy_flags. */
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that
	 * support jumbo frames.
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
		tg3_phydsp_write(tp, 0xffb, 0x4000);

	tg3_phy_toggle_automdix(tp, true);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
2760
/* GPIO power-source handshake messages exchanged between the (up to
 * four) PCI functions through the shared APE GPIO_MSG / CPMU driver
 * status word; each function owns one 4-bit field at
 * TG3_APE_GPIO_MSG_SHIFT + 4 * pci_fn (see tg3_set_function_status()).
 */
#define TG3_GPIO_MSG_DRVR_PRES           0x00000001
#define TG3_GPIO_MSG_NEED_VAUX           0x00000002
#define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
                                          TG3_GPIO_MSG_NEED_VAUX)
/* The same per-function message replicated into all four fields. */
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
        ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
         (TG3_GPIO_MSG_DRVR_PRES << 4) | \
         (TG3_GPIO_MSG_DRVR_PRES << 8) | \
         (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
        ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
         (TG3_GPIO_MSG_NEED_VAUX << 4) | \
         (TG3_GPIO_MSG_NEED_VAUX << 8) | \
         (TG3_GPIO_MSG_NEED_VAUX << 12))
2776
2777 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2778 {
2779         u32 status, shift;
2780
2781         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2782             tg3_asic_rev(tp) == ASIC_REV_5719)
2783                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2784         else
2785                 status = tr32(TG3_CPMU_DRV_STATUS);
2786
2787         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2788         status &= ~(TG3_GPIO_MSG_MASK << shift);
2789         status |= (newstat << shift);
2790
2791         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2792             tg3_asic_rev(tp) == ASIC_REV_5719)
2793                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2794         else
2795                 tw32(TG3_CPMU_DRV_STATUS, status);
2796
2797         return status >> TG3_APE_GPIO_MSG_SHIFT;
2798 }
2799
2800 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2801 {
2802         if (!tg3_flag(tp, IS_NIC))
2803                 return 0;
2804
2805         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2806             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2807             tg3_asic_rev(tp) == ASIC_REV_5720) {
2808                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2809                         return -EIO;
2810
2811                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2812
2813                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2814                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2815
2816                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2817         } else {
2818                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2819                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2820         }
2821
2822         return 0;
2823 }
2824
2825 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2826 {
2827         u32 grc_local_ctrl;
2828
2829         if (!tg3_flag(tp, IS_NIC) ||
2830             tg3_asic_rev(tp) == ASIC_REV_5700 ||
2831             tg3_asic_rev(tp) == ASIC_REV_5701)
2832                 return;
2833
2834         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2835
2836         tw32_wait_f(GRC_LOCAL_CTRL,
2837                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2838                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2839
2840         tw32_wait_f(GRC_LOCAL_CTRL,
2841                     grc_local_ctrl,
2842                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2843
2844         tw32_wait_f(GRC_LOCAL_CTRL,
2845                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2846                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2847 }
2848
/* Drive the GPIOs so the board draws power from the auxiliary (Vaux)
 * supply.  The required GPIO sequence varies by chip family; the write
 * ordering below is deliberate and must not be rearranged.
 */
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		/* Single write: enable GPIO0-2 and raise GPIO0/1. */
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		/* Raise GPIO0 only after the other outputs have settled. */
		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
2925
2926 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2927 {
2928         u32 msg = 0;
2929
2930         /* Serialize power state transitions */
2931         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2932                 return;
2933
2934         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2935                 msg = TG3_GPIO_MSG_NEED_VAUX;
2936
2937         msg = tg3_set_function_status(tp, msg);
2938
2939         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2940                 goto done;
2941
2942         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2943                 tg3_pwrsrc_switch_to_vaux(tp);
2944         else
2945                 tg3_pwrsrc_die_with_vmain(tp);
2946
2947 done:
2948         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2949 }
2950
2951 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2952 {
2953         bool need_vaux = false;
2954
2955         /* The GPIOs do something completely different on 57765. */
2956         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2957                 return;
2958
2959         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2960             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2961             tg3_asic_rev(tp) == ASIC_REV_5720) {
2962                 tg3_frob_aux_power_5717(tp, include_wol ?
2963                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2964                 return;
2965         }
2966
2967         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2968                 struct net_device *dev_peer;
2969
2970                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2971
2972                 /* remove_one() may have been run on the peer. */
2973                 if (dev_peer) {
2974                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2975
2976                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2977                                 return;
2978
2979                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2980                             tg3_flag(tp_peer, ENABLE_ASF))
2981                                 need_vaux = true;
2982                 }
2983         }
2984
2985         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2986             tg3_flag(tp, ENABLE_ASF))
2987                 need_vaux = true;
2988
2989         if (need_vaux)
2990                 tg3_pwrsrc_switch_to_vaux(tp);
2991         else
2992                 tg3_pwrsrc_die_with_vmain(tp);
2993 }
2994
2995 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2996 {
2997         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2998                 return 1;
2999         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3000                 if (speed != SPEED_10)
3001                         return 1;
3002         } else if (speed == SPEED_10)
3003                 return 1;
3004
3005         return 0;
3006 }
3007
3008 static bool tg3_phy_power_bug(struct tg3 *tp)
3009 {
3010         switch (tg3_asic_rev(tp)) {
3011         case ASIC_REV_5700:
3012         case ASIC_REV_5704:
3013                 return true;
3014         case ASIC_REV_5780:
3015                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3016                         return true;
3017                 return false;
3018         case ASIC_REV_5717:
3019                 if (!tp->pci_fn)
3020                         return true;
3021                 return false;
3022         case ASIC_REV_5719:
3023         case ASIC_REV_5720:
3024                 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3025                     !tp->pci_fn)
3026                         return true;
3027                 return false;
3028         }
3029
3030         return false;
3031 }
3032
/* Power down the PHY (or serdes) as far as the chip allows.  When
 * @do_low_power is set, also program the aux-control low-power bits
 * on copper PHYs that take that path.  No-op when the link must stay
 * up for management traffic (KEEP_LINK_ON_PWRDN).
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
		return;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* 5906: reset the EPHY, then put it into IDDQ mode. */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			/* Enable shadow register access to set the
			 * standby power-down bit, then restore the
			 * original FET_TEST value.
			 */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (tg3_phy_power_bug(tp))
		return;

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		/* Drop the MAC clock to 12.5MHz while powered down;
		 * tg3_phy_reset() undoes this on the way back up.
		 */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
3105
3106 /* tp->lock is held. */
3107 static int tg3_nvram_lock(struct tg3 *tp)
3108 {
3109         if (tg3_flag(tp, NVRAM)) {
3110                 int i;
3111
3112                 if (tp->nvram_lock_cnt == 0) {
3113                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3114                         for (i = 0; i < 8000; i++) {
3115                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3116                                         break;
3117                                 udelay(20);
3118                         }
3119                         if (i == 8000) {
3120                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3121                                 return -ENODEV;
3122                         }
3123                 }
3124                 tp->nvram_lock_cnt++;
3125         }
3126         return 0;
3127 }
3128
3129 /* tp->lock is held. */
3130 static void tg3_nvram_unlock(struct tg3 *tp)
3131 {
3132         if (tg3_flag(tp, NVRAM)) {
3133                 if (tp->nvram_lock_cnt > 0)
3134                         tp->nvram_lock_cnt--;
3135                 if (tp->nvram_lock_cnt == 0)
3136                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3137         }
3138 }
3139
3140 /* tp->lock is held. */
3141 static void tg3_enable_nvram_access(struct tg3 *tp)
3142 {
3143         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3144                 u32 nvaccess = tr32(NVRAM_ACCESS);
3145
3146                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3147         }
3148 }
3149
3150 /* tp->lock is held. */
3151 static void tg3_disable_nvram_access(struct tg3 *tp)
3152 {
3153         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3154                 u32 nvaccess = tr32(NVRAM_ACCESS);
3155
3156                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3157         }
3158 }
3159
/* Read one 32-bit word from the legacy serial EEPROM at @offset,
 * which must be dword aligned and within EEPROM_ADDR_ADDR_MASK.
 * The result is byteswapped into *@val to match tg3_nvram_read().
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
                                        u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	/* Preserve the bits of EEPROM_ADDR we do not program below. */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll for completion, up to ~1 second. */
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
3199
3200 #define NVRAM_CMD_TIMEOUT 10000
3201
3202 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3203 {
3204         int i;
3205
3206         tw32(NVRAM_CMD, nvram_cmd);
3207         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3208                 udelay(10);
3209                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3210                         udelay(10);
3211                         break;
3212                 }
3213         }
3214
3215         if (i == NVRAM_CMD_TIMEOUT)
3216                 return -EBUSY;
3217
3218         return 0;
3219 }
3220
3221 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3222 {
3223         if (tg3_flag(tp, NVRAM) &&
3224             tg3_flag(tp, NVRAM_BUFFERED) &&
3225             tg3_flag(tp, FLASH) &&
3226             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3227             (tp->nvram_jedecnum == JEDEC_ATMEL))
3228
3229                 addr = ((addr / tp->nvram_pagesize) <<
3230                         ATMEL_AT45DB0X1B_PAGE_POS) +
3231                        (addr % tp->nvram_pagesize);
3232
3233         return addr;
3234 }
3235
3236 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3237 {
3238         if (tg3_flag(tp, NVRAM) &&
3239             tg3_flag(tp, NVRAM_BUFFERED) &&
3240             tg3_flag(tp, FLASH) &&
3241             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3242             (tp->nvram_jedecnum == JEDEC_ATMEL))
3243
3244                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3245                         tp->nvram_pagesize) +
3246                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3247
3248         return addr;
3249 }
3250
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 *
 * Reads one 32-bit word at @offset into *@val; returns 0 or a
 * negative errno.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	/* Parts without NVRAM fall back to the legacy EEPROM path. */
	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	/* Translate to the flash part's native addressing. */
	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	/* Only latch the data register if the command completed. */
	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
3288
3289 /* Ensures NVRAM data is in bytestream format. */
3290 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3291 {
3292         u32 v;
3293         int res = tg3_nvram_read(tp, offset, &v);
3294         if (!res)
3295                 *val = cpu_to_be32(v);
3296         return res;
3297 }
3298
/* Write @len bytes from @buf to the legacy serial EEPROM starting at
 * @offset, one 32-bit word at a time, polling each word to completion.
 * Returns 0, or -EBUSY if any word times out (~1s per word).
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
                                    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		/* Write COMPLETE back first — presumably this acks the
		 * previous operation; NOTE(review): confirm against the
		 * EEPROM_ADDR register semantics.
		 */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll this word's completion, up to ~1 second. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
3347
3348 /* offset and length are dword aligned */
3349 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3350                 u8 *buf)
3351 {
3352         int ret = 0;
3353         u32 pagesize = tp->nvram_pagesize;
3354         u32 pagemask = pagesize - 1;
3355         u32 nvram_cmd;
3356         u8 *tmp;
3357
3358         tmp = kmalloc(pagesize, GFP_KERNEL);
3359         if (tmp == NULL)
3360                 return -ENOMEM;
3361
3362         while (len) {
3363                 int j;
3364                 u32 phy_addr, page_off, size;
3365
3366                 phy_addr = offset & ~pagemask;
3367
3368                 for (j = 0; j < pagesize; j += 4) {
3369                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3370                                                   (__be32 *) (tmp + j));
3371                         if (ret)
3372                                 break;
3373                 }
3374                 if (ret)
3375                         break;
3376
3377                 page_off = offset & pagemask;
3378                 size = pagesize;
3379                 if (len < size)
3380                         size = len;
3381
3382                 len -= size;
3383
3384                 memcpy(tmp + page_off, buf, size);
3385
3386                 offset = offset + (pagesize - page_off);
3387
3388                 tg3_enable_nvram_access(tp);
3389
3390                 /*
3391                  * Before we can erase the flash page, we need
3392                  * to issue a special "write enable" command.
3393                  */
3394                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3395
3396                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3397                         break;
3398
3399                 /* Erase the target page */
3400                 tw32(NVRAM_ADDR, phy_addr);
3401
3402                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3403                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3404
3405                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3406                         break;
3407
3408                 /* Issue another write enable to start the write. */
3409                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3410
3411                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3412                         break;
3413
3414                 for (j = 0; j < pagesize; j += 4) {
3415                         __be32 data;
3416
3417                         data = *((__be32 *) (tmp + j));
3418
3419                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3420
3421                         tw32(NVRAM_ADDR, phy_addr + j);
3422
3423                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3424                                 NVRAM_CMD_WR;
3425
3426                         if (j == 0)
3427                                 nvram_cmd |= NVRAM_CMD_FIRST;
3428                         else if (j == (pagesize - 4))
3429                                 nvram_cmd |= NVRAM_CMD_LAST;
3430
3431                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3432                         if (ret)
3433                                 break;
3434                 }
3435                 if (ret)
3436                         break;
3437         }
3438
3439         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3440         tg3_nvram_exec_cmd(tp, nvram_cmd);
3441
3442         kfree(tmp);
3443
3444         return ret;
3445 }
3446
/* Write @len bytes from @buf to buffered flash / EEPROM at @offset.
 *
 * offset and len are dword aligned; @buf holds bytestream (big-endian)
 * data.  Buffered parts accept word-at-a-time writes; FIRST/LAST command
 * flags delimit each page burst.  Returns 0 or the first command error.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		/* Latch the data word before issuing the write command. */
		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		/* Translate the logical offset to the device address. */
		phy_addr = tg3_nvram_phys_addr(tp, offset);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* FIRST marks the start of a page burst (or of the whole
		 * transfer); LAST marks a page boundary or the final word.
		 */
		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* 57765+ flash parts auto-increment the address within a
		 * burst; everything else needs it written every word.
		 */
		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
		    !tg3_flag(tp, FLASH) ||
		    !tg3_flag(tp, 57765_PLUS))
			tw32(NVRAM_ADDR, phy_addr);

		/* Older ST-JEDEC parts need an explicit write-enable before
		 * each burst.
		 */
		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			u32 cmd;

			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
			ret = tg3_nvram_exec_cmd(tp, cmd);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}
3501
3502 /* offset and length are dword aligned */
3503 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3504 {
3505         int ret;
3506
3507         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3508                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3509                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3510                 udelay(40);
3511         }
3512
3513         if (!tg3_flag(tp, NVRAM)) {
3514                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3515         } else {
3516                 u32 grc_mode;
3517
3518                 ret = tg3_nvram_lock(tp);
3519                 if (ret)
3520                         return ret;
3521
3522                 tg3_enable_nvram_access(tp);
3523                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3524                         tw32(NVRAM_WRITE1, 0x406);
3525
3526                 grc_mode = tr32(GRC_MODE);
3527                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3528
3529                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3530                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3531                                 buf);
3532                 } else {
3533                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3534                                 buf);
3535                 }
3536
3537                 grc_mode = tr32(GRC_MODE);
3538                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3539
3540                 tg3_disable_nvram_access(tp);
3541                 tg3_nvram_unlock(tp);
3542         }
3543
3544         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3545                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3546                 udelay(40);
3547         }
3548
3549         return ret;
3550 }
3551
3552 #define RX_CPU_SCRATCH_BASE     0x30000
3553 #define RX_CPU_SCRATCH_SIZE     0x04000
3554 #define TX_CPU_SCRATCH_BASE     0x34000
3555 #define TX_CPU_SCRATCH_SIZE     0x04000
3556
3557 /* tp->lock is held. */
3558 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3559 {
3560         int i;
3561         const int iters = 10000;
3562
3563         for (i = 0; i < iters; i++) {
3564                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3565                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3566                 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3567                         break;
3568                 if (pci_channel_offline(tp->pdev))
3569                         return -EBUSY;
3570         }
3571
3572         return (i == iters) ? -EBUSY : 0;
3573 }
3574
/* tp->lock is held.
 *
 * Halt the RX CPU.  Even if the poll in tg3_pause_cpu() times out, a
 * final halt request is forced (flushed by tw32_f, then a short delay)
 * before returning the poll result.
 */
static int tg3_rxcpu_pause(struct tg3 *tp)
{
	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);

	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
	udelay(10);

	return rc;
}
3586
/* tp->lock is held.
 *
 * Halt the TX CPU; returns 0 on success or -EBUSY on timeout.
 */
static int tg3_txcpu_pause(struct tg3 *tp)
{
	return tg3_pause_cpu(tp, TX_CPU_BASE);
}
3592
/* tp->lock is held.
 *
 * Release the embedded CPU at @cpu_base: clear all state bits, then
 * clear CPU_MODE (including the halt bit); tw32_f flushes the posted
 * write so the CPU resumes before we return.
 */
static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
{
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
}
3599
/* tp->lock is held.
 *
 * Release the RX CPU from its halted state.
 */
static void tg3_rxcpu_resume(struct tg3 *tp)
{
	tg3_resume_cpu(tp, RX_CPU_BASE);
}
3605
/* tp->lock is held.
 *
 * Halt the RX or TX embedded CPU, with per-ASIC special cases, and clear
 * any NVRAM arbitration the firmware held.  Returns 0 on success or
 * -ENODEV if the CPU fails to halt.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
{
	int rc;

	/* 5705-and-later chips have no separate TX CPU. */
	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		/* The 5906 exposes a single halt bit rather than per-CPU
		 * STATE/MODE registers.
		 */
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (cpu_base == RX_CPU_BASE) {
		rc = tg3_rxcpu_pause(tp);
	} else {
		/*
		 * There is only an Rx CPU for the 5750 derivative in the
		 * BCM4785.
		 */
		if (tg3_flag(tp, IS_SSB_CORE))
			return 0;

		rc = tg3_txcpu_pause(tp);
	}

	if (rc) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
3643
3644 static int tg3_fw_data_len(struct tg3 *tp,
3645                            const struct tg3_firmware_hdr *fw_hdr)
3646 {
3647         int fw_len;
3648
3649         /* Non fragmented firmware have one firmware header followed by a
3650          * contiguous chunk of data to be written. The length field in that
3651          * header is not the length of data to be written but the complete
3652          * length of the bss. The data length is determined based on
3653          * tp->fw->size minus headers.
3654          *
3655          * Fragmented firmware have a main header followed by multiple
3656          * fragments. Each fragment is identical to non fragmented firmware
3657          * with a firmware header followed by a contiguous chunk of data. In
3658          * the main header, the length field is unused and set to 0xffffffff.
3659          * In each fragment header the length is the entire size of that
3660          * fragment i.e. fragment data + header length. Data length is
3661          * therefore length field in the header minus TG3_FW_HDR_LEN.
3662          */
3663         if (tp->fw_len == 0xffffffff)
3664                 fw_len = be32_to_cpu(fw_hdr->len);
3665         else
3666                 fw_len = tp->fw->size;
3667
3668         return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3669 }
3670
/* tp->lock is held.
 *
 * Download firmware @fw_hdr into the scratch memory of the CPU at
 * @cpu_base.  For non-57766 parts the CPU is halted and its scratch area
 * zeroed first; for the 57766 the caller has already paused the CPU and
 * the blob is a main header followed by fragments written fragment by
 * fragment.  Returns 0 on success or a negative errno.
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 const struct tg3_firmware_hdr *fw_hdr)
{
	int err, i;
	void (*write_op)(struct tg3 *, u32, u32);
	int total_len = tp->fw->size;

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	/* Choose direct memory-window writes where supported, otherwise
	 * fall back to indirect register access.
	 */
	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
		/* It is possible that bootcode is still loading at this point.
		 * Get the nvram lock first before halting the cpu.
		 */
		int lock_err = tg3_nvram_lock(tp);
		err = tg3_halt_cpu(tp, cpu_base);
		if (!lock_err)
			tg3_nvram_unlock(tp);
		if (err)
			goto out;

		/* Zero the scratch area, then keep the CPU halted while the
		 * new image is written.
		 */
		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
			write_op(tp, cpu_scratch_base + i, 0);
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,
		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
	} else {
		/* Subtract additional main header for fragmented firmware and
		 * advance to the first fragment
		 */
		total_len -= TG3_FW_HDR_LEN;
		fw_hdr++;
	}

	/* Write each fragment's payload to its target address, then step
	 * to the next fragment header.  Non-fragmented firmware makes a
	 * single pass through this loop.
	 */
	do {
		u32 *fw_data = (u32 *)(fw_hdr + 1);
		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
			write_op(tp, cpu_scratch_base +
				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
				     (i * sizeof(u32)),
				 be32_to_cpu(fw_data[i]));

		total_len -= be32_to_cpu(fw_hdr->len);

		/* Advance to next fragment */
		fw_hdr = (struct tg3_firmware_hdr *)
			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
	} while (total_len > 0);

	err = 0;

out:
	return err;
}
3736
3737 /* tp->lock is held. */
3738 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3739 {
3740         int i;
3741         const int iters = 5;
3742
3743         tw32(cpu_base + CPU_STATE, 0xffffffff);
3744         tw32_f(cpu_base + CPU_PC, pc);
3745
3746         for (i = 0; i < iters; i++) {
3747                 if (tr32(cpu_base + CPU_PC) == pc)
3748                         break;
3749                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3750                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3751                 tw32_f(cpu_base + CPU_PC, pc);
3752                 udelay(1000);
3753         }
3754
3755         return (i == iters) ? -EBUSY : 0;
3756 }
3757
/* tp->lock is held.
 *
 * Download the 5701 A0 workaround firmware into both the RX and TX CPU
 * scratch areas, then start only the RX CPU at the image's base address.
 * Returns 0 on success or a negative errno.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	int err;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC),
				be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_rxcpu_resume(tp);

	return 0;
}
3799
3800 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3801 {
3802         const int iters = 1000;
3803         int i;
3804         u32 val;
3805
3806         /* Wait for boot code to complete initialization and enter service
3807          * loop. It is then safe to download service patches
3808          */
3809         for (i = 0; i < iters; i++) {
3810                 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3811                         break;
3812
3813                 udelay(10);
3814         }
3815
3816         if (i == iters) {
3817                 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3818                 return -EBUSY;
3819         }
3820
3821         val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3822         if (val & 0xff) {
3823                 netdev_warn(tp->dev,
3824                             "Other patches exist. Not downloading EEE patch\n");
3825                 return -EEXIST;
3826         }
3827
3828         return 0;
3829 }
3830
/* tp->lock is held.
 *
 * Best-effort download of the 57766 service-patch firmware; all failure
 * paths simply return without loading.  Only runs when the NO_NVRAM flag
 * is set, the boot code is idle in its service loop, and a firmware blob
 * is present.
 */
static void tg3_load_57766_firmware(struct tg3 *tp)
{
	struct tg3_firmware_hdr *fw_hdr;

	if (!tg3_flag(tp, NO_NVRAM))
		return;

	if (tg3_validate_rxcpu_state(tp))
		return;

	if (!tp->fw)
		return;

	/* This firmware blob has a different format than older firmware
	 * releases as given below. The main difference is we have fragmented
	 * data to be written to non-contiguous locations.
	 *
	 * In the beginning we have a firmware header identical to other
	 * firmware which consists of version, base addr and length. The length
	 * here is unused and set to 0xffffffff.
	 *
	 * This is followed by a series of firmware fragments which are
	 * individually identical to previous firmware. i.e. they have the
	 * firmware header and followed by data for that fragment. The version
	 * field of the individual fragment header is unused.
	 */

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
		return;

	if (tg3_rxcpu_pause(tp))
		return;

	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);

	/* Restart the RX CPU with the patch in place. */
	tg3_rxcpu_resume(tp);
}
3871
/* tp->lock is held.
 *
 * Download and start the TSO offload firmware.  On the 5705 the image
 * runs on the RX CPU out of the MBUF pool; on other parts it runs on the
 * TX CPU out of its scratch memory.  No-op (returns 0) unless the FW_TSO
 * flag is set.  Returns 0 on success or a negative errno.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err;

	if (!tg3_flag(tp, FW_TSO))
		return 0;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	cpu_scratch_size = tp->fw_len;

	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup the cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC),
			   be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_resume_cpu(tp, cpu_base);
	return 0;
}
3921
3922
3923 /* tp->lock is held. */
3924 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3925 {
3926         u32 addr_high, addr_low;
3927         int i;
3928
3929         addr_high = ((tp->dev->dev_addr[0] << 8) |
3930                      tp->dev->dev_addr[1]);
3931         addr_low = ((tp->dev->dev_addr[2] << 24) |
3932                     (tp->dev->dev_addr[3] << 16) |
3933                     (tp->dev->dev_addr[4] <<  8) |
3934                     (tp->dev->dev_addr[5] <<  0));
3935         for (i = 0; i < 4; i++) {
3936                 if (i == 1 && skip_mac_1)
3937                         continue;
3938                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3939                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3940         }
3941
3942         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3943             tg3_asic_rev(tp) == ASIC_REV_5704) {
3944                 for (i = 0; i < 12; i++) {
3945                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3946                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3947                 }
3948         }
3949
3950         addr_high = (tp->dev->dev_addr[0] +
3951                      tp->dev->dev_addr[1] +
3952                      tp->dev->dev_addr[2] +
3953                      tp->dev->dev_addr[3] +
3954                      tp->dev->dev_addr[4] +
3955                      tp->dev->dev_addr[5]) &
3956                 TX_BACKOFF_SEED_MASK;
3957         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3958 }
3959
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	/* Re-writes the cached MISC_HOST_CTRL value to PCI config space,
	 * which may have been lost across a power-state transition.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
3969
3970 static int tg3_power_up(struct tg3 *tp)
3971 {
3972         int err;
3973
3974         tg3_enable_register_access(tp);
3975
3976         err = pci_set_power_state(tp->pdev, PCI_D0);
3977         if (!err) {
3978                 /* Switch out of Vaux if it is a NIC */
3979                 tg3_pwrsrc_switch_to_vmain(tp);
3980         } else {
3981                 netdev_err(tp->dev, "Transition to D0 failed\n");
3982         }
3983
3984         return err;
3985 }
3986
3987 static int tg3_setup_phy(struct tg3 *, bool);
3988
3989 static int tg3_power_down_prepare(struct tg3 *tp)
3990 {
3991         u32 misc_host_ctrl;
3992         bool device_should_wake, do_low_power;
3993
3994         tg3_enable_register_access(tp);
3995
3996         /* Restore the CLKREQ setting. */
3997         if (tg3_flag(tp, CLKREQ_BUG))
3998                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3999                                          PCI_EXP_LNKCTL_CLKREQ_EN);
4000
4001         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4002         tw32(TG3PCI_MISC_HOST_CTRL,
4003              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4004
4005         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4006                              tg3_flag(tp, WOL_ENABLE);
4007
4008         if (tg3_flag(tp, USE_PHYLIB)) {
4009                 do_low_power = false;
4010                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4011                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4012                         struct phy_device *phydev;
4013                         u32 phyid, advertising;
4014
4015                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
4016
4017                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4018
4019                         tp->link_config.speed = phydev->speed;
4020                         tp->link_config.duplex = phydev->duplex;
4021                         tp->link_config.autoneg = phydev->autoneg;
4022                         tp->link_config.advertising = phydev->advertising;
4023
4024                         advertising = ADVERTISED_TP |
4025                                       ADVERTISED_Pause |
4026                                       ADVERTISED_Autoneg |
4027                                       ADVERTISED_10baseT_Half;
4028
4029                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4030                                 if (tg3_flag(tp, WOL_SPEED_100MB))
4031                                         advertising |=
4032                                                 ADVERTISED_100baseT_Half |
4033                                                 ADVERTISED_100baseT_Full |
4034                                                 ADVERTISED_10baseT_Full;
4035                                 else
4036                                         advertising |= ADVERTISED_10baseT_Full;
4037                         }
4038
4039                         phydev->advertising = advertising;
4040
4041                         phy_start_aneg(phydev);
4042
4043                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4044                         if (phyid != PHY_ID_BCMAC131) {
4045                                 phyid &= PHY_BCM_OUI_MASK;
4046                                 if (phyid == PHY_BCM_OUI_1 ||
4047                                     phyid == PHY_BCM_OUI_2 ||
4048                                     phyid == PHY_BCM_OUI_3)
4049                                         do_low_power = true;
4050                         }
4051                 }
4052         } else {
4053                 do_low_power = true;
4054
4055                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4056                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4057
4058                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4059                         tg3_setup_phy(tp, false);
4060         }
4061
4062         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4063                 u32 val;
4064
4065                 val = tr32(GRC_VCPU_EXT_CTRL);
4066                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4067         } else if (!tg3_flag(tp, ENABLE_ASF)) {
4068                 int i;
4069                 u32 val;
4070
4071                 for (i = 0; i < 200; i++) {
4072                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4073                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4074                                 break;
4075                         msleep(1);
4076                 }
4077         }
4078         if (tg3_flag(tp, WOL_CAP))
4079                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4080                                                      WOL_DRV_STATE_SHUTDOWN |
4081                                                      WOL_DRV_WOL |
4082                                                      WOL_SET_MAGIC_PKT);
4083
4084         if (device_should_wake) {
4085                 u32 mac_mode;
4086
4087                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4088                         if (do_low_power &&
4089                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4090                                 tg3_phy_auxctl_write(tp,
4091                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4092                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
4093                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4094                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4095                                 udelay(40);
4096                         }
4097
4098                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4099                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
4100                         else if (tp->phy_flags &
4101                                  TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4102                                 if (tp->link_config.active_speed == SPEED_1000)
4103                                         mac_mode = MAC_MODE_PORT_MODE_GMII;
4104                                 else
4105                                         mac_mode = MAC_MODE_PORT_MODE_MII;
4106                         } else
4107                                 mac_mode = MAC_MODE_PORT_MODE_MII;
4108
4109                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4110                         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4111                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4112                                              SPEED_100 : SPEED_10;
4113                                 if (tg3_5700_link_polarity(tp, speed))
4114                                         mac_mode |= MAC_MODE_LINK_POLARITY;
4115                                 else
4116                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
4117                         }
4118                 } else {
4119                         mac_mode = MAC_MODE_PORT_MODE_TBI;
4120                 }
4121
4122                 if (!tg3_flag(tp, 5750_PLUS))
4123                         tw32(MAC_LED_CTRL, tp->led_ctrl);
4124
4125                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4126                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4127                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4128                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4129
4130                 if (tg3_flag(tp, ENABLE_APE))
4131                         mac_mode |= MAC_MODE_APE_TX_EN |
4132                                     MAC_MODE_APE_RX_EN |
4133                                     MAC_MODE_TDE_ENABLE;
4134
4135                 tw32_f(MAC_MODE, mac_mode);
4136                 udelay(100);
4137
4138                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4139                 udelay(10);
4140         }
4141
4142         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4143             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4144              tg3_asic_rev(tp) == ASIC_REV_5701)) {
4145                 u32 base_val;
4146
4147                 base_val = tp->pci_clock_ctrl;
4148                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4149                              CLOCK_CTRL_TXCLK_DISABLE);
4150
4151                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4152                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
4153         } else if (tg3_flag(tp, 5780_CLASS) ||
4154                    tg3_flag(tp, CPMU_PRESENT) ||
4155                    tg3_asic_rev(tp) == ASIC_REV_5906) {
4156                 /* do nothing */
4157         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4158                 u32 newbits1, newbits2;
4159
4160                 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4161                     tg3_asic_rev(tp) == ASIC_REV_5701) {
4162                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4163                                     CLOCK_CTRL_TXCLK_DISABLE |
4164                                     CLOCK_CTRL_ALTCLK);
4165                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4166                 } else if (tg3_flag(tp, 5705_PLUS)) {
4167                         newbits1 = CLOCK_CTRL_625_CORE;
4168                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4169                 } else {
4170                         newbits1 = CLOCK_CTRL_ALTCLK;
4171                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4172                 }
4173
4174                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4175                             40);
4176
4177                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4178                             40);
4179
4180                 if (!tg3_flag(tp, 5705_PLUS)) {
4181                         u32 newbits3;
4182
4183                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4184                             tg3_asic_rev(tp) == ASIC_REV_5701) {
4185                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4186                                             CLOCK_CTRL_TXCLK_DISABLE |
4187                                             CLOCK_CTRL_44MHZ_CORE);
4188                         } else {
4189                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4190                         }
4191
4192                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
4193                                     tp->pci_clock_ctrl | newbits3, 40);
4194                 }
4195         }
4196
4197         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4198                 tg3_power_down_phy(tp, do_low_power);
4199
4200         tg3_frob_aux_power(tp, true);
4201
4202         /* Workaround for unstable PLL clock */
4203         if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4204             ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4205              (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4206                 u32 val = tr32(0x7d00);
4207
4208                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4209                 tw32(0x7d00, val);
4210                 if (!tg3_flag(tp, ENABLE_ASF)) {
4211                         int err;
4212
4213                         err = tg3_nvram_lock(tp);
4214                         tg3_halt_cpu(tp, RX_CPU_BASE);
4215                         if (!err)
4216                                 tg3_nvram_unlock(tp);
4217                 }
4218         }
4219
4220         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4221
4222         tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4223
4224         return 0;
4225 }
4226
/* Final step of the power-down path: arm PCI PME so the device can
 * raise a wake event if Wake-on-LAN is enabled, then drop the device
 * into the D3hot low-power state.
 */
static void tg3_power_down(struct tg3 *tp)
{
	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
4232
4233 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4234 {
4235         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4236         case MII_TG3_AUX_STAT_10HALF:
4237                 *speed = SPEED_10;
4238                 *duplex = DUPLEX_HALF;
4239                 break;
4240
4241         case MII_TG3_AUX_STAT_10FULL:
4242                 *speed = SPEED_10;
4243                 *duplex = DUPLEX_FULL;
4244                 break;
4245
4246         case MII_TG3_AUX_STAT_100HALF:
4247                 *speed = SPEED_100;
4248                 *duplex = DUPLEX_HALF;
4249                 break;
4250
4251         case MII_TG3_AUX_STAT_100FULL:
4252                 *speed = SPEED_100;
4253                 *duplex = DUPLEX_FULL;
4254                 break;
4255
4256         case MII_TG3_AUX_STAT_1000HALF:
4257                 *speed = SPEED_1000;
4258                 *duplex = DUPLEX_HALF;
4259                 break;
4260
4261         case MII_TG3_AUX_STAT_1000FULL:
4262                 *speed = SPEED_1000;
4263                 *duplex = DUPLEX_FULL;
4264                 break;
4265
4266         default:
4267                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4268                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4269                                  SPEED_10;
4270                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4271                                   DUPLEX_HALF;
4272                         break;
4273                 }
4274                 *speed = SPEED_UNKNOWN;
4275                 *duplex = DUPLEX_UNKNOWN;
4276                 break;
4277         }
4278 }
4279
/* Program the PHY autonegotiation advertisement registers.
 *
 * @advertise: ethtool ADVERTISED_* link-mode mask to advertise
 * @flowctrl:  FLOW_CTRL_TX / FLOW_CTRL_RX pause settings to advertise
 *
 * Writes MII_ADVERTISE (10/100 + pause bits), MII_CTRL1000 (gigabit
 * modes, skipped for 10/100-only PHYs) and, for EEE-capable PHYs, the
 * EEE advertisement via the Clause 45 AN MMD plus chip-specific DSP
 * workarounds.  Returns 0 on success or the first failing PHY-write
 * error code.
 */
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	new_adv = ADVERTISE_CSMA;
	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
	new_adv |= mii_advertise_flowctrl(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

		/* 5701 A0/B0 force master mode for 1000Base-T */
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
		if (err)
			goto done;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	/* Turn off LPI while the EEE advertisement is reprogrammed */
	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
	if (!err) {
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;

		if (!tp->eee.eee_enabled) {
			/* EEE globally disabled: advertise nothing */
			val = 0;
			tp->eee.advertised = 0;
		} else {
			tp->eee.advertised = advertise &
					     (ADVERTISED_100baseT_Full |
					      ADVERTISED_1000baseT_Full);
		}

		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;

		switch (tg3_asic_rev(tp)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_57766:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above... */
			if (val)
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			/* Fall through */
		case ASIC_REV_5720:
		case ASIC_REV_5762:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		/* Always restore auxctl; report the first error seen */
		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
		if (!err)
			err = err2;
	}

done:
	return err;
}
4363
/* Kick off copper-PHY link establishment.
 *
 * With autoneg enabled (or when entering low-power mode) this selects
 * an advertisement mask, programs it, and restarts autonegotiation.
 * In low-power mode without the keep-link flag, only the slowest modes
 * needed for WOL are advertised.  With autoneg disabled, the requested
 * speed/duplex is forced into BMCR; the link is first dropped via
 * loopback so the peer renegotiates against the new setting.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		u32 adv, fc;

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			/* Powering down: advertise only what WOL needs */
			adv = ADVERTISED_10baseT_Half |
			      ADVERTISED_10baseT_Full;
			if (tg3_flag(tp, WOL_SPEED_100MB))
				adv |= ADVERTISED_100baseT_Half |
				       ADVERTISED_100baseT_Full;
			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
				adv |= ADVERTISED_1000baseT_Half |
				       ADVERTISED_1000baseT_Full;

			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
		} else {
			adv = tp->link_config.advertising;
			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
				adv &= ~(ADVERTISED_1000baseT_Half |
					 ADVERTISED_1000baseT_Full);

			fc = tp->link_config.flowctrl;
		}

		tg3_phy_autoneg_cfg(tp, adv, fc);

		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
			/* Normally during power down we want to autonegotiate
			 * the lowest possible speed for WOL. However, to avoid
			 * link flap, we leave it untouched.
			 */
			return;
		}

		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	} else {
		int i;
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* With autoneg disabled, 5715 only links up when the
			 * advertisement register has the configured speed
			 * enabled.
			 */
			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
		}

		/* Build the forced BMCR value from speed/duplex config */
		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Force the link down via loopback and wait (up to
			 * 15ms) for BMSR to report link loss before applying
			 * the new forced mode.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR is latched; read twice for current state */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	}
}
4457
/* Reverse-engineer tp->link_config from the PHY's current register
 * state (used when firmware/boot code configured the PHY before the
 * driver took over, so we don't disturb an established link).
 *
 * Populates autoneg, speed, duplex, advertising and flowctrl fields.
 * Returns 0 on success; -EIO when a forced-mode combination can't be
 * represented (e.g. forced 10/100 on a serdes PHY); or a PHY-read
 * error code.
 */
static int tg3_phy_pull_config(struct tg3 *tp)
{
	int err;
	u32 val;

	err = tg3_readphy(tp, MII_BMCR, &val);
	if (err)
		goto done;

	if (!(val & BMCR_ANENABLE)) {
		/* Forced-mode path: decode speed/duplex from BMCR */
		tp->link_config.autoneg = AUTONEG_DISABLE;
		tp->link_config.advertising = 0;
		tg3_flag_clear(tp, PAUSE_AUTONEG);

		err = -EIO;

		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
		case 0:
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_10;
			break;
		case BMCR_SPEED100:
			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
				goto done;

			tp->link_config.speed = SPEED_100;
			break;
		case BMCR_SPEED1000:
			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
				tp->link_config.speed = SPEED_1000;
				break;
			}
			/* Fall through */
		default:
			goto done;
		}

		if (val & BMCR_FULLDPLX)
			tp->link_config.duplex = DUPLEX_FULL;
		else
			tp->link_config.duplex = DUPLEX_HALF;

		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

		err = 0;
		goto done;
	}

	/* Autoneg path: rebuild the advertising mask from the PHY */
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.advertising = ADVERTISED_Autoneg;
	tg3_flag_set(tp, PAUSE_AUTONEG);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		u32 adv;

		err = tg3_readphy(tp, MII_ADVERTISE, &val);
		if (err)
			goto done;

		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
		tp->link_config.advertising |= adv | ADVERTISED_TP;

		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
	} else {
		tp->link_config.advertising |= ADVERTISED_FIBRE;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 adv;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			/* Copper: gigabit modes live in MII_CTRL1000 */
			err = tg3_readphy(tp, MII_CTRL1000, &val);
			if (err)
				goto done;

			adv = mii_ctrl1000_to_ethtool_adv_t(val);
		} else {
			/* Serdes: 1000X modes live in MII_ADVERTISE */
			err = tg3_readphy(tp, MII_ADVERTISE, &val);
			if (err)
				goto done;

			adv = tg3_decode_flowctrl_1000X(val);
			tp->link_config.flowctrl = adv;

			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
			adv = mii_adv_to_ethtool_adv_x(val);
		}

		tp->link_config.advertising |= adv;
	}

done:
	return err;
}
4554
/* One-time DSP setup for the BCM5401 PHY: disable tap power
 * management and set the extended packet length bit via the shadow
 * auxctl register, then load a series of DSP coefficient writes.
 * Errors are OR-ed together, so any failure yields a non-zero return.
 *
 * NOTE(review): DSP register 0x8006 is written twice (0x0132 then
 * 0x0232) — looks like a deliberate two-step sequence; confirm
 * against Broadcom PHY documentation before changing.
 */
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);

	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

	udelay(40);

	return err;
}
4573
4574 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4575 {
4576         struct ethtool_eee eee;
4577
4578         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4579                 return true;
4580
4581         tg3_eee_pull_config(tp, &eee);
4582
4583         if (tp->eee.eee_enabled) {
4584                 if (tp->eee.advertised != eee.advertised ||
4585                     tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4586                     tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4587                         return false;
4588         } else {
4589                 /* EEE is disabled but we're advertising */
4590                 if (eee.advertised)
4591                         return false;
4592         }
4593
4594         return true;
4595 }
4596
/* Verify that the PHY's autoneg advertisement registers still match
 * tp->link_config (i.e. nothing — firmware, another agent — rewrote
 * them behind our back).
 *
 * @lcladv: out parameter; receives the raw MII_ADVERTISE value.
 * Returns true when both MII_ADVERTISE and (for gigabit-capable PHYs)
 * MII_CTRL1000 match the configured advertisement.
 */
static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
{
	u32 advmsk, tgtadv, advertising;

	advertising = tp->link_config.advertising;
	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;

	advmsk = ADVERTISE_ALL;
	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		/* Pause bits only matter on full-duplex links */
		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	}

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return false;

	if ((*lcladv & advmsk) != tgtadv)
		return false;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 tg3_ctrl;

		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);

		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
			return false;

		if (tgtadv &&
		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
			/* 5701 A0/B0 also force master mode; include those
			 * bits in the comparison (see tg3_phy_autoneg_cfg).
			 */
			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
		} else {
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
		}

		if (tg3_ctrl != tgtadv)
			return false;
	}

	return true;
}
4640
4641 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4642 {
4643         u32 lpeth = 0;
4644
4645         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4646                 u32 val;
4647
4648                 if (tg3_readphy(tp, MII_STAT1000, &val))
4649                         return false;
4650
4651                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4652         }
4653
4654         if (tg3_readphy(tp, MII_LPA, rmtadv))
4655                 return false;
4656
4657         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4658         tp->link_config.rmt_adv = lpeth;
4659
4660         return true;
4661 }
4662
4663 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4664 {
4665         if (curr_link_up != tp->link_up) {
4666                 if (curr_link_up) {
4667                         netif_carrier_on(tp->dev);
4668                 } else {
4669                         netif_carrier_off(tp->dev);
4670                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4671                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4672                 }
4673
4674                 tg3_link_report(tp);
4675                 return true;
4676         }
4677
4678         return false;
4679 }
4680
/* Quiesce MAC event generation and acknowledge any latched link/PHY
 * status-change bits, so stale events don't fire during the upcoming
 * link reconfiguration.
 */
static void tg3_clear_mac_status(struct tg3 *tp)
{
	tw32(MAC_EVENT, 0);

	/* Writing 1s clears the latched status bits */
	tw32_f(MAC_STATUS,
	       MAC_STATUS_SYNC_CHANGED |
	       MAC_STATUS_CFG_CHANGED |
	       MAC_STATUS_MI_COMPLETION |
	       MAC_STATUS_LNKSTATE_CHANGED);
	udelay(40);
}
4692
/* Program the CPMU Energy Efficient Ethernet (EEE) registers from the
 * current tp->eee configuration: link-idle detection sources, LPI exit
 * timing, EEE mode enables, and the LPI debounce timers.  When EEE is
 * disabled, the mode register is cleared entirely.
 */
static void tg3_setup_eee(struct tg3 *tp)
{
	u32 val;

	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
	/* 57765 A0 additionally watches APE TX for link-idle */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;

	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);

	tw32_f(TG3_CPMU_EEE_CTRL,
	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);

	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
	      TG3_CPMU_EEEMD_LPI_IN_RX |
	      TG3_CPMU_EEEMD_EEE_ENABLE;

	if (tg3_asic_rev(tp) != ASIC_REV_5717)
		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;

	if (tg3_flag(tp, ENABLE_APE))
		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;

	/* Mode register is zeroed outright when EEE is disabled */
	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);

	tw32_f(TG3_CPMU_EEE_DBTMR1,
	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
	       (tp->eee.tx_lpi_timer & 0xffff));

	tw32_f(TG3_CPMU_EEE_DBTMR2,
	       TG3_CPMU_DBTMR2_APE_TX_2047US |
	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
}
4728
4729 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4730 {
4731         bool current_link_up;
4732         u32 bmsr, val;
4733         u32 lcl_adv, rmt_adv;
4734         u16 current_speed;
4735         u8 current_duplex;
4736         int i, err;
4737
4738         tg3_clear_mac_status(tp);
4739
4740         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4741                 tw32_f(MAC_MI_MODE,
4742                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4743                 udelay(80);
4744         }
4745
4746         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4747
4748         /* Some third-party PHYs need to be reset on link going
4749          * down.
4750          */
4751         if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4752              tg3_asic_rev(tp) == ASIC_REV_5704 ||
4753              tg3_asic_rev(tp) == ASIC_REV_5705) &&
4754             tp->link_up) {
4755                 tg3_readphy(tp, MII_BMSR, &bmsr);
4756                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4757                     !(bmsr & BMSR_LSTATUS))
4758                         force_reset = true;
4759         }
4760         if (force_reset)
4761                 tg3_phy_reset(tp);
4762
4763         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4764                 tg3_readphy(tp, MII_BMSR, &bmsr);
4765                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4766                     !tg3_flag(tp, INIT_COMPLETE))
4767                         bmsr = 0;
4768
4769                 if (!(bmsr & BMSR_LSTATUS)) {
4770                         err = tg3_init_5401phy_dsp(tp);
4771                         if (err)
4772                                 return err;
4773
4774                         tg3_readphy(tp, MII_BMSR, &bmsr);
4775                         for (i = 0; i < 1000; i++) {
4776                                 udelay(10);
4777                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4778                                     (bmsr & BMSR_LSTATUS)) {
4779                                         udelay(40);
4780                                         break;
4781                                 }
4782                         }
4783
4784                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4785                             TG3_PHY_REV_BCM5401_B0 &&
4786                             !(bmsr & BMSR_LSTATUS) &&
4787                             tp->link_config.active_speed == SPEED_1000) {
4788                                 err = tg3_phy_reset(tp);
4789                                 if (!err)
4790                                         err = tg3_init_5401phy_dsp(tp);
4791                                 if (err)
4792                                         return err;
4793                         }
4794                 }
4795         } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4796                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4797                 /* 5701 {A0,B0} CRC bug workaround */
4798                 tg3_writephy(tp, 0x15, 0x0a75);
4799                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4800                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4801                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4802         }
4803
4804         /* Clear pending interrupts... */
4805         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4806         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4807
4808         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4809                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4810         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4811                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4812
4813         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4814             tg3_asic_rev(tp) == ASIC_REV_5701) {
4815                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4816                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4817                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4818                 else
4819                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4820         }
4821
4822         current_link_up = false;
4823         current_speed = SPEED_UNKNOWN;
4824         current_duplex = DUPLEX_UNKNOWN;
4825         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4826         tp->link_config.rmt_adv = 0;
4827
4828         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4829                 err = tg3_phy_auxctl_read(tp,
4830                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4831                                           &val);
4832                 if (!err && !(val & (1 << 10))) {
4833                         tg3_phy_auxctl_write(tp,
4834                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4835                                              val | (1 << 10));
4836                         goto relink;
4837                 }
4838         }
4839
4840         bmsr = 0;
4841         for (i = 0; i < 100; i++) {
4842                 tg3_readphy(tp, MII_BMSR, &bmsr);
4843                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4844                     (bmsr & BMSR_LSTATUS))
4845                         break;
4846                 udelay(40);
4847         }
4848
4849         if (bmsr & BMSR_LSTATUS) {
4850                 u32 aux_stat, bmcr;
4851
4852                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4853                 for (i = 0; i < 2000; i++) {
4854                         udelay(10);
4855                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4856                             aux_stat)
4857                                 break;
4858                 }
4859
4860                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4861                                              &current_speed,
4862                                              &current_duplex);
4863
4864                 bmcr = 0;
4865                 for (i = 0; i < 200; i++) {
4866                         tg3_readphy(tp, MII_BMCR, &bmcr);
4867                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4868                                 continue;
4869                         if (bmcr && bmcr != 0x7fff)
4870                                 break;
4871                         udelay(10);
4872                 }
4873
4874                 lcl_adv = 0;
4875                 rmt_adv = 0;
4876
4877                 tp->link_config.active_speed = current_speed;
4878                 tp->link_config.active_duplex = current_duplex;
4879
4880                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4881                         bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4882
4883                         if ((bmcr & BMCR_ANENABLE) &&
4884                             eee_config_ok &&
4885                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4886                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4887                                 current_link_up = true;
4888
4889                         /* EEE settings changes take effect only after a phy
4890                          * reset.  If we have skipped a reset due to Link Flap
4891                          * Avoidance being enabled, do it now.
4892                          */
4893                         if (!eee_config_ok &&
4894                             (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4895                             !force_reset) {
4896                                 tg3_setup_eee(tp);
4897                                 tg3_phy_reset(tp);
4898                         }
4899                 } else {
4900                         if (!(bmcr & BMCR_ANENABLE) &&
4901                             tp->link_config.speed == current_speed &&
4902                             tp->link_config.duplex == current_duplex) {
4903                                 current_link_up = true;
4904                         }
4905                 }
4906
4907                 if (current_link_up &&
4908                     tp->link_config.active_duplex == DUPLEX_FULL) {
4909                         u32 reg, bit;
4910
4911                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4912                                 reg = MII_TG3_FET_GEN_STAT;
4913                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4914                         } else {
4915                                 reg = MII_TG3_EXT_STAT;
4916                                 bit = MII_TG3_EXT_STAT_MDIX;
4917                         }
4918
4919                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4920                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4921
4922                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4923                 }
4924         }
4925
4926 relink:
4927         if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4928                 tg3_phy_copper_begin(tp);
4929
4930                 if (tg3_flag(tp, ROBOSWITCH)) {
4931                         current_link_up = true;
4932                         /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4933                         current_speed = SPEED_1000;
4934                         current_duplex = DUPLEX_FULL;
4935                         tp->link_config.active_speed = current_speed;
4936                         tp->link_config.active_duplex = current_duplex;
4937                 }
4938
4939                 tg3_readphy(tp, MII_BMSR, &bmsr);
4940                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4941                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4942                         current_link_up = true;
4943         }
4944
4945         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4946         if (current_link_up) {
4947                 if (tp->link_config.active_speed == SPEED_100 ||
4948                     tp->link_config.active_speed == SPEED_10)
4949                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4950                 else
4951                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4952         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4953                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4954         else
4955                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4956
4957         /* In order for the 5750 core in BCM4785 chip to work properly
4958          * in RGMII mode, the Led Control Register must be set up.
4959          */
4960         if (tg3_flag(tp, RGMII_MODE)) {
4961                 u32 led_ctrl = tr32(MAC_LED_CTRL);
4962                 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4963
4964                 if (tp->link_config.active_speed == SPEED_10)
4965                         led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4966                 else if (tp->link_config.active_speed == SPEED_100)
4967                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4968                                      LED_CTRL_100MBPS_ON);
4969                 else if (tp->link_config.active_speed == SPEED_1000)
4970                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4971                                      LED_CTRL_1000MBPS_ON);
4972
4973                 tw32(MAC_LED_CTRL, led_ctrl);
4974                 udelay(40);
4975         }
4976
4977         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4978         if (tp->link_config.active_duplex == DUPLEX_HALF)
4979                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4980
4981         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4982                 if (current_link_up &&
4983                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4984                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4985                 else
4986                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4987         }
4988
4989         /* ??? Without this setting Netgear GA302T PHY does not
4990          * ??? send/receive packets...
4991          */
4992         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4993             tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
4994                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4995                 tw32_f(MAC_MI_MODE, tp->mi_mode);
4996                 udelay(80);
4997         }
4998
4999         tw32_f(MAC_MODE, tp->mac_mode);
5000         udelay(40);
5001
5002         tg3_phy_eee_adjust(tp, current_link_up);
5003
5004         if (tg3_flag(tp, USE_LINKCHG_REG)) {
5005                 /* Polled via timer. */
5006                 tw32_f(MAC_EVENT, 0);
5007         } else {
5008                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5009         }
5010         udelay(40);
5011
5012         if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5013             current_link_up &&
5014             tp->link_config.active_speed == SPEED_1000 &&
5015             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5016                 udelay(120);
5017                 tw32_f(MAC_STATUS,
5018                      (MAC_STATUS_SYNC_CHANGED |
5019                       MAC_STATUS_CFG_CHANGED));
5020                 udelay(40);
5021                 tg3_write_mem(tp,
5022                               NIC_SRAM_FIRMWARE_MBOX,
5023                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5024         }
5025
5026         /* Prevent send BD corruption. */
5027         if (tg3_flag(tp, CLKREQ_BUG)) {
5028                 if (tp->link_config.active_speed == SPEED_100 ||
5029                     tp->link_config.active_speed == SPEED_10)
5030                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5031                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
5032                 else
5033                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5034                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
5035         }
5036
5037         tg3_test_and_report_link_chg(tp, current_link_up);
5038
5039         return 0;
5040 }
5041
/* Software context for the 1000BASE-X auto-negotiation arbitration state
 * machine in tg3_fiber_aneg_smachine() (IEEE 802.3 clause 37).  Used only
 * for fiber devices that cannot run auto-negotiation in hardware.
 */
struct tg3_fiber_aneginfo {
	int state;	/* current arbitration state, one of ANEG_STATE_* */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	u32 flags;	/* MR_* status bits; MR_LP_ADV_* mirror the link
			 * partner's received ability word
			 */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Timestamps in state-machine ticks (one tick per invocation);
	 * link_time records when the current state was entered.
	 */
	unsigned long link_time, cur_time;

	u32 ability_match_cfg;		/* last rx config word sampled */
	int ability_match_count;	/* consecutive identical samples */

	/* Debounced match indicators derived from MAC_RX_AUTO_NEG */
	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;	/* clause 37 config words: sent / received */
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes of tg3_fiber_aneg_smachine() */
#define ANEG_OK         0	/* keep stepping the state machine */
#define ANEG_DONE       1	/* negotiation finished */
#define ANEG_TIMER_ENAB 2	/* settle timer armed; keep stepping */
#define ANEG_FAILED     -1	/* invalid config word or internal error */

/* Ticks to let the link settle in the RESTART/COMPLETE_ACK/IDLE states */
#define ANEG_STATE_SETTLE_TIME  10000
5105
/* Advance the software 1000BASE-X auto-negotiation arbitration state
 * machine by one tick (IEEE 802.3 clause 37).  Used for fiber devices
 * without hardware autoneg; fiber_autoneg() calls this roughly once per
 * microsecond until completion.
 *
 * @tp: device private state
 * @ap: persistent state-machine context, zeroed by the caller before the
 *      first invocation
 *
 * Returns ANEG_OK to keep stepping, ANEG_TIMER_ENAB while a settle timer
 * runs, ANEG_DONE when negotiation completes, or ANEG_FAILED on error.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	/* First invocation: start from a clean slate. */
	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	/* Sample the received config word, if any.  ability_match is only
	 * asserted after the same word has been seen on consecutive ticks
	 * (a simple debounce, mirroring the clause 37 ability-match rule).
	 */
	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config words arriving: the line is idle. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Transmit all-zero config words while the link settles. */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Start advertising: full duplex plus the configured
		 * pause capabilities.
		 */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Acknowledge the partner's word by setting ACK in ours. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				/* Partner's word changed: renegotiate. */
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Latch the partner's advertised abilities into the
		 * MR_LP_ADV_* flags.
		 */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		/* 0x0008 has no ANEG_CFG_* name; it feeds MR_TOGGLE_RX, so
		 * presumably the received toggle bit -- TODO confirm against
		 * the MAC_RX_AUTO_NEG register layout.
		 */
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					/* Next-page exchange is not
					 * implemented (see the unimplemented
					 * NEXT_PAGE_WAIT states below).
					 */
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending config words and wait for idles. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
5357
5358 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5359 {
5360         int res = 0;
5361         struct tg3_fiber_aneginfo aninfo;
5362         int status = ANEG_FAILED;
5363         unsigned int tick;
5364         u32 tmp;
5365
5366         tw32_f(MAC_TX_AUTO_NEG, 0);
5367
5368         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5369         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5370         udelay(40);
5371
5372         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5373         udelay(40);
5374
5375         memset(&aninfo, 0, sizeof(aninfo));
5376         aninfo.flags |= MR_AN_ENABLE;
5377         aninfo.state = ANEG_STATE_UNKNOWN;
5378         aninfo.cur_time = 0;
5379         tick = 0;
5380         while (++tick < 195000) {
5381                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5382                 if (status == ANEG_DONE || status == ANEG_FAILED)
5383                         break;
5384
5385                 udelay(1);
5386         }
5387
5388         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5389         tw32_f(MAC_MODE, tp->mac_mode);
5390         udelay(40);
5391
5392         *txflags = aninfo.txconfig;
5393         *rxflags = aninfo.flags;
5394
5395         if (status == ANEG_DONE &&
5396             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5397                              MR_LP_ADV_FULL_DUPLEX)))
5398                 res = 1;
5399
5400         return res;
5401 }
5402
/* One-time initialization of the BCM8002 SerDes PHY via raw MII writes.
 * The register numbers and values are undocumented Broadcom magic; only
 * the steps explained by the inline comments are understood, so preserve
 * the exact write order and delays.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
5452
/* Link setup for fiber devices whose SerDes performs auto-negotiation in
 * hardware (the SG_DIG block).  Programs SG_DIG_CTRL from tp->link_config
 * and inspects SG_DIG_STATUS / MAC_STATUS to decide link state.
 *
 * @mac_status: MAC_STATUS value sampled by the caller.
 *
 * Returns true if the link is considered up.
 */
static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	bool current_link_up;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = false;

	/* All chip revs except 5704 A0/A1 need the MAC_SERDES_CFG
	 * workaround applied around autoneg restarts.
	 */
	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: turn hardware autoneg off if it was on,
		 * then report link up as soon as PCS sync is seen.
		 */
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = true;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* While in parallel-detect grace (serdes_counter), keep
		 * the link up if PCS is synced and no config words are
		 * being received, instead of restarting autoneg.
		 */
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = true;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Pulse SOFT_RESET while writing the wanted control word. */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			/* Autoneg finished: derive pause settings from
			 * what we advertised and what the partner sent.
			 */
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = true;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			/* Autoneg not complete: count down the timeout,
			 * then fall back to parallel detection.
			 */
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = true;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync and no signal: restart the autoneg timeout. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
5597
/* Link setup for fiber devices without hardware autoneg support: run the
 * software state machine via fiber_autoneg(), or force a 1000FD link when
 * autoneg is disabled.
 *
 * @mac_status: MAC_STATUS value sampled by the caller.
 *
 * Returns true if the link is considered up.
 */
static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	bool current_link_up = false;

	/* Nothing to do without PCS sync. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			/* Negotiation succeeded: translate the advertised
			 * pause bits into flow-control settings.
			 */
			u32 local_adv = 0, remote_adv = 0;

			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = true;
		}
		/* Ack SYNC/CFG change events until they stop arriving
		 * (up to 30 attempts).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Parallel-detect style fallback: PCS synced and no
		 * config words means the partner isn't negotiating.
		 */
		mac_status = tr32(MAC_STATUS);
		if (!current_link_up &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = true;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = true;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
5662
/* Bring up / re-check the link on a TBI (ten-bit interface) fiber PHY.
 * Always returns 0; link-state changes are propagated through
 * tg3_test_and_report_link_chg() / tg3_link_report().
 * NOTE(review): presumably runs under tp->lock like the other
 * tg3_setup_*_phy() variants -- confirm against callers.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	bool current_link_up;
	int i;

	/* Snapshot current link parameters so we can detect, at the end,
	 * a flow-control/speed/duplex change that still deserves a report
	 * even when the up/down state itself did not flip.
	 */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	if (!tg3_flag(tp, HW_AUTONEG) &&
	    tp->link_up &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		/* Link already up and stable (PCS synced + signal detect,
		 * no pending config change): just ack the change bits and
		 * skip the full bring-up sequence.
		 */
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Switch the MAC port into TBI mode for the fiber interface. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = false;
	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Mark the status block updated while clearing any stale
	 * link-change indication.
	 */
	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Repeatedly ack the sync/config change bits until they stay
	 * clear, or give up after 100 iterations.
	 */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = false;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			/* Briefly pulse SEND_CONFIGS to prod the link
			 * partner when autoneg has stalled.
			 */
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up) {
		/* This path always reports 1000/full when up. */
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_UNKNOWN;
		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* If the up/down state did not change, still report when flow
	 * control, speed or duplex differ from the snapshot above.
	 */
	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
5765
5766 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5767 {
5768         int err = 0;
5769         u32 bmsr, bmcr;
5770         u16 current_speed = SPEED_UNKNOWN;
5771         u8 current_duplex = DUPLEX_UNKNOWN;
5772         bool current_link_up = false;
5773         u32 local_adv, remote_adv, sgsr;
5774
5775         if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5776              tg3_asic_rev(tp) == ASIC_REV_5720) &&
5777              !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5778              (sgsr & SERDES_TG3_SGMII_MODE)) {
5779
5780                 if (force_reset)
5781                         tg3_phy_reset(tp);
5782
5783                 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5784
5785                 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5786                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5787                 } else {
5788                         current_link_up = true;
5789                         if (sgsr & SERDES_TG3_SPEED_1000) {
5790                                 current_speed = SPEED_1000;
5791                                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5792                         } else if (sgsr & SERDES_TG3_SPEED_100) {
5793                                 current_speed = SPEED_100;
5794                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5795                         } else {
5796                                 current_speed = SPEED_10;
5797                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5798                         }
5799
5800                         if (sgsr & SERDES_TG3_FULL_DUPLEX)
5801                                 current_duplex = DUPLEX_FULL;
5802                         else
5803                                 current_duplex = DUPLEX_HALF;
5804                 }
5805
5806                 tw32_f(MAC_MODE, tp->mac_mode);
5807                 udelay(40);
5808
5809                 tg3_clear_mac_status(tp);
5810
5811                 goto fiber_setup_done;
5812         }
5813
5814         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5815         tw32_f(MAC_MODE, tp->mac_mode);
5816         udelay(40);
5817
5818         tg3_clear_mac_status(tp);
5819
5820         if (force_reset)
5821                 tg3_phy_reset(tp);
5822
5823         tp->link_config.rmt_adv = 0;
5824
5825         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5826         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5827         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5828                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5829                         bmsr |= BMSR_LSTATUS;
5830                 else
5831                         bmsr &= ~BMSR_LSTATUS;
5832         }
5833
5834         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5835
5836         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5837             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5838                 /* do nothing, just check for link up at the end */
5839         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5840                 u32 adv, newadv;
5841
5842                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5843                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5844                                  ADVERTISE_1000XPAUSE |
5845                                  ADVERTISE_1000XPSE_ASYM |
5846                                  ADVERTISE_SLCT);
5847
5848                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5849                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5850
5851                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5852                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5853                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5854                         tg3_writephy(tp, MII_BMCR, bmcr);
5855
5856                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5857                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5858                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5859
5860                         return err;
5861                 }
5862         } else {
5863                 u32 new_bmcr;
5864
5865                 bmcr &= ~BMCR_SPEED1000;
5866                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5867
5868                 if (tp->link_config.duplex == DUPLEX_FULL)
5869                         new_bmcr |= BMCR_FULLDPLX;
5870
5871                 if (new_bmcr != bmcr) {
5872                         /* BMCR_SPEED1000 is a reserved bit that needs
5873                          * to be set on write.
5874                          */
5875                         new_bmcr |= BMCR_SPEED1000;
5876
5877                         /* Force a linkdown */
5878                         if (tp->link_up) {
5879                                 u32 adv;
5880
5881                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5882                                 adv &= ~(ADVERTISE_1000XFULL |
5883                                          ADVERTISE_1000XHALF |
5884                                          ADVERTISE_SLCT);
5885                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5886                                 tg3_writephy(tp, MII_BMCR, bmcr |
5887                                                            BMCR_ANRESTART |
5888                                                            BMCR_ANENABLE);
5889                                 udelay(10);
5890                                 tg3_carrier_off(tp);
5891                         }
5892                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5893                         bmcr = new_bmcr;
5894                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5895                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5896                         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5897                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5898                                         bmsr |= BMSR_LSTATUS;
5899                                 else
5900                                         bmsr &= ~BMSR_LSTATUS;
5901                         }
5902                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5903                 }
5904         }
5905
5906         if (bmsr & BMSR_LSTATUS) {
5907                 current_speed = SPEED_1000;
5908                 current_link_up = true;
5909                 if (bmcr & BMCR_FULLDPLX)
5910                         current_duplex = DUPLEX_FULL;
5911                 else
5912                         current_duplex = DUPLEX_HALF;
5913
5914                 local_adv = 0;
5915                 remote_adv = 0;
5916
5917                 if (bmcr & BMCR_ANENABLE) {
5918                         u32 common;
5919
5920                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5921                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5922                         common = local_adv & remote_adv;
5923                         if (common & (ADVERTISE_1000XHALF |
5924                                       ADVERTISE_1000XFULL)) {
5925                                 if (common & ADVERTISE_1000XFULL)
5926                                         current_duplex = DUPLEX_FULL;
5927                                 else
5928                                         current_duplex = DUPLEX_HALF;
5929
5930                                 tp->link_config.rmt_adv =
5931                                            mii_adv_to_ethtool_adv_x(remote_adv);
5932                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5933                                 /* Link is up via parallel detect */
5934                         } else {
5935                                 current_link_up = false;
5936                         }
5937                 }
5938         }
5939
5940 fiber_setup_done:
5941         if (current_link_up && current_duplex == DUPLEX_FULL)
5942                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5943
5944         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5945         if (tp->link_config.active_duplex == DUPLEX_HALF)
5946                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5947
5948         tw32_f(MAC_MODE, tp->mac_mode);
5949         udelay(40);
5950
5951         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5952
5953         tp->link_config.active_speed = current_speed;
5954         tp->link_config.active_duplex = current_duplex;
5955
5956         tg3_test_and_report_link_chg(tp, current_link_up);
5957         return err;
5958 }
5959
/* Periodic poller that toggles between autoneg and parallel detection
 * on SerDes links: if autoneg never completes but signal is present
 * and no config code words arrive, force the link up by parallel
 * detect; if config words appear later, switch back to autoneg.
 * NOTE(review): appears to be called from a periodic timer path --
 * confirm against callers.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!tp->link_up &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
					 MII_TG3_DSP_EXP1_INT_STAT);
			/* Read twice; the first read may return a latched
			 * (stale) value.
			 */
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (tp->link_up &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				 MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

		}
	}
}
6019
/* Top-level PHY setup: dispatch to the fiber/MII-serdes/copper variant,
 * then apply chip-wide fixups that depend on the resulting link state
 * (clock prescaler on 5784 A-steps, TX slot time, stats coalescing,
 * ASPM L1 threshold).  Returns the error code from the variant called.
 */
static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		u32 scale;

		/* Pick the GRC prescaler to match the current MAC clock. */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		/* Preserve the jumbo-frame and countdown fields on these
		 * chips instead of clobbering them.
		 */
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	/* 1000/half needs the longer (0xff) slot time. */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Only collect statistics while the link is up. */
		if (tp->link_up) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		/* With link down use the configured L1 entry threshold;
		 * with link up max it out.
		 */
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!tp->link_up)
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
6085
6086 /* tp->lock must be held */
6087 static u64 tg3_refclk_read(struct tg3 *tp)
6088 {
6089         u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6090         return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6091 }
6092
/* tp->lock must be held */
/* Load a new 64-bit value into the EAV reference clock.  The clock is
 * stopped, both 32-bit halves are programmed, then the clock is
 * resumed -- keep this ordering; it is what the hardware sequence
 * below implements.
 */
static void tg3_refclk_write(struct tg3 *tp, u64 newval)
{
	u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);

	/* Halt the counter before touching the LSB/MSB registers. */
	tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
	/* Flushing write (tw32_f) restarts the counter. */
	tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
}
6103
6104 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6105 static inline void tg3_full_unlock(struct tg3 *tp);
6106 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6107 {
6108         struct tg3 *tp = netdev_priv(dev);
6109
6110         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6111                                 SOF_TIMESTAMPING_RX_SOFTWARE |
6112                                 SOF_TIMESTAMPING_SOFTWARE;
6113
6114         if (tg3_flag(tp, PTP_CAPABLE)) {
6115                 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6116                                         SOF_TIMESTAMPING_RX_HARDWARE |
6117                                         SOF_TIMESTAMPING_RAW_HARDWARE;
6118         }
6119
6120         if (tp->ptp_clock)
6121                 info->phc_index = ptp_clock_index(tp->ptp_clock);
6122         else
6123                 info->phc_index = -1;
6124
6125         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6126
6127         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6128                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6129                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6130                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6131         return 0;
6132 }
6133
6134 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6135 {
6136         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6137         bool neg_adj = false;
6138         u32 correction = 0;
6139
6140         if (ppb < 0) {
6141                 neg_adj = true;
6142                 ppb = -ppb;
6143         }
6144
6145         /* Frequency adjustment is performed using hardware with a 24 bit
6146          * accumulator and a programmable correction value. On each clk, the
6147          * correction value gets added to the accumulator and when it
6148          * overflows, the time counter is incremented/decremented.
6149          *
6150          * So conversion from ppb to correction value is
6151          *              ppb * (1 << 24) / 1000000000
6152          */
6153         correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6154                      TG3_EAV_REF_CLK_CORRECT_MASK;
6155
6156         tg3_full_lock(tp, 0);
6157
6158         if (correction)
6159                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6160                      TG3_EAV_REF_CLK_CORRECT_EN |
6161                      (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6162         else
6163                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6164
6165         tg3_full_unlock(tp);
6166
6167         return 0;
6168 }
6169
6170 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6171 {
6172         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6173
6174         tg3_full_lock(tp, 0);
6175         tp->ptp_adjust += delta;
6176         tg3_full_unlock(tp);
6177
6178         return 0;
6179 }
6180
6181 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
6182 {
6183         u64 ns;
6184         u32 remainder;
6185         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6186
6187         tg3_full_lock(tp, 0);
6188         ns = tg3_refclk_read(tp);
6189         ns += tp->ptp_adjust;
6190         tg3_full_unlock(tp);
6191
6192         ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
6193         ts->tv_nsec = remainder;
6194
6195         return 0;
6196 }
6197
6198 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6199                            const struct timespec *ts)
6200 {
6201         u64 ns;
6202         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6203
6204         ns = timespec_to_ns(ts);
6205
6206         tg3_full_lock(tp, 0);
6207         tg3_refclk_write(tp, ns);
6208         tp->ptp_adjust = 0;
6209         tg3_full_unlock(tp);
6210
6211         return 0;
6212 }
6213
/* PTP enable callback.  Only PTP_CLK_REQ_PEROUT on index 0 is
 * supported, and only as a one-shot pulse (period must be 0): the
 * start time is armed into watchdog 0 of the EAV block.  Returns 0 on
 * success, -EINVAL for bad perout parameters, -EOPNOTSUPP for any
 * other request type.
 */
static int tg3_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
	u32 clock_ctl;
	int rval = 0;

	switch (rq->type) {
	case PTP_CLK_REQ_PEROUT:
		/* Only a single timesync output (index 0) exists. */
		if (rq->perout.index != 0)
			return -EINVAL;

		tg3_full_lock(tp, 0);
		clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
		clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;

		if (on) {
			u64 nsec;

			nsec = rq->perout.start.sec * 1000000000ULL +
			       rq->perout.start.nsec;

			if (rq->perout.period.sec || rq->perout.period.nsec) {
				netdev_warn(tp->dev,
					    "Device supports only a one-shot timesync output, period must be 0\n");
				rval = -EINVAL;
				goto err_out;
			}

			/* The watchdog registers hold only 63 bits of the
			 * start time (32 LSB + 31 MSB under the mask).
			 */
			if (nsec & (1ULL << 63)) {
				netdev_warn(tp->dev,
					    "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
				rval = -EINVAL;
				goto err_out;
			}

			tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
			tw32(TG3_EAV_WATCHDOG0_MSB,
			     TG3_EAV_WATCHDOG0_EN |
			     ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));

			tw32(TG3_EAV_REF_CLCK_CTL,
			     clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
		} else {
			/* Disarm the watchdog and restore the control reg. */
			tw32(TG3_EAV_WATCHDOG0_MSB, 0);
			tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
		}

err_out:
		tg3_full_unlock(tp);
		return rval;

	default:
		break;
	}

	return -EOPNOTSUPP;
}
6272
/* PTP clock capabilities and callbacks for the tg3 EAV reference
 * clock.  One periodic output (the one-shot watchdog, see
 * tg3_ptp_enable()); no alarms, external timestamps, or PPS.
 */
static const struct ptp_clock_info tg3_ptp_caps = {
	.owner		= THIS_MODULE,
	.name		= "tg3 clock",
	.max_adj	= 250000000,	/* max frequency offset, ppb */
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 1,
	.pps		= 0,
	.adjfreq	= tg3_ptp_adjfreq,
	.adjtime	= tg3_ptp_adjtime,
	.gettime	= tg3_ptp_gettime,
	.settime	= tg3_ptp_settime,
	.enable		= tg3_ptp_enable,
};
6287
6288 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6289                                      struct skb_shared_hwtstamps *timestamp)
6290 {
6291         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6292         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6293                                            tp->ptp_adjust);
6294 }
6295
6296 /* tp->lock must be held */
6297 static void tg3_ptp_init(struct tg3 *tp)
6298 {
6299         if (!tg3_flag(tp, PTP_CAPABLE))
6300                 return;
6301
6302         /* Initialize the hardware clock to the system time. */
6303         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6304         tp->ptp_adjust = 0;
6305         tp->ptp_info = tg3_ptp_caps;
6306 }
6307
6308 /* tp->lock must be held */
6309 static void tg3_ptp_resume(struct tg3 *tp)
6310 {
6311         if (!tg3_flag(tp, PTP_CAPABLE))
6312                 return;
6313
6314         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6315         tp->ptp_adjust = 0;
6316 }
6317
6318 static void tg3_ptp_fini(struct tg3 *tp)
6319 {
6320         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6321                 return;
6322
6323         ptp_clock_unregister(tp->ptp_clock);
6324         tp->ptp_clock = NULL;
6325         tp->ptp_adjust = 0;
6326 }
6327
/* Return the irq_sync flag.  Nonzero indicates interrupt handling is
 * being quiesced -- NOTE(review): semantics inferred from the field
 * name; confirm against the tg3_full_lock()/irq sync code.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
6332
6333 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6334 {
6335         int i;
6336
6337         dst = (u32 *)((u8 *)dst + off);
6338         for (i = 0; i < len; i += sizeof(u32))
6339                 *dst++ = tr32(off + i);
6340 }
6341
/* Dump all register blocks of a legacy (non-PCI-Express) chip into
 * regs[] at their natural byte offsets, for tg3_dump_state().  The
 * (base, length) pairs below enumerate each hardware block; some are
 * read only when the corresponding feature flag is set.
 */
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	/* Per-vector coalescing registers exist only with MSI-X. */
	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	/* 5705+ chips have no separate TX CPU block. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
6391
/* Diagnostic dump on error: print every non-zero group of four
 * register words, then the per-queue hardware status block and NAPI
 * state.  Uses GFP_ATOMIC since it may run from non-sleeping context;
 * silently returns if the temporary buffer cannot be allocated.
 */
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs)
		return;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	/* Print 16 bytes per line, skipping all-zero groups (the buffer
	 * was zero-filled by kzalloc, so unread gaps stay quiet).
	 */
	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
6447
6448 /* This is called whenever we suspect that the system chipset is re-
6449  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6450  * is bogus tx completions. We try to recover by setting the
6451  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6452  * in the workqueue.
6453  */
6454 static void tg3_tx_recover(struct tg3 *tp)
6455 {
6456         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6457                tp->write32_tx_mbox == tg3_write_indirect_mbox);
6458
6459         netdev_warn(tp->dev,
6460                     "The system may be re-ordering memory-mapped I/O "
6461                     "cycles to the network device, attempting to recover. "
6462                     "Please report the problem to the driver maintainer "
6463                     "and include system chipset information.\n");
6464
6465         tg3_flag_set(tp, TX_RECOVERY_PENDING);
6466 }
6467
6468 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6469 {
6470         /* Tell compiler to fetch tx indices from memory. */
6471         barrier();
6472         return tnapi->tx_pending -
6473                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6474 }
6475
6476 /* Tigon3 never reports partial packet sends.  So we do not
6477  * need special logic to handle SKBs that have not had all
6478  * of their frags sent yet, like SunGEM does.
6479  */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	/* hw_idx: completion index reported by the chip in the status block. */
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	/* With TSS the TX queue numbering is shifted down by one
	 * relative to the napi vector index.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A NULL skb at a "completed" slot means the completion
		 * index is bogus — trigger MMIO-reorder recovery.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* Deliver the hardware TX timestamp if one was requested
		 * for this descriptor.
		 */
		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
			struct skb_shared_hwtstamps timestamp;
			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;

			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);

			skb_tstamp_tx(skb, &timestamp);
		}

		/* Unmap the linear (head) portion of the skb. */
		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		/* Skip extra slots consumed when this segment was split
		 * across multiple BDs (presumably a DMA workaround —
		 * TODO confirm against the xmit path).
		 */
		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			/* Every frag slot must be skb-less and strictly
			 * before hw_idx; otherwise the completion index
			 * is corrupt.
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			/* Skip split-BD slots, as for the head above. */
			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	/* Report completed work for byte queue limits. */
	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Wake a stopped queue once enough space is free; re-check
	 * under the tx lock to close the race with the xmit path.
	 */
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
6579
6580 static void tg3_frag_free(bool is_frag, void *data)
6581 {
6582         if (is_frag)
6583                 put_page(virt_to_head_page(data));
6584         else
6585                 kfree(data);
6586 }
6587
6588 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6589 {
6590         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6591                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6592
6593         if (!ri->data)
6594                 return;
6595
6596         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6597                          map_sz, PCI_DMA_FROMDEVICE);
6598         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6599         ri->data = NULL;
6600 }
6601
6602
6603 /* Returns size of skb allocated or < 0 on error.
6604  *
6605  * We only need to fill in the address because the other members
6606  * of the RX descriptor are invariant, see tg3_init_rings.
6607  *
6608  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6609  * posting buffers we only dirty the first cache line of the RX
6610  * descriptor (containing the address).  Whereas for the RX status
6611  * buffers the cpu only reads the last cacheline of the RX descriptor
6612  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6613  */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked,
			     unsigned int *frag_size)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	/* Pick the target ring (standard or jumbo) from the opaque
	 * cookie and mask the index to that ring's size.
	 */
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	/* The allocation must also cover the RX offset headroom and the
	 * skb_shared_info that build_skb() places at the end.
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	if (skb_size <= PAGE_SIZE) {
		data = netdev_alloc_frag(skb_size);
		/* Non-zero *frag_size tells callers (and tg3_frag_free)
		 * this is a page fragment.
		 */
		*frag_size = skb_size;
	} else {
		data = kmalloc(skb_size, GFP_ATOMIC);
		*frag_size = 0;
	}
	if (!data)
		return -ENOMEM;

	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 data_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
		/* Mapping failed: release the buffer, leave the ring
		 * slot untouched per the contract above.
		 */
		tg3_frag_free(skb_size <= PAGE_SIZE, data);
		return -EIO;
	}

	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	/* Only the address words are written; all other descriptor
	 * fields are invariant (see tg3_init_rings).
	 */
	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}
6678
6679 /* We only need to move over in the address because the other
6680  * members of the RX descriptor are invariant.  See notes above
6681  * tg3_alloc_rx_data for full details.
6682  */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	/* Buffers are always recycled from the default (napi[0]) ring set. */
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		/* Unknown cookie — nothing to recycle. */
		return;
	}

	/* Move the buffer pointer, its DMA unmap address and the BD
	 * address words over to the destination slot.
	 */
	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	/* Clearing ->data marks the source slot free; lockless readers
	 * (e.g. tg3_rx_prodring_xfer) test this field.
	 */
	src_map->data = NULL;
}
6728
6729 /* The RX ring scheme is composed of multiple rings which post fresh
6730  * buffers to the chip, and one special ring the chip uses to report
6731  * status back to the host.
6732  *
6733  * The special ring reports the status of received packets to the
6734  * host.  The chip does not write into the original descriptor the
6735  * RX buffer was obtained from.  The chip simply takes the original
6736  * descriptor as provided by the host, updates the status and length
6737  * field, then writes this into the next status ring entry.
6738  *
6739  * Each ring the host uses to post buffers to the chip is described
6740  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
6741  * it is first placed into the on-chip ram.  When the packet's length
6742  * is known, it walks down the TG3_BDINFO entries to select the ring.
6743  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6744  * which is within the range of the new packet's length is chosen.
6745  *
6746  * The "separate ring for rx status" scheme may sound queer, but it makes
6747  * sense from a cache coherency perspective.  If only the host writes
6748  * to the buffer post rings, and only the chip writes to the rx status
6749  * rings, then cache lines never move beyond shared-modified state.
6750  * If both the host and chip were to write into the same ring, cache line
6751  * eviction could occur since both entities want it in an exclusive state.
6752  */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		u8 *data;
		u64 tstamp = 0;

		/* The opaque cookie (written by us when posting the
		 * buffer) identifies the producer ring and slot.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		/* Drop errored frames; the lone odd-nibble MII error is
		 * tolerated.
		 */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		prefetch(data + TG3_RX_OFFSET(tp));
		/* The reported length includes the FCS; strip it. */
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		/* Latch the hardware RX timestamp for PTP v1/v2 frames. */
		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV1 ||
		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
		     RXD_FLAG_PTPSTAT_PTPV2) {
			tstamp = tr32(TG3_RX_TSTAMP_LSB);
			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
		}

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;
			unsigned int frag_size;

			/* Large frame: hand the existing DMA buffer to
			 * the stack and post a freshly allocated one.
			 */
			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						    *post_ptr, &frag_size);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			skb = build_skb(data, frag_size);
			if (!skb) {
				tg3_frag_free(frag_size != 0, data);
				goto drop_it_no_recycle;
			}
			skb_reserve(skb, TG3_RX_OFFSET(tp));
			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->data = NULL;

		} else {
			/* Small frame: copy into a new skb and recycle
			 * the original buffer back onto the ring.
			 */
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
			if (skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(skb->data,
			       data + TG3_RX_OFFSET(tp),
			       len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
		}

		skb_put(skb, len);
		if (tstamp)
			tg3_hwclock_to_timestamp(tp, tstamp,
						 skb_hwtstamps(skb));

		/* A csum field of 0xffff is treated as "hardware
		 * verified the TCP/UDP checksum".
		 */
		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		/* Drop oversized frames unless they carry a VLAN tag. */
		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically flush the standard producer index to the
		 * chip so it does not starve for buffers mid-burst.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		/* Sync BD data before updating mailbox */
		wmb();

		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		/* With RSS, napi[1] transfers the per-vector shadow rings
		 * to the hardware ring; schedule it to do the refill.
		 */
		if (tnapi != &tp->napi[1]) {
			tp->rx_refill = true;
			napi_schedule(&tp->napi[1].napi);
		}
	}

	return received;
}
6958
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Ack the link-change bit in the status block
			 * while preserving the remaining status bits.
			 */
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				/* phylib owns link management; just clear
				 * the MAC attention bits (flushed write +
				 * settle delay).
				 */
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, false);
			spin_unlock(&tp->lock);
		}
	}
}
6982
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	/* Transfer standard-ring buffers from the source ring set to
	 * the destination ring set in wrap-limited contiguous chunks.
	 */
	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		/* Contiguous entries available without wrapping the
		 * source ring.
		 */
		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		/* Also limit to contiguous space in the destination. */
		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		/* Truncate at the first destination slot still holding
		 * a buffer and report -ENOSPC so the caller retries.
		 */
		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		/* Copy the BD address words over as well. */
		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	/* Same transfer procedure for the jumbo ring. */
	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	/* 0 on full transfer, -ENOSPC if any destination slot was busy. */
	return err;
}
7108
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		/* Recovery pending means a chip reset is coming; stop
		 * processing in this poll.
		 */
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	/* No RX return ring on this vector — nothing more to do. */
	if (!tnapi->rx_rcb_prod_idx)
		return work_done;

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		/* napi[1] collects the per-vector shadow rings into the
		 * single hardware producer ring (napi[0]).
		 */
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		tp->rx_refill = false;
		for (i = 1; i <= tp->rxq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		/* Make the BD updates visible before the mailbox writes. */
		wmb();

		/* Only write a mailbox if the index actually moved. */
		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		/* A transfer failed (destination full); force another
		 * pass to retry the refill.
		 */
		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
7159
7160 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7161 {
7162         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7163                 schedule_work(&tp->reset_task);
7164 }
7165
static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	/* Wait for any queued/running reset task to finish before
	 * clearing the pending flags, so a concurrent run cannot leave
	 * stale state behind.
	 */
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}
7172
/* NAPI poll handler for the MSI-X rx/tx vectors (tp->napi[1..]).
 * Unlike tg3_poll(), this path does not service link events or chip
 * error status; it only reaps rx/tx work via tg3_poll_work() and
 * re-enables the vector's interrupt when the ring indices show no
 * more pending work.
 */
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		/* A TX ring inconsistency was detected; bail out and let
		 * the reset task recover the device.
		 */
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {

			/* This test here is not race free, but will reduce
			 * the number of interrupts by looping again.
			 */
			if (tnapi == &tp->napi[1] && tp->rx_refill)
				continue;

			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

			/* This test here is synchronized by napi_schedule()
			 * and napi_complete() to close the race condition.
			 */
			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
				tw32(HOSTCC_MODE, tp->coalesce_mode |
						  HOSTCC_MODE_ENABLE |
						  tnapi->coal_now);
			}
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
7232
/* Examine the chip's error status registers after the status block
 * reported SD_STATUS_ERROR.  Benign conditions (mbuf low-water-mark
 * attention, MSI request status) are masked off; anything else dumps
 * chip state and schedules a reset.  The ERROR_PROCESSED flag guards
 * against handling the same error event more than once.
 */
static void tg3_process_error(struct tg3 *tp)
{
	u32 val;
	bool real_error = false;

	if (tg3_flag(tp, ERROR_PROCESSED))
		return;

	/* Check Flow Attention register */
	val = tr32(HOSTCC_FLOW_ATTN);
	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
		real_error = true;
	}

	/* Any non-zero read or write DMA status is treated as fatal. */
	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (!real_error)
		return;

	tg3_dump_state(tp);

	tg3_flag_set(tp, ERROR_PROCESSED);
	tg3_reset_task_schedule(tp);
}
7266
/* NAPI poll handler for vector 0.  Besides rx/tx work it also services
 * link state changes and chip error events, and supports both tagged
 * and non-tagged status block modes.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		/* TX ring inconsistency detected; let the reset task
		 * recover the device.
		 */
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
7314
7315 static void tg3_napi_disable(struct tg3 *tp)
7316 {
7317         int i;
7318
7319         for (i = tp->irq_cnt - 1; i >= 0; i--)
7320                 napi_disable(&tp->napi[i].napi);
7321 }
7322
7323 static void tg3_napi_enable(struct tg3 *tp)
7324 {
7325         int i;
7326
7327         for (i = 0; i < tp->irq_cnt; i++)
7328                 napi_enable(&tp->napi[i].napi);
7329 }
7330
7331 static void tg3_napi_init(struct tg3 *tp)
7332 {
7333         int i;
7334
7335         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7336         for (i = 1; i < tp->irq_cnt; i++)
7337                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7338 }
7339
7340 static void tg3_napi_fini(struct tg3 *tp)
7341 {
7342         int i;
7343
7344         for (i = 0; i < tp->irq_cnt; i++)
7345                 netif_napi_del(&tp->napi[i].napi);
7346 }
7347
/* Stop all network activity: NAPI polling, carrier, and TX queues.
 * trans_start is refreshed first so the netdev watchdog does not see
 * a stale timestamp and fire a bogus TX timeout while we are stopped.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies; /* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_carrier_off(tp->dev);
	netif_tx_disable(tp->dev);
}
7355
/* tp->lock must be held */
static inline void tg3_netif_start(struct tg3 *tp)
{
	tg3_ptp_resume(tp);

	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	if (tp->link_up)
		netif_carrier_on(tp->dev);

	tg3_napi_enable(tp);
	/* Mark the status block updated so the next poll/interrupt pass
	 * checks for work that may have accumulated while stopped.
	 */
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
7374
/* Block IRQ-driven NAPI scheduling and wait for in-flight handlers to
 * finish.  The interrupt paths test tp->irq_sync (via tg3_irq_sync())
 * and skip napi_schedule() when it is set; smp_mb() orders the flag
 * store before we synchronize against each vector.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	/* Nested quiesce is a driver logic error. */
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}
7387
7388 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7389  * If irq_sync is non-zero, then the IRQ handler must be synchronized
7390  * with as well.  Most of the time, this is not necessary except when
7391  * shutting down the device.
7392  */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	/* irq_sync != 0 additionally waits out any interrupt handlers
	 * that are already running (needed when shutting down).
	 */
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
7399
/* Release the lock taken by tg3_full_lock(). */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
7404
7405 /* One-shot MSI handler - Chip automatically disables interrupt
7406  * after sending MSI so driver doesn't have to do it.
7407  */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the cache lines the poll handler will touch first. */
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	/* Do not schedule NAPI while the driver is quiescing IRQs. */
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}
7422
7423 /* MSI ISR - No need to check for interrupt sharing and no need to
7424  * flush status block and interrupt mailbox. PCI ordering rules
7425  * guarantee that MSI will arrive after the status block.
7426  */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the cache lines the poll handler will touch first. */
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	/* Do not schedule NAPI while the driver is quiescing IRQs. */
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
7448
/* Interrupt handler for (possibly shared) INTx mode with non-tagged
 * status blocks.  Returns IRQ_NONE when the interrupt was not ours.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
7497
/* INTx interrupt handler for chips using tagged status blocks: the
 * status_tag, rather than SD_STATUS_UPDATED, tells us whether new
 * events have arrived since the last interrupt we handled.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
7549
/* ISR for interrupt test.  If the status block was updated or the
 * chip reports an active INTx, disable chip interrupts and report the
 * IRQ handled so the test observes exactly one delivery.
 */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
7564
7565 #ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll entry point: invoke the interrupt handler for each vector
 * directly, for contexts (e.g. netconsole) where IRQs may be blocked.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	/* Skip while the driver is quiescing interrupts. */
	if (tg3_irq_sync(tp))
		return;

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
7577 #endif
7578
/* net_device watchdog callback: a TX queue has stalled.  Optionally
 * log and dump chip state, then schedule a full reset from process
 * context.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}
7590
7591 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7592 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7593 {
7594         u32 base = (u32) mapping & 0xffffffff;
7595
7596         return (base > 0xffffdcc0) && (base + len + 8 < base);
7597 }
7598
7599 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7600  * of any 4GB boundaries: 4G, 8G, etc
7601  */
7602 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7603                                            u32 len, u32 mss)
7604 {
7605         if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7606                 u32 base = (u32) mapping & 0xffffffff;
7607
7608                 return ((base + len + (mss & 0x3fff)) < base);
7609         }
7610         return 0;
7611 }
7612
7613 /* Test for DMA addresses > 40-bit */
7614 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7615                                           int len)
7616 {
7617 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7618         if (tg3_flag(tp, 40BIT_DMA_BUG))
7619                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7620         return 0;
7621 #else
7622         return 0;
7623 #endif
7624 }
7625
7626 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7627                                  dma_addr_t mapping, u32 len, u32 flags,
7628                                  u32 mss, u32 vlan)
7629 {
7630         txbd->addr_hi = ((u64) mapping >> 32);
7631         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7632         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7633         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7634 }
7635
/* Queue one DMA mapping on the TX ring, splitting it into multiple
 * descriptors when it exceeds tp->dma_limit and screening it against
 * the chip's DMA errata.
 *
 * @entry:  in/out ring index; advanced past every descriptor written.
 * @budget: in/out descriptor budget; decremented per descriptor in
 *          the dma_limit path (not decremented in the simple path).
 *
 * Returns true when the mapping trips a hardware bug or the budget
 * runs out mid-split, in which case the caller must fall back to the
 * DMA workaround path.
 */
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		/* Only the final descriptor of the split carries END. */
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			/* Mark this BD as part of a split mapping so
			 * tg3_tx_skb_unmap() can skip over it.
			 */
			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				/* Out of budget: unwind the 'fragmented'
				 * marking on the last BD we wrote.
				 */
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}
7698
/* Unmap the skb occupying the TX ring starting at @entry and clear its
 * ring slot.  @last is the index of the final skb fragment that was
 * mapped (-1 when only the linear head was mapped).  Descriptors
 * marked 'fragmented' were produced by the dma_limit splitting in
 * tg3_tx_frag_set() and belong to the same mapping, so they are
 * stepped over rather than unmapped individually.
 */
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	/* Unmap the linear head. */
	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
7736
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Copy the skb into a fresh linear skb whose single mapping does not
 * trip the errata, then queue it with tg3_tx_frag_set().  The 5701
 * path allocates extra headroom (see more_headroom) so the copy's
 * data can be better aligned.  Returns 0 on success, -1 on
 * allocation/mapping failure.  The original skb is consumed either
 * way and *pskb is updated to point at the new skb (or NULL).
 */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (tg3_asic_rev(tp) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			/* Single linear buffer, so it is the last BD. */
			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				/* Still hit a hw bug; undo and drop. */
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb(new_skb);
				ret = -1;
			}
		}
	}

	dev_kfree_skb(skb);
	*pskb = new_skb;
	return ret;
}
7791
7792 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7793
/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.  The skb is segmented in
 * software and each resulting segment is re-submitted through
 * tg3_start_xmit().  The original skb is always consumed.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
		netif_stop_queue(tp->dev);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	/* Segment in software with TSO masked off. */
	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
7834
7835 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7836  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7837  */
7838 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7839 {
7840         struct tg3 *tp = netdev_priv(dev);
7841         u32 len, entry, base_flags, mss, vlan = 0;
7842         u32 budget;
7843         int i = -1, would_hit_hwbug;
7844         dma_addr_t mapping;
7845         struct tg3_napi *tnapi;
7846         struct netdev_queue *txq;
7847         unsigned int last;
7848
7849         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7850         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7851         if (tg3_flag(tp, ENABLE_TSS))
7852                 tnapi++;
7853
7854         budget = tg3_tx_avail(tnapi);
7855
7856         /* We are running in BH disabled context with netif_tx_lock
7857          * and TX reclaim runs via tp->napi.poll inside of a software
7858          * interrupt.  Furthermore, IRQ processing runs lockless so we have
7859          * no IRQ context deadlocks to worry about either.  Rejoice!
7860          */
7861         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7862                 if (!netif_tx_queue_stopped(txq)) {
7863                         netif_tx_stop_queue(txq);
7864
7865                         /* This is a hard error, log it. */
7866                         netdev_err(dev,
7867                                    "BUG! Tx Ring full when queue awake!\n");
7868                 }
7869                 return NETDEV_TX_BUSY;
7870         }
7871
7872         entry = tnapi->tx_prod;
7873         base_flags = 0;
7874         if (skb->ip_summed == CHECKSUM_PARTIAL)
7875                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7876
7877         mss = skb_shinfo(skb)->gso_size;
7878         if (mss) {
7879                 struct iphdr *iph;
7880                 u32 tcp_opt_len, hdr_len;
7881
7882                 if (skb_header_cloned(skb) &&
7883                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7884                         goto drop;
7885
7886                 iph = ip_hdr(skb);
7887                 tcp_opt_len = tcp_optlen(skb);
7888
7889                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7890
7891                 if (!skb_is_gso_v6(skb)) {
7892                         iph->check = 0;
7893                         iph->tot_len = htons(mss + hdr_len);
7894                 }
7895
7896                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7897                     tg3_flag(tp, TSO_BUG))
7898                         return tg3_tso_bug(tp, skb);
7899
7900                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7901                                TXD_FLAG_CPU_POST_DMA);
7902
7903                 if (tg3_flag(tp, HW_TSO_1) ||
7904                     tg3_flag(tp, HW_TSO_2) ||
7905                     tg3_flag(tp, HW_TSO_3)) {
7906                         tcp_hdr(skb)->check = 0;
7907                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7908                 } else
7909                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7910                                                                  iph->daddr, 0,
7911                                                                  IPPROTO_TCP,
7912                                                                  0);
7913
7914                 if (tg3_flag(tp, HW_TSO_3)) {
7915                         mss |= (hdr_len & 0xc) << 12;
7916                         if (hdr_len & 0x10)
7917                                 base_flags |= 0x00000010;
7918                         base_flags |= (hdr_len & 0x3e0) << 5;
7919                 } else if (tg3_flag(tp, HW_TSO_2))
7920                         mss |= hdr_len << 9;
7921                 else if (tg3_flag(tp, HW_TSO_1) ||
7922                          tg3_asic_rev(tp) == ASIC_REV_5705) {
7923                         if (tcp_opt_len || iph->ihl > 5) {
7924                                 int tsflags;
7925
7926                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7927                                 mss |= (tsflags << 11);
7928                         }
7929                 } else {
7930                         if (tcp_opt_len || iph->ihl > 5) {
7931                                 int tsflags;
7932
7933                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7934                                 base_flags |= tsflags << 12;
7935                         }
7936                 }
7937         }
7938
7939         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7940             !mss && skb->len > VLAN_ETH_FRAME_LEN)
7941                 base_flags |= TXD_FLAG_JMB_PKT;
7942
7943         if (vlan_tx_tag_present(skb)) {
7944                 base_flags |= TXD_FLAG_VLAN;
7945                 vlan = vlan_tx_tag_get(skb);
7946         }
7947
7948         if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7949             tg3_flag(tp, TX_TSTAMP_EN)) {
7950                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7951                 base_flags |= TXD_FLAG_HWTSTAMP;
7952         }
7953
7954         len = skb_headlen(skb);
7955
7956         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7957         if (pci_dma_mapping_error(tp->pdev, mapping))
7958                 goto drop;
7959
7960
7961         tnapi->tx_buffers[entry].skb = skb;
7962         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7963
7964         would_hit_hwbug = 0;
7965
7966         if (tg3_flag(tp, 5701_DMA_BUG))
7967                 would_hit_hwbug = 1;
7968
7969         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7970                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7971                             mss, vlan)) {
7972                 would_hit_hwbug = 1;
7973         } else if (skb_shinfo(skb)->nr_frags > 0) {
7974                 u32 tmp_mss = mss;
7975
7976                 if (!tg3_flag(tp, HW_TSO_1) &&
7977                     !tg3_flag(tp, HW_TSO_2) &&
7978                     !tg3_flag(tp, HW_TSO_3))
7979                         tmp_mss = 0;
7980
7981                 /* Now loop through additional data
7982                  * fragments, and queue them.
7983                  */
7984                 last = skb_shinfo(skb)->nr_frags - 1;
7985                 for (i = 0; i <= last; i++) {
7986                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7987
7988                         len = skb_frag_size(frag);
7989                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7990                                                    len, DMA_TO_DEVICE);
7991
7992                         tnapi->tx_buffers[entry].skb = NULL;
7993                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7994                                            mapping);
7995                         if (dma_mapping_error(&tp->pdev->dev, mapping))
7996                                 goto dma_error;
7997
7998                         if (!budget ||
7999                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8000                                             len, base_flags |
8001                                             ((i == last) ? TXD_FLAG_END : 0),
8002                                             tmp_mss, vlan)) {
8003                                 would_hit_hwbug = 1;
8004                                 break;
8005                         }
8006                 }
8007         }
8008
8009         if (would_hit_hwbug) {
8010                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8011
8012                 /* If the workaround fails due to memory/mapping
8013                  * failure, silently drop this packet.
8014                  */
8015                 entry = tnapi->tx_prod;
8016                 budget = tg3_tx_avail(tnapi);
8017                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8018                                                 base_flags, mss, vlan))
8019                         goto drop_nofree;
8020         }
8021
8022         skb_tx_timestamp(skb);
8023         netdev_tx_sent_queue(txq, skb->len);
8024
8025         /* Sync BD data before updating mailbox */
8026         wmb();
8027
8028         /* Packets are ready, update Tx producer idx local and on card. */
8029         tw32_tx_mbox(tnapi->prodmbox, entry);
8030
8031         tnapi->tx_prod = entry;
8032         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8033                 netif_tx_stop_queue(txq);
8034
8035                 /* netif_tx_stop_queue() must be done before checking
8036                  * checking tx index in tg3_tx_avail() below, because in
8037                  * tg3_tx(), we update tx index before checking for
8038                  * netif_tx_queue_stopped().
8039                  */
8040                 smp_mb();
8041                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8042                         netif_tx_wake_queue(txq);
8043         }
8044
8045         mmiowb();
8046         return NETDEV_TX_OK;
8047
8048 dma_error:
8049         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8050         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8051 drop:
8052         dev_kfree_skb(skb);
8053 drop_nofree:
8054         tp->tx_dropped++;
8055         return NETDEV_TX_OK;
8056 }
8057
8058 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8059 {
8060         if (enable) {
8061                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8062                                   MAC_MODE_PORT_MODE_MASK);
8063
8064                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8065
8066                 if (!tg3_flag(tp, 5705_PLUS))
8067                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8068
8069                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8070                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8071                 else
8072                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8073         } else {
8074                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8075
8076                 if (tg3_flag(tp, 5705_PLUS) ||
8077                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8078                     tg3_asic_rev(tp) == ASIC_REV_5700)
8079                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8080         }
8081
8082         tw32(MAC_MODE, tp->mac_mode);
8083         udelay(40);
8084 }
8085
/* Configure the PHY for loopback testing at the requested speed.
 * When extlpbk is true, external loopback is set up through
 * tg3_phy_set_extloopbk(); otherwise the BMCR loopback bit is used.
 * Returns 0 on success or -EIO if external loopback setup fails.
 */
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	/* Turn off auto power-down and automatic MDI crossover while
	 * loopback is in effect.
	 */
	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, false);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	/* Loopback always runs full duplex. */
	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		/* A 1000 (or unknown) request is downgraded to 100 on
		 * FET PHYs; otherwise gigabit is selected.
		 */
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			/* Non-FET external loopback: force the PHY into
			 * gigabit master mode.
			 */
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			/* FET PHYs use the PTEST trim register instead;
			 * the value is reused for ASIC_REV_5785 below.
			 */
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785) {
		/* Force the transmit link up/locked on 5785 FET parts. */
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	/* Program the MAC port mode to match the selected speed. */
	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		/* 5401 and 5411 PHYs want opposite link polarity settings. */
		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}
8178
8179 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8180 {
8181         struct tg3 *tp = netdev_priv(dev);
8182
8183         if (features & NETIF_F_LOOPBACK) {
8184                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8185                         return;
8186
8187                 spin_lock_bh(&tp->lock);
8188                 tg3_mac_loopback(tp, true);
8189                 netif_carrier_on(tp->dev);
8190                 spin_unlock_bh(&tp->lock);
8191                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8192         } else {
8193                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8194                         return;
8195
8196                 spin_lock_bh(&tp->lock);
8197                 tg3_mac_loopback(tp, false);
8198                 /* Force link status check */
8199                 tg3_setup_phy(tp, true);
8200                 spin_unlock_bh(&tp->lock);
8201                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8202         }
8203 }
8204
8205 static netdev_features_t tg3_fix_features(struct net_device *dev,
8206         netdev_features_t features)
8207 {
8208         struct tg3 *tp = netdev_priv(dev);
8209
8210         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8211                 features &= ~NETIF_F_ALL_TSO;
8212
8213         return features;
8214 }
8215
8216 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8217 {
8218         netdev_features_t changed = dev->features ^ features;
8219
8220         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8221                 tg3_set_loopback(dev, features);
8222
8223         return 0;
8224 }
8225
8226 static void tg3_rx_prodring_free(struct tg3 *tp,
8227                                  struct tg3_rx_prodring_set *tpr)
8228 {
8229         int i;
8230
8231         if (tpr != &tp->napi[0].prodring) {
8232                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8233                      i = (i + 1) & tp->rx_std_ring_mask)
8234                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8235                                         tp->rx_pkt_map_sz);
8236
8237                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8238                         for (i = tpr->rx_jmb_cons_idx;
8239                              i != tpr->rx_jmb_prod_idx;
8240                              i = (i + 1) & tp->rx_jmb_ring_mask) {
8241                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8242                                                 TG3_RX_JMB_MAP_SZ);
8243                         }
8244                 }
8245
8246                 return;
8247         }
8248
8249         for (i = 0; i <= tp->rx_std_ring_mask; i++)
8250                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8251                                 tp->rx_pkt_map_sz);
8252
8253         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8254                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8255                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8256                                         TG3_RX_JMB_MAP_SZ);
8257         }
8258 }
8259
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success.  If even the first buffer of a ring cannot be
 * allocated, everything is freed again and -ENOMEM is returned;
 * partial allocations merely shrink the pending counts.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	/* Per-vector staging rings only need their buffer bookkeeping
	 * cleared; descriptors and data buffers belong to napi[0].
	 */
	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	/* 5780-class chips with jumbo MTU use larger standard-ring
	 * buffers instead of a separate jumbo ring.
	 */
	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			/* Run degraded with whatever was allocated. */
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	/* Same invariant setup as the standard ring, jumbo variant. */
	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
		       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		unsigned int frag_size;

		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
				      &frag_size) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
8368
8369 static void tg3_rx_prodring_fini(struct tg3 *tp,
8370                                  struct tg3_rx_prodring_set *tpr)
8371 {
8372         kfree(tpr->rx_std_buffers);
8373         tpr->rx_std_buffers = NULL;
8374         kfree(tpr->rx_jmb_buffers);
8375         tpr->rx_jmb_buffers = NULL;
8376         if (tpr->rx_std) {
8377                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8378                                   tpr->rx_std, tpr->rx_std_mapping);
8379                 tpr->rx_std = NULL;
8380         }
8381         if (tpr->rx_jmb) {
8382                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8383                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
8384                 tpr->rx_jmb = NULL;
8385         }
8386 }
8387
8388 static int tg3_rx_prodring_init(struct tg3 *tp,
8389                                 struct tg3_rx_prodring_set *tpr)
8390 {
8391         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8392                                       GFP_KERNEL);
8393         if (!tpr->rx_std_buffers)
8394                 return -ENOMEM;
8395
8396         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8397                                          TG3_RX_STD_RING_BYTES(tp),
8398                                          &tpr->rx_std_mapping,
8399                                          GFP_KERNEL);
8400         if (!tpr->rx_std)
8401                 goto err_out;
8402
8403         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8404                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8405                                               GFP_KERNEL);
8406                 if (!tpr->rx_jmb_buffers)
8407                         goto err_out;
8408
8409                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8410                                                  TG3_RX_JMB_RING_BYTES(tp),
8411                                                  &tpr->rx_jmb_mapping,
8412                                                  GFP_KERNEL);
8413                 if (!tpr->rx_jmb)
8414                         goto err_out;
8415         }
8416
8417         return 0;
8418
8419 err_out:
8420         tg3_rx_prodring_fini(tp, tpr);
8421         return -ENOMEM;
8422 }
8423
8424 /* Free up pending packets in all rx/tx rings.
8425  *
8426  * The chip has been shut down and the driver detached from
8427  * the networking, so no interrupts or new tx packets will
8428  * end up in the driver.  tp->{tx,}lock is not held and we are not
8429  * in an interrupt context and thus may sleep.
8430  */
8431 static void tg3_free_rings(struct tg3 *tp)
8432 {
8433         int i, j;
8434
8435         for (j = 0; j < tp->irq_cnt; j++) {
8436                 struct tg3_napi *tnapi = &tp->napi[j];
8437
8438                 tg3_rx_prodring_free(tp, &tnapi->prodring);
8439
8440                 if (!tnapi->tx_buffers)
8441                         continue;
8442
8443                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8444                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8445
8446                         if (!skb)
8447                                 continue;
8448
8449                         tg3_tx_skb_unmap(tnapi, i,
8450                                          skb_shinfo(skb)->nr_frags - 1);
8451
8452                         dev_kfree_skb_any(skb);
8453                 }
8454                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8455         }
8456 }
8457
8458 /* Initialize tx/rx rings for packet processing.
8459  *
8460  * The chip has been shut down and the driver detached from
8461  * the networking, so no interrupts or new tx packets will
8462  * end up in the driver.  tp->{tx,}lock are held and thus
8463  * we may not sleep.
8464  */
8465 static int tg3_init_rings(struct tg3 *tp)
8466 {
8467         int i;
8468
8469         /* Free up all the SKBs. */
8470         tg3_free_rings(tp);
8471
8472         for (i = 0; i < tp->irq_cnt; i++) {
8473                 struct tg3_napi *tnapi = &tp->napi[i];
8474
8475                 tnapi->last_tag = 0;
8476                 tnapi->last_irq_tag = 0;
8477                 tnapi->hw_status->status = 0;
8478                 tnapi->hw_status->status_tag = 0;
8479                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8480
8481                 tnapi->tx_prod = 0;
8482                 tnapi->tx_cons = 0;
8483                 if (tnapi->tx_ring)
8484                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8485
8486                 tnapi->rx_rcb_ptr = 0;
8487                 if (tnapi->rx_rcb)
8488                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8489
8490                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8491                         tg3_free_rings(tp);
8492                         return -ENOMEM;
8493                 }
8494         }
8495
8496         return 0;
8497 }
8498
8499 static void tg3_mem_tx_release(struct tg3 *tp)
8500 {
8501         int i;
8502
8503         for (i = 0; i < tp->irq_max; i++) {
8504                 struct tg3_napi *tnapi = &tp->napi[i];
8505
8506                 if (tnapi->tx_ring) {
8507                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8508                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
8509                         tnapi->tx_ring = NULL;
8510                 }
8511
8512                 kfree(tnapi->tx_buffers);
8513                 tnapi->tx_buffers = NULL;
8514         }
8515 }
8516
8517 static int tg3_mem_tx_acquire(struct tg3 *tp)
8518 {
8519         int i;
8520         struct tg3_napi *tnapi = &tp->napi[0];
8521
8522         /* If multivector TSS is enabled, vector 0 does not handle
8523          * tx interrupts.  Don't allocate any resources for it.
8524          */
8525         if (tg3_flag(tp, ENABLE_TSS))
8526                 tnapi++;
8527
8528         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8529                 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8530                                             TG3_TX_RING_SIZE, GFP_KERNEL);
8531                 if (!tnapi->tx_buffers)
8532                         goto err_out;
8533
8534                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8535                                                     TG3_TX_RING_BYTES,
8536                                                     &tnapi->tx_desc_mapping,
8537                                                     GFP_KERNEL);
8538                 if (!tnapi->tx_ring)
8539                         goto err_out;
8540         }
8541
8542         return 0;
8543
8544 err_out:
8545         tg3_mem_tx_release(tp);
8546         return -ENOMEM;
8547 }
8548
8549 static void tg3_mem_rx_release(struct tg3 *tp)
8550 {
8551         int i;
8552
8553         for (i = 0; i < tp->irq_max; i++) {
8554                 struct tg3_napi *tnapi = &tp->napi[i];
8555
8556                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8557
8558                 if (!tnapi->rx_rcb)
8559                         continue;
8560
8561                 dma_free_coherent(&tp->pdev->dev,
8562                                   TG3_RX_RCB_RING_BYTES(tp),
8563                                   tnapi->rx_rcb,
8564                                   tnapi->rx_rcb_mapping);
8565                 tnapi->rx_rcb = NULL;
8566         }
8567 }
8568
8569 static int tg3_mem_rx_acquire(struct tg3 *tp)
8570 {
8571         unsigned int i, limit;
8572
8573         limit = tp->rxq_cnt;
8574
8575         /* If RSS is enabled, we need a (dummy) producer ring
8576          * set on vector zero.  This is the true hw prodring.
8577          */
8578         if (tg3_flag(tp, ENABLE_RSS))
8579                 limit++;
8580
8581         for (i = 0; i < limit; i++) {
8582                 struct tg3_napi *tnapi = &tp->napi[i];
8583
8584                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8585                         goto err_out;
8586
8587                 /* If multivector RSS is enabled, vector 0
8588                  * does not handle rx or tx interrupts.
8589                  * Don't allocate any resources for it.
8590                  */
8591                 if (!i && tg3_flag(tp, ENABLE_RSS))
8592                         continue;
8593
8594                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8595                                                    TG3_RX_RCB_RING_BYTES(tp),
8596                                                    &tnapi->rx_rcb_mapping,
8597                                                    GFP_KERNEL | __GFP_ZERO);
8598                 if (!tnapi->rx_rcb)
8599                         goto err_out;
8600         }
8601
8602         return 0;
8603
8604 err_out:
8605         tg3_mem_rx_release(tp);
8606         return -ENOMEM;
8607 }
8608
8609 /*
8610  * Must not be invoked with interrupt sources disabled and
8611  * the hardware shutdown down.
8612  */
8613 static void tg3_free_consistent(struct tg3 *tp)
8614 {
8615         int i;
8616
8617         for (i = 0; i < tp->irq_cnt; i++) {
8618                 struct tg3_napi *tnapi = &tp->napi[i];
8619
8620                 if (tnapi->hw_status) {
8621                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8622                                           tnapi->hw_status,
8623                                           tnapi->status_mapping);
8624                         tnapi->hw_status = NULL;
8625                 }
8626         }
8627
8628         tg3_mem_rx_release(tp);
8629         tg3_mem_tx_release(tp);
8630
8631         if (tp->hw_stats) {
8632                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8633                                   tp->hw_stats, tp->stats_mapping);
8634                 tp->hw_stats = NULL;
8635         }
8636 }
8637
8638 /*
8639  * Must not be invoked with interrupt sources disabled and
8640  * the hardware shutdown down.  Can sleep.
8641  */
8642 static int tg3_alloc_consistent(struct tg3 *tp)
8643 {
8644         int i;
8645
8646         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8647                                           sizeof(struct tg3_hw_stats),
8648                                           &tp->stats_mapping,
8649                                           GFP_KERNEL | __GFP_ZERO);
8650         if (!tp->hw_stats)
8651                 goto err_out;
8652
8653         for (i = 0; i < tp->irq_cnt; i++) {
8654                 struct tg3_napi *tnapi = &tp->napi[i];
8655                 struct tg3_hw_status *sblk;
8656
8657                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8658                                                       TG3_HW_STATUS_SIZE,
8659                                                       &tnapi->status_mapping,
8660                                                       GFP_KERNEL | __GFP_ZERO);
8661                 if (!tnapi->hw_status)
8662                         goto err_out;
8663
8664                 sblk = tnapi->hw_status;
8665
8666                 if (tg3_flag(tp, ENABLE_RSS)) {
8667                         u16 *prodptr = NULL;
8668
8669                         /*
8670                          * When RSS is enabled, the status block format changes
8671                          * slightly.  The "rx_jumbo_consumer", "reserved",
8672                          * and "rx_mini_consumer" members get mapped to the
8673                          * other three rx return ring producer indexes.
8674                          */
8675                         switch (i) {
8676                         case 1:
8677                                 prodptr = &sblk->idx[0].rx_producer;
8678                                 break;
8679                         case 2:
8680                                 prodptr = &sblk->rx_jumbo_consumer;
8681                                 break;
8682                         case 3:
8683                                 prodptr = &sblk->reserved;
8684                                 break;
8685                         case 4:
8686                                 prodptr = &sblk->rx_mini_consumer;
8687                                 break;
8688                         }
8689                         tnapi->rx_rcb_prod_idx = prodptr;
8690                 } else {
8691                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8692                 }
8693         }
8694
8695         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8696                 goto err_out;
8697
8698         return 0;
8699
8700 err_out:
8701         tg3_free_consistent(tp);
8702         return -ENOMEM;
8703 }
8704
8705 #define MAX_WAIT_CNT 1000
8706
8707 /* To stop a block, clear the enable bit and poll till it
8708  * clears.  tp->lock is held.
8709  */
8710 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8711 {
8712         unsigned int i;
8713         u32 val;
8714
8715         if (tg3_flag(tp, 5705_PLUS)) {
8716                 switch (ofs) {
8717                 case RCVLSC_MODE:
8718                 case DMAC_MODE:
8719                 case MBFREE_MODE:
8720                 case BUFMGR_MODE:
8721                 case MEMARB_MODE:
8722                         /* We can't enable/disable these bits of the
8723                          * 5705/5750, just say success.
8724                          */
8725                         return 0;
8726
8727                 default:
8728                         break;
8729                 }
8730         }
8731
8732         val = tr32(ofs);
8733         val &= ~enable_bit;
8734         tw32_f(ofs, val);
8735
8736         for (i = 0; i < MAX_WAIT_CNT; i++) {
8737                 if (pci_channel_offline(tp->pdev)) {
8738                         dev_err(&tp->pdev->dev,
8739                                 "tg3_stop_block device offline, "
8740                                 "ofs=%lx enable_bit=%x\n",
8741                                 ofs, enable_bit);
8742                         return -ENODEV;
8743                 }
8744
8745                 udelay(100);
8746                 val = tr32(ofs);
8747                 if ((val & enable_bit) == 0)
8748                         break;
8749         }
8750
8751         if (i == MAX_WAIT_CNT && !silent) {
8752                 dev_err(&tp->pdev->dev,
8753                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8754                         ofs, enable_bit);
8755                 return -ENODEV;
8756         }
8757
8758         return 0;
8759 }
8760
/* Quiesce the chip: disable interrupts, then stop every rx and tx
 * block, finally clearing all status blocks.  Errors from individual
 * blocks are OR-ed together so the sequence always runs to
 * completion.  tp->lock is held.
 */
static int tg3_abort_hw(struct tg3 *tp, bool silent)
{
	int i, err;

	tg3_disable_ints(tp);

	if (pci_channel_offline(tp->pdev)) {
		/* Device unreachable: skip all register accesses and just
		 * bring the cached mode words in line.
		 * NOTE(review): TX_MODE_ENABLE is cleared from rx_mode here
		 * -- presumably the two enable bits share a bit position;
		 * confirm against the register definitions.
		 */
		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
		err = -ENODEV;
		goto err_no_dev;
	}

	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Stop the receive-side blocks first... */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* ...then the transmit-side blocks. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* MAC tx has no stop_block helper; poll for the bit directly. */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse the FTQ reset bit. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

err_no_dev:
	/* Clear every vector's status block regardless of errors above. */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}
8832
8833 /* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	/* The GRC core-clock reset clears the memory-enable bit in the
	 * PCI command register on some chips; stash the register so
	 * tg3_restore_pci_state() can put it back after tg3_chip_reset().
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
8838
8839 /* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Put back the command register saved by tg3_save_pci_state(). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (!tg3_flag(tp, PCI_EXPRESS)) {
		/* Conventional PCI: cache line size and latency timer are
		 * also lost across the core-clock reset; restore them.
		 */
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
8899
8900 /* tp->lock is held. */
/* Perform a GRC core-clock reset of the chip and bring it back to a
 * usable state: quiesce interrupt handlers, pulse the reset, restore
 * saved PCI config state, wait for bootcode, and reprobe the ASF
 * configuration from NVRAM.  Returns 0 on success or a negative errno
 * if the firmware does not come back.
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	/* Let any in-flight irq handlers finish before the reset pulse. */
	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
		u16 val16;

		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
			int j;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (j = 0; j < 5000; j++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);

		/* Clear error status */
		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
				      PCI_EXP_DEVSTA_CED |
				      PCI_EXP_DEVSTA_NFED |
				      PCI_EXP_DEVSTA_FED |
				      PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	/* Re-enable the memory arbiter; 5780-class parts keep their
	 * extra MEMARB mode bits across the reset.
	 */
	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/*
		 * BCM4785: In order to avoid repercussions from using
		 * potentially defective internal ROM, stop the Rx RISC CPU,
		 * which is not required.
		 */
		tg3_stop_fw(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
	}

	/* Wait for the firmware/bootcode to come back up after reset. */
	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tw32(GRC_MODE, tp->grc_mode);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Restore the MAC port mode for SERDES-attached PHYs. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5720) {
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	}

	/* Reprobe ASF enable state.  */
	tg3_flag_clear(tp, ENABLE_ASF);
	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
		}
	}

	return 0;
}
9155
9156 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9157 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9158
9159 /* tp->lock is held. */
9160 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9161 {
9162         int err;
9163
9164         tg3_stop_fw(tp);
9165
9166         tg3_write_sig_pre_reset(tp, kind);
9167
9168         tg3_abort_hw(tp, silent);
9169         err = tg3_chip_reset(tp);
9170
9171         __tg3_set_mac_addr(tp, false);
9172
9173         tg3_write_sig_legacy(tp, kind);
9174         tg3_write_sig_post_reset(tp, kind);
9175
9176         if (tp->hw_stats) {
9177                 /* Save the stats across chip resets... */
9178                 tg3_get_nstats(tp, &tp->net_stats_prev);
9179                 tg3_get_estats(tp, &tp->estats_prev);
9180
9181                 /* And make sure the next sample is new data */
9182                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9183         }
9184
9185         if (err)
9186                 return err;
9187
9188         return 0;
9189 }
9190
9191 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9192 {
9193         struct tg3 *tp = netdev_priv(dev);
9194         struct sockaddr *addr = p;
9195         int err = 0;
9196         bool skip_mac_1 = false;
9197
9198         if (!is_valid_ether_addr(addr->sa_data))
9199                 return -EADDRNOTAVAIL;
9200
9201         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9202
9203         if (!netif_running(dev))
9204                 return 0;
9205
9206         if (tg3_flag(tp, ENABLE_ASF)) {
9207                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9208
9209                 addr0_high = tr32(MAC_ADDR_0_HIGH);
9210                 addr0_low = tr32(MAC_ADDR_0_LOW);
9211                 addr1_high = tr32(MAC_ADDR_1_HIGH);
9212                 addr1_low = tr32(MAC_ADDR_1_LOW);
9213
9214                 /* Skip MAC addr 1 if ASF is using it. */
9215                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9216                     !(addr1_high == 0 && addr1_low == 0))
9217                         skip_mac_1 = true;
9218         }
9219         spin_lock_bh(&tp->lock);
9220         __tg3_set_mac_addr(tp, skip_mac_1);
9221         spin_unlock_bh(&tp->lock);
9222
9223         return err;
9224 }
9225
9226 /* tp->lock is held. */
9227 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9228                            dma_addr_t mapping, u32 maxlen_flags,
9229                            u32 nic_addr)
9230 {
9231         tg3_write_mem(tp,
9232                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9233                       ((u64) mapping >> 32));
9234         tg3_write_mem(tp,
9235                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9236                       ((u64) mapping & 0xffffffff));
9237         tg3_write_mem(tp,
9238                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9239                        maxlen_flags);
9240
9241         if (!tg3_flag(tp, 5705_PLUS))
9242                 tg3_write_mem(tp,
9243                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9244                               nic_addr);
9245 }
9246
9247
9248 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9249 {
9250         int i = 0;
9251
9252         if (!tg3_flag(tp, ENABLE_TSS)) {
9253                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9254                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9255                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9256         } else {
9257                 tw32(HOSTCC_TXCOL_TICKS, 0);
9258                 tw32(HOSTCC_TXMAX_FRAMES, 0);
9259                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9260
9261                 for (; i < tp->txq_cnt; i++) {
9262                         u32 reg;
9263
9264                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9265                         tw32(reg, ec->tx_coalesce_usecs);
9266                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9267                         tw32(reg, ec->tx_max_coalesced_frames);
9268                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9269                         tw32(reg, ec->tx_max_coalesced_frames_irq);
9270                 }
9271         }
9272
9273         for (; i < tp->irq_max - 1; i++) {
9274                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9275                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9276                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9277         }
9278 }
9279
9280 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9281 {
9282         int i = 0;
9283         u32 limit = tp->rxq_cnt;
9284
9285         if (!tg3_flag(tp, ENABLE_RSS)) {
9286                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9287                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9288                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9289                 limit--;
9290         } else {
9291                 tw32(HOSTCC_RXCOL_TICKS, 0);
9292                 tw32(HOSTCC_RXMAX_FRAMES, 0);
9293                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9294         }
9295
9296         for (; i < limit; i++) {
9297                 u32 reg;
9298
9299                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9300                 tw32(reg, ec->rx_coalesce_usecs);
9301                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9302                 tw32(reg, ec->rx_max_coalesced_frames);
9303                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9304                 tw32(reg, ec->rx_max_coalesced_frames_irq);
9305         }
9306
9307         for (; i < tp->irq_max - 1; i++) {
9308                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9309                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9310                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9311         }
9312 }
9313
9314 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9315 {
9316         tg3_coal_tx_init(tp, ec);
9317         tg3_coal_rx_init(tp, ec);
9318
9319         if (!tg3_flag(tp, 5705_PLUS)) {
9320                 u32 val = ec->stats_block_coalesce_usecs;
9321
9322                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9323                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9324
9325                 if (!tp->link_up)
9326                         val = 0;
9327
9328                 tw32(HOSTCC_STAT_COAL_TICKS, val);
9329         }
9330 }
9331
9332 /* tp->lock is held. */
9333 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9334 {
9335         u32 txrcb, limit;
9336
9337         /* Disable all transmit rings but the first. */
9338         if (!tg3_flag(tp, 5705_PLUS))
9339                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9340         else if (tg3_flag(tp, 5717_PLUS))
9341                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9342         else if (tg3_flag(tp, 57765_CLASS) ||
9343                  tg3_asic_rev(tp) == ASIC_REV_5762)
9344                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9345         else
9346                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9347
9348         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9349              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9350                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9351                               BDINFO_FLAGS_DISABLED);
9352 }
9353
9354 /* tp->lock is held. */
9355 static void tg3_tx_rcbs_init(struct tg3 *tp)
9356 {
9357         int i = 0;
9358         u32 txrcb = NIC_SRAM_SEND_RCB;
9359
9360         if (tg3_flag(tp, ENABLE_TSS))
9361                 i++;
9362
9363         for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9364                 struct tg3_napi *tnapi = &tp->napi[i];
9365
9366                 if (!tnapi->tx_ring)
9367                         continue;
9368
9369                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9370                                (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9371                                NIC_SRAM_TX_BUFFER_DESC);
9372         }
9373 }
9374
9375 /* tp->lock is held. */
9376 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9377 {
9378         u32 rxrcb, limit;
9379
9380         /* Disable all receive return rings but the first. */
9381         if (tg3_flag(tp, 5717_PLUS))
9382                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9383         else if (!tg3_flag(tp, 5705_PLUS))
9384                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9385         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9386                  tg3_asic_rev(tp) == ASIC_REV_5762 ||
9387                  tg3_flag(tp, 57765_CLASS))
9388                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9389         else
9390                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9391
9392         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9393              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9394                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9395                               BDINFO_FLAGS_DISABLED);
9396 }
9397
9398 /* tp->lock is held. */
9399 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9400 {
9401         int i = 0;
9402         u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9403
9404         if (tg3_flag(tp, ENABLE_RSS))
9405                 i++;
9406
9407         for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9408                 struct tg3_napi *tnapi = &tp->napi[i];
9409
9410                 if (!tnapi->rx_rcb)
9411                         continue;
9412
9413                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9414                                (tp->rx_ret_ring_mask + 1) <<
9415                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9416         }
9417 }
9418
9419 /* tp->lock is held. */
/* Quiesce and reinitialize all rings and status blocks: disable the
 * extra TX/RX RCBs, reset mailboxes and per-vector bookkeeping, clear
 * the host status blocks, reprogram their DMA addresses, and finally
 * re-enable the in-use RCBs.
 */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk;
	struct tg3_napi *tnapi = &tp->napi[0];

	tg3_tx_rcbs_disable(tp);

	tg3_rx_ret_rcbs_disable(tp);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		/* Without TSS, vector 0 owns the (only) TX producer mailbox. */
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	/* Per-vector status block address registers start at
	 * HOSTCC_STATBLCK_RING1 and follow at 8-byte strides.
	 */
	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
		stblk += 8;

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	tg3_tx_rcbs_init(tp);
	tg3_rx_ret_rcbs_init(tp);
}
9489
/* Program the standard (and, where applicable, jumbo) RX buffer
 * descriptor replenish thresholds, bounded by the chip generation's
 * on-chip BD cache size and the configured ring sizes.
 */
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	/* Select the standard RX BD cache size for this chip family. */
	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	/* Threshold is the smaller of the NIC-side and host-side limits;
	 * the host side is 1/8 of the configured ring, at least 1.
	 */
	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	/* Jumbo thresholds apply only to jumbo-capable, non-5780 parts. */
	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
9528
9529 static inline u32 calc_crc(unsigned char *buf, int len)
9530 {
9531         u32 reg;
9532         u32 tmp;
9533         int j, k;
9534
9535         reg = 0xffffffff;
9536
9537         for (j = 0; j < len; j++) {
9538                 reg ^= buf[j];
9539
9540                 for (k = 0; k < 8; k++) {
9541                         tmp = reg & 0x01;
9542
9543                         reg >>= 1;
9544
9545                         if (tmp)
9546                                 reg ^= 0xedb88320;
9547                 }
9548         }
9549
9550         return ~reg;
9551 }
9552
9553 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9554 {
9555         /* accept or reject all multicast frames */
9556         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9557         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9558         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9559         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9560 }
9561
9562 static void __tg3_set_rx_mode(struct net_device *dev)
9563 {
9564         struct tg3 *tp = netdev_priv(dev);
9565         u32 rx_mode;
9566
9567         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9568                                   RX_MODE_KEEP_VLAN_TAG);
9569
9570 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9571         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9572          * flag clear.
9573          */
9574         if (!tg3_flag(tp, ENABLE_ASF))
9575                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9576 #endif
9577
9578         if (dev->flags & IFF_PROMISC) {
9579                 /* Promiscuous mode. */
9580                 rx_mode |= RX_MODE_PROMISC;
9581         } else if (dev->flags & IFF_ALLMULTI) {
9582                 /* Accept all multicast. */
9583                 tg3_set_multi(tp, 1);
9584         } else if (netdev_mc_empty(dev)) {
9585                 /* Reject all multicast. */
9586                 tg3_set_multi(tp, 0);
9587         } else {
9588                 /* Accept one or more multicast(s). */
9589                 struct netdev_hw_addr *ha;
9590                 u32 mc_filter[4] = { 0, };
9591                 u32 regidx;
9592                 u32 bit;
9593                 u32 crc;
9594
9595                 netdev_for_each_mc_addr(ha, dev) {
9596                         crc = calc_crc(ha->addr, ETH_ALEN);
9597                         bit = ~crc & 0x7f;
9598                         regidx = (bit & 0x60) >> 5;
9599                         bit &= 0x1f;
9600                         mc_filter[regidx] |= (1 << bit);
9601                 }
9602
9603                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9604                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9605                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9606                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9607         }
9608
9609         if (rx_mode != tp->rx_mode) {
9610                 tp->rx_mode = rx_mode;
9611                 tw32_f(MAC_RX_MODE, rx_mode);
9612                 udelay(10);
9613         }
9614 }
9615
9616 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9617 {
9618         int i;
9619
9620         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9621                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9622 }
9623
9624 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9625 {
9626         int i;
9627
9628         if (!tg3_flag(tp, SUPPORT_MSIX))
9629                 return;
9630
9631         if (tp->rxq_cnt == 1) {
9632                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9633                 return;
9634         }
9635
9636         /* Validate table against current IRQ count */
9637         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9638                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9639                         break;
9640         }
9641
9642         if (i != TG3_RSS_INDIR_TBL_SIZE)
9643                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9644 }
9645
9646 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9647 {
9648         int i = 0;
9649         u32 reg = MAC_RSS_INDIR_TBL_0;
9650
9651         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9652                 u32 val = tp->rss_ind_tbl[i];
9653                 i++;
9654                 for (; i % 8; i++) {
9655                         val <<= 4;
9656                         val |= tp->rss_ind_tbl[i];
9657                 }
9658                 tw32(reg, val);
9659                 reg += 4;
9660         }
9661 }
9662
9663 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9664 {
9665         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9666                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9667         else
9668                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9669 }
9670
9671 /* tp->lock is held. */
9672 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9673 {
9674         u32 val, rdmac_mode;
9675         int i, err, limit;
9676         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9677
9678         tg3_disable_ints(tp);
9679
9680         tg3_stop_fw(tp);
9681
9682         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9683
9684         if (tg3_flag(tp, INIT_COMPLETE))
9685                 tg3_abort_hw(tp, 1);
9686
9687         if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9688             !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9689                 tg3_phy_pull_config(tp);
9690                 tg3_eee_pull_config(tp, NULL);
9691                 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9692         }
9693
9694         /* Enable MAC control of LPI */
9695         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9696                 tg3_setup_eee(tp);
9697
9698         if (reset_phy)
9699                 tg3_phy_reset(tp);
9700
9701         err = tg3_chip_reset(tp);
9702         if (err)
9703                 return err;
9704
9705         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9706
9707         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9708                 val = tr32(TG3_CPMU_CTRL);
9709                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9710                 tw32(TG3_CPMU_CTRL, val);
9711
9712                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9713                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9714                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9715                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9716
9717                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9718                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9719                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9720                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9721
9722                 val = tr32(TG3_CPMU_HST_ACC);
9723                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9724                 val |= CPMU_HST_ACC_MACCLK_6_25;
9725                 tw32(TG3_CPMU_HST_ACC, val);
9726         }
9727
9728         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9729                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9730                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9731                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9732                 tw32(PCIE_PWR_MGMT_THRESH, val);
9733
9734                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9735                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9736
9737                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9738
9739                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9740                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9741         }
9742
9743         if (tg3_flag(tp, L1PLLPD_EN)) {
9744                 u32 grc_mode = tr32(GRC_MODE);
9745
9746                 /* Access the lower 1K of PL PCIE block registers. */
9747                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9748                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9749
9750                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9751                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9752                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9753
9754                 tw32(GRC_MODE, grc_mode);
9755         }
9756
9757         if (tg3_flag(tp, 57765_CLASS)) {
9758                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9759                         u32 grc_mode = tr32(GRC_MODE);
9760
9761                         /* Access the lower 1K of PL PCIE block registers. */
9762                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9763                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9764
9765                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9766                                    TG3_PCIE_PL_LO_PHYCTL5);
9767                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9768                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9769
9770                         tw32(GRC_MODE, grc_mode);
9771                 }
9772
9773                 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9774                         u32 grc_mode;
9775
9776                         /* Fix transmit hangs */
9777                         val = tr32(TG3_CPMU_PADRNG_CTL);
9778                         val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9779                         tw32(TG3_CPMU_PADRNG_CTL, val);
9780
9781                         grc_mode = tr32(GRC_MODE);
9782
9783                         /* Access the lower 1K of DL PCIE block registers. */
9784                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9785                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9786
9787                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9788                                    TG3_PCIE_DL_LO_FTSMAX);
9789                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9790                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9791                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9792
9793                         tw32(GRC_MODE, grc_mode);
9794                 }
9795
9796                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9797                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9798                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9799                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9800         }
9801
9802         /* This works around an issue with Athlon chipsets on
9803          * B3 tigon3 silicon.  This bit has no effect on any
9804          * other revision.  But do not set this on PCI Express
9805          * chips and don't even touch the clocks if the CPMU is present.
9806          */
9807         if (!tg3_flag(tp, CPMU_PRESENT)) {
9808                 if (!tg3_flag(tp, PCI_EXPRESS))
9809                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9810                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9811         }
9812
9813         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9814             tg3_flag(tp, PCIX_MODE)) {
9815                 val = tr32(TG3PCI_PCISTATE);
9816                 val |= PCISTATE_RETRY_SAME_DMA;
9817                 tw32(TG3PCI_PCISTATE, val);
9818         }
9819
9820         if (tg3_flag(tp, ENABLE_APE)) {
9821                 /* Allow reads and writes to the
9822                  * APE register and memory space.
9823                  */
9824                 val = tr32(TG3PCI_PCISTATE);
9825                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9826                        PCISTATE_ALLOW_APE_SHMEM_WR |
9827                        PCISTATE_ALLOW_APE_PSPACE_WR;
9828                 tw32(TG3PCI_PCISTATE, val);
9829         }
9830
9831         if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9832                 /* Enable some hw fixes.  */
9833                 val = tr32(TG3PCI_MSI_DATA);
9834                 val |= (1 << 26) | (1 << 28) | (1 << 29);
9835                 tw32(TG3PCI_MSI_DATA, val);
9836         }
9837
9838         /* Descriptor ring init may make accesses to the
9839          * NIC SRAM area to setup the TX descriptors, so we
9840          * can only do this after the hardware has been
9841          * successfully reset.
9842          */
9843         err = tg3_init_rings(tp);
9844         if (err)
9845                 return err;
9846
9847         if (tg3_flag(tp, 57765_PLUS)) {
9848                 val = tr32(TG3PCI_DMA_RW_CTRL) &
9849                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9850                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9851                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9852                 if (!tg3_flag(tp, 57765_CLASS) &&
9853                     tg3_asic_rev(tp) != ASIC_REV_5717 &&
9854                     tg3_asic_rev(tp) != ASIC_REV_5762)
9855                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
9856                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9857         } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9858                    tg3_asic_rev(tp) != ASIC_REV_5761) {
9859                 /* This value is determined during the probe time DMA
9860                  * engine test, tg3_test_dma.
9861                  */
9862                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9863         }
9864
9865         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9866                           GRC_MODE_4X_NIC_SEND_RINGS |
9867                           GRC_MODE_NO_TX_PHDR_CSUM |
9868                           GRC_MODE_NO_RX_PHDR_CSUM);
9869         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9870
9871         /* Pseudo-header checksum is done by hardware logic and not
9872          * the offload processers, so make the chip do the pseudo-
9873          * header checksums on receive.  For transmit it is more
9874          * convenient to do the pseudo-header checksum in software
9875          * as Linux does that on transmit for us in all cases.
9876          */
9877         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9878
9879         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9880         if (tp->rxptpctl)
9881                 tw32(TG3_RX_PTP_CTL,
9882                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9883
9884         if (tg3_flag(tp, PTP_CAPABLE))
9885                 val |= GRC_MODE_TIME_SYNC_ENABLE;
9886
9887         tw32(GRC_MODE, tp->grc_mode | val);
9888
9889         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
9890         val = tr32(GRC_MISC_CFG);
9891         val &= ~0xff;
9892         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9893         tw32(GRC_MISC_CFG, val);
9894
9895         /* Initialize MBUF/DESC pool. */
9896         if (tg3_flag(tp, 5750_PLUS)) {
9897                 /* Do nothing.  */
9898         } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9899                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9900                 if (tg3_asic_rev(tp) == ASIC_REV_5704)
9901                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9902                 else
9903                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9904                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9905                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9906         } else if (tg3_flag(tp, TSO_CAPABLE)) {
9907                 int fw_len;
9908
9909                 fw_len = tp->fw_len;
9910                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9911                 tw32(BUFMGR_MB_POOL_ADDR,
9912                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9913                 tw32(BUFMGR_MB_POOL_SIZE,
9914                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9915         }
9916
9917         if (tp->dev->mtu <= ETH_DATA_LEN) {
9918                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9919                      tp->bufmgr_config.mbuf_read_dma_low_water);
9920                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9921                      tp->bufmgr_config.mbuf_mac_rx_low_water);
9922                 tw32(BUFMGR_MB_HIGH_WATER,
9923                      tp->bufmgr_config.mbuf_high_water);
9924         } else {
9925                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9926                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9927                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9928                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9929                 tw32(BUFMGR_MB_HIGH_WATER,
9930                      tp->bufmgr_config.mbuf_high_water_jumbo);
9931         }
9932         tw32(BUFMGR_DMA_LOW_WATER,
9933              tp->bufmgr_config.dma_low_water);
9934         tw32(BUFMGR_DMA_HIGH_WATER,
9935              tp->bufmgr_config.dma_high_water);
9936
9937         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9938         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9939                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9940         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9941             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9942             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9943                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9944         tw32(BUFMGR_MODE, val);
9945         for (i = 0; i < 2000; i++) {
9946                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9947                         break;
9948                 udelay(10);
9949         }
9950         if (i >= 2000) {
9951                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9952                 return -ENODEV;
9953         }
9954
9955         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9956                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9957
9958         tg3_setup_rxbd_thresholds(tp);
9959
9960         /* Initialize TG3_BDINFO's at:
9961          *  RCVDBDI_STD_BD:     standard eth size rx ring
9962          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
9963          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
9964          *
9965          * like so:
9966          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
9967          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
9968          *                              ring attribute flags
9969          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
9970          *
9971          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9972          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9973          *
9974          * The size of each ring is fixed in the firmware, but the location is
9975          * configurable.
9976          */
9977         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9978              ((u64) tpr->rx_std_mapping >> 32));
9979         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9980              ((u64) tpr->rx_std_mapping & 0xffffffff));
9981         if (!tg3_flag(tp, 5717_PLUS))
9982                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9983                      NIC_SRAM_RX_BUFFER_DESC);
9984
9985         /* Disable the mini ring */
9986         if (!tg3_flag(tp, 5705_PLUS))
9987                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9988                      BDINFO_FLAGS_DISABLED);
9989
9990         /* Program the jumbo buffer descriptor ring control
9991          * blocks on those devices that have them.
9992          */
9993         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9994             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9995
9996                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9997                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9998                              ((u64) tpr->rx_jmb_mapping >> 32));
9999                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10000                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10001                         val = TG3_RX_JMB_RING_SIZE(tp) <<
10002                               BDINFO_FLAGS_MAXLEN_SHIFT;
10003                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10004                              val | BDINFO_FLAGS_USE_EXT_RECV);
10005                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10006                             tg3_flag(tp, 57765_CLASS) ||
10007                             tg3_asic_rev(tp) == ASIC_REV_5762)
10008                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10009                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10010                 } else {
10011                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10012                              BDINFO_FLAGS_DISABLED);
10013                 }
10014
10015                 if (tg3_flag(tp, 57765_PLUS)) {
10016                         val = TG3_RX_STD_RING_SIZE(tp);
10017                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10018                         val |= (TG3_RX_STD_DMA_SZ << 2);
10019                 } else
10020                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10021         } else
10022                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10023
10024         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10025
10026         tpr->rx_std_prod_idx = tp->rx_pending;
10027         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10028
10029         tpr->rx_jmb_prod_idx =
10030                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10031         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10032
10033         tg3_rings_reset(tp);
10034
10035         /* Initialize MAC address and backoff seed. */
10036         __tg3_set_mac_addr(tp, false);
10037
10038         /* MTU + ethernet header + FCS + optional VLAN tag */
10039         tw32(MAC_RX_MTU_SIZE,
10040              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10041
10042         /* The slot time is changed by tg3_setup_phy if we
10043          * run at gigabit with half duplex.
10044          */
10045         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10046               (6 << TX_LENGTHS_IPG_SHIFT) |
10047               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10048
10049         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10050             tg3_asic_rev(tp) == ASIC_REV_5762)
10051                 val |= tr32(MAC_TX_LENGTHS) &
10052                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
10053                         TX_LENGTHS_CNT_DWN_VAL_MSK);
10054
10055         tw32(MAC_TX_LENGTHS, val);
10056
10057         /* Receive rules. */
10058         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10059         tw32(RCVLPC_CONFIG, 0x0181);
10060
10061         /* Calculate RDMAC_MODE setting early, we need it to determine
10062          * the RCVLPC_STATE_ENABLE mask.
10063          */
10064         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10065                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10066                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10067                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10068                       RDMAC_MODE_LNGREAD_ENAB);
10069
10070         if (tg3_asic_rev(tp) == ASIC_REV_5717)
10071                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10072
10073         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10074             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10075             tg3_asic_rev(tp) == ASIC_REV_57780)
10076                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10077                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10078                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10079
10080         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10081             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10082                 if (tg3_flag(tp, TSO_CAPABLE) &&
10083                     tg3_asic_rev(tp) == ASIC_REV_5705) {
10084                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10085                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10086                            !tg3_flag(tp, IS_5788)) {
10087                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10088                 }
10089         }
10090
10091         if (tg3_flag(tp, PCI_EXPRESS))
10092                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10093
10094         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10095                 tp->dma_limit = 0;
10096                 if (tp->dev->mtu <= ETH_DATA_LEN) {
10097                         rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10098                         tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10099                 }
10100         }
10101
10102         if (tg3_flag(tp, HW_TSO_1) ||
10103             tg3_flag(tp, HW_TSO_2) ||
10104             tg3_flag(tp, HW_TSO_3))
10105                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10106
10107         if (tg3_flag(tp, 57765_PLUS) ||
10108             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10109             tg3_asic_rev(tp) == ASIC_REV_57780)
10110                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10111
10112         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10113             tg3_asic_rev(tp) == ASIC_REV_5762)
10114                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10115
10116         if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10117             tg3_asic_rev(tp) == ASIC_REV_5784 ||
10118             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10119             tg3_asic_rev(tp) == ASIC_REV_57780 ||
10120             tg3_flag(tp, 57765_PLUS)) {
10121                 u32 tgtreg;
10122
10123                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10124                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10125                 else
10126                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
10127
10128                 val = tr32(tgtreg);
10129                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10130                     tg3_asic_rev(tp) == ASIC_REV_5762) {
10131                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10132                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10133                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10134                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10135                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10136                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10137                 }
10138                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10139         }
10140
10141         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10142             tg3_asic_rev(tp) == ASIC_REV_5720 ||
10143             tg3_asic_rev(tp) == ASIC_REV_5762) {
10144                 u32 tgtreg;
10145
10146                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10147                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10148                 else
10149                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10150
10151                 val = tr32(tgtreg);
10152                 tw32(tgtreg, val |
10153                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10154                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10155         }
10156
10157         /* Receive/send statistics. */
10158         if (tg3_flag(tp, 5750_PLUS)) {
10159                 val = tr32(RCVLPC_STATS_ENABLE);
10160                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10161                 tw32(RCVLPC_STATS_ENABLE, val);
10162         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10163                    tg3_flag(tp, TSO_CAPABLE)) {
10164                 val = tr32(RCVLPC_STATS_ENABLE);
10165                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10166                 tw32(RCVLPC_STATS_ENABLE, val);
10167         } else {
10168                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10169         }
10170         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10171         tw32(SNDDATAI_STATSENAB, 0xffffff);
10172         tw32(SNDDATAI_STATSCTRL,
10173              (SNDDATAI_SCTRL_ENABLE |
10174               SNDDATAI_SCTRL_FASTUPD));
10175
10176         /* Setup host coalescing engine. */
10177         tw32(HOSTCC_MODE, 0);
10178         for (i = 0; i < 2000; i++) {
10179                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10180                         break;
10181                 udelay(10);
10182         }
10183
10184         __tg3_set_coalesce(tp, &tp->coal);
10185
10186         if (!tg3_flag(tp, 5705_PLUS)) {
10187                 /* Status/statistics block address.  See tg3_timer,
10188                  * the tg3_periodic_fetch_stats call there, and
10189                  * tg3_get_stats to see how this works for 5705/5750 chips.
10190                  */
10191                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10192                      ((u64) tp->stats_mapping >> 32));
10193                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10194                      ((u64) tp->stats_mapping & 0xffffffff));
10195                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10196
10197                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10198
10199                 /* Clear statistics and status block memory areas */
10200                 for (i = NIC_SRAM_STATS_BLK;
10201                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10202                      i += sizeof(u32)) {
10203                         tg3_write_mem(tp, i, 0);
10204                         udelay(40);
10205                 }
10206         }
10207
10208         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10209
10210         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10211         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10212         if (!tg3_flag(tp, 5705_PLUS))
10213                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10214
10215         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10216                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10217                 /* reset to prevent losing 1st rx packet intermittently */
10218                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10219                 udelay(10);
10220         }
10221
10222         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10223                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10224                         MAC_MODE_FHDE_ENABLE;
10225         if (tg3_flag(tp, ENABLE_APE))
10226                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10227         if (!tg3_flag(tp, 5705_PLUS) &&
10228             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10229             tg3_asic_rev(tp) != ASIC_REV_5700)
10230                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10231         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10232         udelay(40);
10233
10234         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10235          * If TG3_FLAG_IS_NIC is zero, we should read the
10236          * register to preserve the GPIO settings for LOMs. The GPIOs,
10237          * whether used as inputs or outputs, are set by boot code after
10238          * reset.
10239          */
10240         if (!tg3_flag(tp, IS_NIC)) {
10241                 u32 gpio_mask;
10242
10243                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10244                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10245                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10246
10247                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10248                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10249                                      GRC_LCLCTRL_GPIO_OUTPUT3;
10250
10251                 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10252                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10253
10254                 tp->grc_local_ctrl &= ~gpio_mask;
10255                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10256
10257                 /* GPIO1 must be driven high for eeprom write protect */
10258                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10259                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10260                                                GRC_LCLCTRL_GPIO_OUTPUT1);
10261         }
10262         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10263         udelay(100);
10264
10265         if (tg3_flag(tp, USING_MSIX)) {
10266                 val = tr32(MSGINT_MODE);
10267                 val |= MSGINT_MODE_ENABLE;
10268                 if (tp->irq_cnt > 1)
10269                         val |= MSGINT_MODE_MULTIVEC_EN;
10270                 if (!tg3_flag(tp, 1SHOT_MSI))
10271                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10272                 tw32(MSGINT_MODE, val);
10273         }
10274
10275         if (!tg3_flag(tp, 5705_PLUS)) {
10276                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10277                 udelay(40);
10278         }
10279
10280         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10281                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10282                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10283                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10284                WDMAC_MODE_LNGREAD_ENAB);
10285
10286         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10287             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10288                 if (tg3_flag(tp, TSO_CAPABLE) &&
10289                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10290                      tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10291                         /* nothing */
10292                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10293                            !tg3_flag(tp, IS_5788)) {
10294                         val |= WDMAC_MODE_RX_ACCEL;
10295                 }
10296         }
10297
10298         /* Enable host coalescing bug fix */
10299         if (tg3_flag(tp, 5755_PLUS))
10300                 val |= WDMAC_MODE_STATUS_TAG_FIX;
10301
10302         if (tg3_asic_rev(tp) == ASIC_REV_5785)
10303                 val |= WDMAC_MODE_BURST_ALL_DATA;
10304
10305         tw32_f(WDMAC_MODE, val);
10306         udelay(40);
10307
10308         if (tg3_flag(tp, PCIX_MODE)) {
10309                 u16 pcix_cmd;
10310
10311                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10312                                      &pcix_cmd);
10313                 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10314                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10315                         pcix_cmd |= PCI_X_CMD_READ_2K;
10316                 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10317                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10318                         pcix_cmd |= PCI_X_CMD_READ_2K;
10319                 }
10320                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10321                                       pcix_cmd);
10322         }
10323
10324         tw32_f(RDMAC_MODE, rdmac_mode);
10325         udelay(40);
10326
10327         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10328             tg3_asic_rev(tp) == ASIC_REV_5720) {
10329                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10330                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10331                                 break;
10332                 }
10333                 if (i < TG3_NUM_RDMA_CHANNELS) {
10334                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10335                         val |= tg3_lso_rd_dma_workaround_bit(tp);
10336                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10337                         tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10338                 }
10339         }
10340
10341         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10342         if (!tg3_flag(tp, 5705_PLUS))
10343                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10344
10345         if (tg3_asic_rev(tp) == ASIC_REV_5761)
10346                 tw32(SNDDATAC_MODE,
10347                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10348         else
10349                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10350
10351         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10352         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10353         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10354         if (tg3_flag(tp, LRG_PROD_RING_CAP))
10355                 val |= RCVDBDI_MODE_LRG_RING_SZ;
10356         tw32(RCVDBDI_MODE, val);
10357         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10358         if (tg3_flag(tp, HW_TSO_1) ||
10359             tg3_flag(tp, HW_TSO_2) ||
10360             tg3_flag(tp, HW_TSO_3))
10361                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10362         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10363         if (tg3_flag(tp, ENABLE_TSS))
10364                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10365         tw32(SNDBDI_MODE, val);
10366         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10367
10368         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10369                 err = tg3_load_5701_a0_firmware_fix(tp);
10370                 if (err)
10371                         return err;
10372         }
10373
10374         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10375                 /* Ignore any errors for the firmware download. If download
10376                  * fails, the device will operate with EEE disabled
10377                  */
10378                 tg3_load_57766_firmware(tp);
10379         }
10380
10381         if (tg3_flag(tp, TSO_CAPABLE)) {
10382                 err = tg3_load_tso_firmware(tp);
10383                 if (err)
10384                         return err;
10385         }
10386
10387         tp->tx_mode = TX_MODE_ENABLE;
10388
10389         if (tg3_flag(tp, 5755_PLUS) ||
10390             tg3_asic_rev(tp) == ASIC_REV_5906)
10391                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10392
10393         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10394             tg3_asic_rev(tp) == ASIC_REV_5762) {
10395                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10396                 tp->tx_mode &= ~val;
10397                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10398         }
10399
10400         tw32_f(MAC_TX_MODE, tp->tx_mode);
10401         udelay(100);
10402
10403         if (tg3_flag(tp, ENABLE_RSS)) {
10404                 tg3_rss_write_indir_tbl(tp);
10405
10406                 /* Setup the "secret" hash key. */
10407                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
10408                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
10409                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
10410                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
10411                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
10412                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
10413                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
10414                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
10415                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
10416                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
10417         }
10418
10419         tp->rx_mode = RX_MODE_ENABLE;
10420         if (tg3_flag(tp, 5755_PLUS))
10421                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10422
10423         if (tg3_asic_rev(tp) == ASIC_REV_5762)
10424                 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10425
10426         if (tg3_flag(tp, ENABLE_RSS))
10427                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10428                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
10429                                RX_MODE_RSS_IPV6_HASH_EN |
10430                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
10431                                RX_MODE_RSS_IPV4_HASH_EN |
10432                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
10433
10434         tw32_f(MAC_RX_MODE, tp->rx_mode);
10435         udelay(10);
10436
10437         tw32(MAC_LED_CTRL, tp->led_ctrl);
10438
10439         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10440         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10441                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10442                 udelay(10);
10443         }
10444         tw32_f(MAC_RX_MODE, tp->rx_mode);
10445         udelay(10);
10446
10447         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10448                 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10449                     !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10450                         /* Set drive transmission level to 1.2V  */
10451                         /* only if the signal pre-emphasis bit is not set  */
10452                         val = tr32(MAC_SERDES_CFG);
10453                         val &= 0xfffff000;
10454                         val |= 0x880;
10455                         tw32(MAC_SERDES_CFG, val);
10456                 }
10457                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10458                         tw32(MAC_SERDES_CFG, 0x616000);
10459         }
10460
10461         /* Prevent chip from dropping frames when flow control
10462          * is enabled.
10463          */
10464         if (tg3_flag(tp, 57765_CLASS))
10465                 val = 1;
10466         else
10467                 val = 2;
10468         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10469
10470         if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10471             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10472                 /* Use hardware link auto-negotiation */
10473                 tg3_flag_set(tp, HW_AUTONEG);
10474         }
10475
10476         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10477             tg3_asic_rev(tp) == ASIC_REV_5714) {
10478                 u32 tmp;
10479
10480                 tmp = tr32(SERDES_RX_CTRL);
10481                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10482                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10483                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10484                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10485         }
10486
10487         if (!tg3_flag(tp, USE_PHYLIB)) {
10488                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10489                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10490
10491                 err = tg3_setup_phy(tp, false);
10492                 if (err)
10493                         return err;
10494
10495                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10496                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10497                         u32 tmp;
10498
10499                         /* Clear CRC stats. */
10500                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10501                                 tg3_writephy(tp, MII_TG3_TEST1,
10502                                              tmp | MII_TG3_TEST1_CRC_EN);
10503                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10504                         }
10505                 }
10506         }
10507
10508         __tg3_set_rx_mode(tp->dev);
10509
10510         /* Initialize receive rules. */
10511         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10512         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10513         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10514         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10515
10516         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10517                 limit = 8;
10518         else
10519                 limit = 16;
10520         if (tg3_flag(tp, ENABLE_ASF))
10521                 limit -= 4;
10522         switch (limit) {
10523         case 16:
10524                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10525         case 15:
10526                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10527         case 14:
10528                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10529         case 13:
10530                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10531         case 12:
10532                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10533         case 11:
10534                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10535         case 10:
10536                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10537         case 9:
10538                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10539         case 8:
10540                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10541         case 7:
10542                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10543         case 6:
10544                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10545         case 5:
10546                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10547         case 4:
10548                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10549         case 3:
10550                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10551         case 2:
10552         case 1:
10553
10554         default:
10555                 break;
10556         }
10557
10558         if (tg3_flag(tp, ENABLE_APE))
10559                 /* Write our heartbeat update interval to APE. */
10560                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10561                                 APE_HOST_HEARTBEAT_INT_DISABLE);
10562
10563         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10564
10565         return 0;
10566 }
10567
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 *
 * Returns 0 on success or a negative errno from tg3_reset_hw().
 */
static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
{
	/* Chip may have been just powered on. If so, the boot code may still
	 * be running initialization. Wait for it to finish to avoid races in
	 * accessing the hardware.
	 */
	tg3_enable_register_access(tp);
	tg3_poll_fw(tp);

	/* Switch chip clocking before programming the rest of the device
	 * (helper defined elsewhere in this file).
	 */
	tg3_switch_clocks(tp);

	/* Reset the indirect memory window base to a known offset before
	 * the full hardware (re)initialization below.
	 */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
10586
10587 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10588 {
10589         int i;
10590
10591         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10592                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10593
10594                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10595                 off += len;
10596
10597                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10598                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10599                         memset(ocir, 0, TG3_OCIR_LEN);
10600         }
10601 }
10602
10603 /* sysfs attributes for hwmon */
10604 static ssize_t tg3_show_temp(struct device *dev,
10605                              struct device_attribute *devattr, char *buf)
10606 {
10607         struct pci_dev *pdev = to_pci_dev(dev);
10608         struct net_device *netdev = pci_get_drvdata(pdev);
10609         struct tg3 *tp = netdev_priv(netdev);
10610         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10611         u32 temperature;
10612
10613         spin_lock_bh(&tp->lock);
10614         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10615                                 sizeof(temperature));
10616         spin_unlock_bh(&tp->lock);
10617         return sprintf(buf, "%u\n", temperature);
10618 }
10619
10620
/* Read-only hwmon attributes.  The SENSOR_DEVICE_ATTR index is the APE
 * scratchpad offset that tg3_show_temp() reads the value from.
 */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_MAX_OFFSET);
10627
/* Attribute list exported via the tg3_group sysfs attribute group
 * created in tg3_hwmon_open().
 */
static struct attribute *tg3_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	NULL	/* sysfs requires a NULL terminator */
};

static const struct attribute_group tg3_group = {
	.attrs = tg3_attributes,
};
10638
10639 static void tg3_hwmon_close(struct tg3 *tp)
10640 {
10641         if (tp->hwmon_dev) {
10642                 hwmon_device_unregister(tp->hwmon_dev);
10643                 tp->hwmon_dev = NULL;
10644                 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10645         }
10646 }
10647
10648 static void tg3_hwmon_open(struct tg3 *tp)
10649 {
10650         int i, err;
10651         u32 size = 0;
10652         struct pci_dev *pdev = tp->pdev;
10653         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10654
10655         tg3_sd_scan_scratchpad(tp, ocirs);
10656
10657         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10658                 if (!ocirs[i].src_data_length)
10659                         continue;
10660
10661                 size += ocirs[i].src_hdr_length;
10662                 size += ocirs[i].src_data_length;
10663         }
10664
10665         if (!size)
10666                 return;
10667
10668         /* Register hwmon sysfs hooks */
10669         err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10670         if (err) {
10671                 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10672                 return;
10673         }
10674
10675         tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10676         if (IS_ERR(tp->hwmon_dev)) {
10677                 tp->hwmon_dev = NULL;
10678                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10679                 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10680         }
10681 }
10682
10683
/* Accumulate the 32-bit hardware counter register REG into the low/high
 * halves of the 64-bit software counter PSTAT, carrying into ->high when
 * ->low wraps.  REG is evaluated exactly once (cached in __val), so the
 * macro is safe with side-effecting register reads.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
10690
/* Fold the chip's 32-bit MAC TX/RX and receive-list-placement statistics
 * registers into the 64-bit accumulators in tp->hw_stats.  Called once a
 * second from tg3_timer() with tp->lock held; the counters are only
 * collected while the link is up.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!tp->link_up)
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
	/* 5719/5720: once the total transmitted packet count exceeds the
	 * number of read DMA channels, clear the LSO read DMA workaround
	 * bit that was set during hardware init (5719_5720_RDMA_BUG).
	 */
	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
		u32 val;

		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
	}

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		/* On these chips the discard counter is not usable; derive
		 * rx_discards from the mbuf low-watermark attention bit
		 * instead (acknowledge it by writing it back), and mirror
		 * the result into mbuf_lwm_thresh_hit.
		 */
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
10755
/* Work around occasionally lost MSIs on some chips: if a vector has work
 * pending but its consumer indices have not moved since the previous
 * timer tick, assume the MSI was missed and invoke the handler directly.
 */
static void tg3_chk_missed_msi(struct tg3 *tp)
{
	u32 i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				if (tnapi->chk_msi_cnt < 1) {
					/* First stalled tick: give the
					 * interrupt one more chance before
					 * forcing the handler.  Note this
					 * returns (not continues), leaving
					 * later vectors' snapshots as-is.
					 */
					tnapi->chk_msi_cnt++;
					return;
				}
				tg3_msi(0, tnapi);
			}
		}
		/* Progress was made (or recovery fired): reset the stall
		 * counter and snapshot the consumer indices for next tick.
		 */
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}
10778
/* Driver maintenance timer (armed by tg3_timer_start(), period set in
 * tg3_timer_init()).  Handles missed-MSI recovery, once-per-second
 * statistics and link polling, and the ASF heartbeat, then re-arms
 * itself unconditionally via restart_timer.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* Skip all hardware access while an irq sync or a scheduled reset
	 * task is in flight; just re-arm and try again next tick.
	 */
	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
		goto restart_timer;

	spin_lock(&tp->lock);

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		/* BCM4785: Flush posted writes from GbE to host memory. */
		tr32(HOSTCC_MODE);
	}

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			/* Write DMA engine unexpectedly disabled -- the
			 * chip needs a full reset.  Schedule the reset
			 * task outside the lock and bail out.
			 */
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		/* EEE enable is deferred by a countdown set elsewhere. */
		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			/* Poll MAC_STATUS for link/PHY events instead of
			 * relying on interrupts.
			 */
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, false);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (tp->link_up &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!tp->link_up &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					/* Toggle the port-mode bits off and
					 * back on (40us settle each way)
					 * before re-running link setup.
					 */
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, false);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
10910
10911 static void tg3_timer_init(struct tg3 *tp)
10912 {
10913         if (tg3_flag(tp, TAGGED_STATUS) &&
10914             tg3_asic_rev(tp) != ASIC_REV_5717 &&
10915             !tg3_flag(tp, 57765_CLASS))
10916                 tp->timer_offset = HZ;
10917         else
10918                 tp->timer_offset = HZ / 10;
10919
10920         BUG_ON(tp->timer_offset > HZ);
10921
10922         tp->timer_multiplier = (HZ / tp->timer_offset);
10923         tp->asf_multiplier = (HZ / tp->timer_offset) *
10924                              TG3_FW_UPDATE_FREQ_SEC;
10925
10926         init_timer(&tp->timer);
10927         tp->timer.data = (unsigned long) tp;
10928         tp->timer.function = tg3_timer;
10929 }
10930
10931 static void tg3_timer_start(struct tg3 *tp)
10932 {
10933         tp->asf_counter   = tp->asf_multiplier;
10934         tp->timer_counter = tp->timer_multiplier;
10935
10936         tp->timer.expires = jiffies + tp->timer_offset;
10937         add_timer(&tp->timer);
10938 }
10939
/* Cancel the maintenance timer, waiting for a concurrently executing
 * tg3_timer() on another CPU to finish.  Must not be called with
 * tp->lock held: the handler takes that lock, so del_timer_sync()
 * would deadlock.
 */
static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}
10944
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * On failure the device is halted and closed; the sparse annotations
 * advertise that the lock is dropped and re-acquired on that path.
 * Returns the tg3_init_hw() result.
 */
static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		/* The teardown below (timer sync, dev_close) cannot run
		 * under tp->lock; drop it and re-take it before returning
		 * so the caller's locking assumptions still hold.
		 */
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
10968
/* Deferred full device reset, scheduled via tg3_reset_task_schedule()
 * (e.g. from tg3_timer() when the write DMA engine appears dead).
 * Runs in process context; clears RESET_TASK_PENDING when done.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	tg3_full_lock(tp, 0);

	/* Device was closed between scheduling and execution: nothing
	 * to reset.
	 */
	if (!netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	/* PHY and netif teardown happen outside the lock. */
	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		/* Recovering from a TX timeout: switch the TX/RX mailbox
		 * writers to flushing variants and note the reordering
		 * workaround (MBOX_WRITE_REORDER) before reinitializing.
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, true);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
11012
11013 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11014 {
11015         irq_handler_t fn;
11016         unsigned long flags;
11017         char *name;
11018         struct tg3_napi *tnapi = &tp->napi[irq_num];
11019
11020         if (tp->irq_cnt == 1)
11021                 name = tp->dev->name;
11022         else {
11023                 name = &tnapi->irq_lbl[0];
11024                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
11025                 name[IFNAMSIZ-1] = 0;
11026         }
11027
11028         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11029                 fn = tg3_msi;
11030                 if (tg3_flag(tp, 1SHOT_MSI))
11031                         fn = tg3_msi_1shot;
11032                 flags = 0;
11033         } else {
11034                 fn = tg3_interrupt;
11035                 if (tg3_flag(tp, TAGGED_STATUS))
11036                         fn = tg3_interrupt_tagged;
11037                 flags = IRQF_SHARED;
11038         }
11039
11040         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11041 }
11042
/* Verify that the chip can actually deliver an interrupt to the host.
 * Temporarily installs tg3_test_isr on vector 0, forces a coalescing
 * event, and polls for evidence of delivery; the normal handler is
 * restored before returning.  Returns 0 on success, -ENODEV if the
 * device is down, -EIO if no interrupt was observed, or a
 * request_irq() error.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Force an immediate coalescing event so the chip raises an
	 * interrupt now.
	 */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	/* Poll up to 5 x 10ms for delivery: either the interrupt mailbox
	 * went non-zero or the PCI interrupt mask bit was set by the ISR.
	 */
	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		/* Tagged status moved but no interrupt seen: ack the tag
		 * to allow another interrupt to be generated.
		 */
		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/* Restore the normal interrupt handler for vector 0. */
	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
11116
11117 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
11118  * successfully restored
11119  */
static int tg3_test_msi(struct tg3 *tp)
{
        int err;
        u16 pci_cmd;

        /* Nothing to test unless MSI is actually in use. */
        if (!tg3_flag(tp, USING_MSI))
                return 0;

        /* Turn off SERR reporting in case MSI terminates with Master
         * Abort.
         */
        pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
        pci_write_config_word(tp->pdev, PCI_COMMAND,
                              pci_cmd & ~PCI_COMMAND_SERR);

        err = tg3_test_interrupt(tp);

        /* Restore the saved PCI command word. */
        pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

        if (!err)
                return 0;

        /* other failures */
        if (err != -EIO)
                return err;

        /* MSI test failed, go back to INTx mode */
        netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
                    "to INTx mode. Please report this failure to the PCI "
                    "maintainer and include system chipset information\n");

        free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

        pci_disable_msi(tp->pdev);

        tg3_flag_clear(tp, USING_MSI);
        tp->napi[0].irq_vec = tp->pdev->irq;

        /* Re-request the IRQ; tg3_request_irq() picks INTx now that
         * USING_MSI is cleared.
         */
        err = tg3_request_irq(tp, 0);
        if (err)
                return err;

        /* Need to reset the chip because the MSI cycle may have terminated
         * with Master Abort.
         */
        tg3_full_lock(tp, 1);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        err = tg3_init_hw(tp, true);

        tg3_full_unlock(tp);

        if (err)
                free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

        return err;
}
11177
/* Load the firmware image named by tp->fw_needed and sanity-check its
 * header.  On success the blob is held in tp->fw and tp->fw_needed is
 * cleared.  Returns -ENOENT if the image cannot be loaded, -EINVAL if
 * the advertised length is inconsistent with the blob size.
 */
static int tg3_request_firmware(struct tg3 *tp)
{
        const struct tg3_firmware_hdr *fw_hdr;

        if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
                netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
                           tp->fw_needed);
                return -ENOENT;
        }

        fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

        /* Firmware blob starts with version numbers, followed by
         * start address and _full_ length including BSS sections
         * (which must be longer than the actual data, of course).
         */

        tp->fw_len = be32_to_cpu(fw_hdr->len);  /* includes bss */
        if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
                netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
                           tp->fw_len, tp->fw_needed);
                release_firmware(tp->fw);
                tp->fw = NULL;
                return -EINVAL;
        }

        /* We no longer need firmware; we have it. */
        tp->fw_needed = NULL;
        return 0;
}
11208
11209 static u32 tg3_irq_count(struct tg3 *tp)
11210 {
11211         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11212
11213         if (irq_cnt > 1) {
11214                 /* We want as many rx rings enabled as there are cpus.
11215                  * In multiqueue MSI-X mode, the first MSI-X vector
11216                  * only deals with link interrupts, etc, so we add
11217                  * one to the number of vectors we are requesting.
11218                  */
11219                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11220         }
11221
11222         return irq_cnt;
11223 }
11224
/* Attempt to put the device into MSI-X mode.
 *
 * Sizes the rx/tx queue counts from the user's requests (or the RSS
 * default), negotiates vectors with the PCI core — accepting a smaller
 * allocation if the full request cannot be met — and records the
 * resulting vectors in tp->napi[].  Sets ENABLE_RSS/ENABLE_TSS when
 * multiple queues are actually enabled.
 *
 * Returns true on success; false means the caller should fall back to
 * MSI or INTx.
 */
static bool tg3_enable_msix(struct tg3 *tp)
{
        int i, rc;
        struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];

        tp->txq_cnt = tp->txq_req;
        tp->rxq_cnt = tp->rxq_req;
        if (!tp->rxq_cnt)
                tp->rxq_cnt = netif_get_num_default_rss_queues();
        if (tp->rxq_cnt > tp->rxq_max)
                tp->rxq_cnt = tp->rxq_max;

        /* Disable multiple TX rings by default.  Simple round-robin hardware
         * scheduling of the TX rings can cause starvation of rings with
         * small packets when other rings have TSO or jumbo packets.
         */
        if (!tp->txq_req)
                tp->txq_cnt = 1;

        tp->irq_cnt = tg3_irq_count(tp);

        for (i = 0; i < tp->irq_max; i++) {
                msix_ent[i].entry  = i;
                msix_ent[i].vector = 0;
        }

        rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
        if (rc < 0) {
                return false;
        } else if (rc != 0) {
                /* Positive rc = the number of vectors actually available;
                 * retry with that count and shrink the queue config to fit.
                 */
                if (pci_enable_msix(tp->pdev, msix_ent, rc))
                        return false;
                netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
                              tp->irq_cnt, rc);
                tp->irq_cnt = rc;
                tp->rxq_cnt = max(rc - 1, 1);
                if (tp->txq_cnt)
                        tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
        }

        for (i = 0; i < tp->irq_max; i++)
                tp->napi[i].irq_vec = msix_ent[i].vector;

        if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
                pci_disable_msix(tp->pdev);
                return false;
        }

        /* With a single vector there is no multiqueue setup to do. */
        if (tp->irq_cnt == 1)
                return true;

        tg3_flag_set(tp, ENABLE_RSS);

        if (tp->txq_cnt > 1)
                tg3_flag_set(tp, ENABLE_TSS);

        netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);

        return true;
}
11285
/* Select and configure the interrupt scheme for the device: MSI-X if
 * supported, otherwise MSI, otherwise legacy INTx.  Also programs
 * MSGINT_MODE for the chosen MSI/MSI-X configuration and falls back to
 * a single-queue INTx setup when neither can be used.
 */
static void tg3_ints_init(struct tg3 *tp)
{
        if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
            !tg3_flag(tp, TAGGED_STATUS)) {
                /* All MSI supporting chips should support tagged
                 * status.  Assert that this is the case.
                 */
                netdev_warn(tp->dev,
                            "MSI without TAGGED_STATUS? Not using MSI\n");
                goto defcfg;
        }

        if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
                tg3_flag_set(tp, USING_MSIX);
        else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
                tg3_flag_set(tp, USING_MSI);

        if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
                u32 msi_mode = tr32(MSGINT_MODE);
                if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
                        msi_mode |= MSGINT_MODE_MULTIVEC_EN;
                /* One-shot mode is only enabled when the 1SHOT_MSI flag
                 * says the chip supports it.
                 */
                if (!tg3_flag(tp, 1SHOT_MSI))
                        msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
                tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
        }
defcfg:
        if (!tg3_flag(tp, USING_MSIX)) {
                tp->irq_cnt = 1;
                tp->napi[0].irq_vec = tp->pdev->irq;
        }

        /* Single-vector operation implies single rx and tx queues. */
        if (tp->irq_cnt == 1) {
                tp->txq_cnt = 1;
                tp->rxq_cnt = 1;
                netif_set_real_num_tx_queues(tp->dev, 1);
                netif_set_real_num_rx_queues(tp->dev, 1);
        }
}
11324
11325 static void tg3_ints_fini(struct tg3 *tp)
11326 {
11327         if (tg3_flag(tp, USING_MSIX))
11328                 pci_disable_msix(tp->pdev);
11329         else if (tg3_flag(tp, USING_MSI))
11330                 pci_disable_msi(tp->pdev);
11331         tg3_flag_clear(tp, USING_MSI);
11332         tg3_flag_clear(tp, USING_MSIX);
11333         tg3_flag_clear(tp, ENABLE_RSS);
11334         tg3_flag_clear(tp, ENABLE_TSS);
11335 }
11336
/* Bring the device fully up: configure interrupts, allocate rings and
 * NAPI contexts, request IRQs, initialize the hardware, optionally run
 * the MSI delivery self-test, and start the timers/queues.
 *
 * @reset_phy: whether tg3_init_hw() should also reset the PHY.
 * @test_irq:  run tg3_test_msi() when MSI is in use.
 * @init:      first-time bring-up (APE state change, PTP init) versus
 *             resume (PTP resume).
 *
 * Returns 0 on success or a negative errno, unwinding all partial
 * setup via the goto cleanup chain at the bottom.
 */
static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
                     bool init)
{
        struct net_device *dev = tp->dev;
        int i, err;

        /*
         * Setup interrupts first so we know how
         * many NAPI resources to allocate
         */
        tg3_ints_init(tp);

        tg3_rss_check_indir_tbl(tp);

        /* The placement of this call is tied
         * to the setup and use of Host TX descriptors.
         */
        err = tg3_alloc_consistent(tp);
        if (err)
                goto out_ints_fini;

        tg3_napi_init(tp);

        tg3_napi_enable(tp);

        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];
                err = tg3_request_irq(tp, i);
                if (err) {
                        /* Free the IRQs already acquired before bailing. */
                        for (i--; i >= 0; i--) {
                                tnapi = &tp->napi[i];
                                free_irq(tnapi->irq_vec, tnapi);
                        }
                        goto out_napi_fini;
                }
        }

        tg3_full_lock(tp, 0);

        if (init)
                tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

        err = tg3_init_hw(tp, reset_phy);
        if (err) {
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                tg3_free_rings(tp);
        }

        tg3_full_unlock(tp);

        if (err)
                goto out_free_irq;

        if (test_irq && tg3_flag(tp, USING_MSI)) {
                err = tg3_test_msi(tp);

                if (err) {
                        /* MSI test failed and INTx fallback also failed;
                         * tear everything down.
                         */
                        tg3_full_lock(tp, 0);
                        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                        tg3_free_rings(tp);
                        tg3_full_unlock(tp);

                        goto out_napi_fini;
                }

                if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
                        u32 val = tr32(PCIE_TRANSACTION_CFG);

                        tw32(PCIE_TRANSACTION_CFG,
                             val | PCIE_TRANS_CFG_1SHOT_MSI);
                }
        }

        tg3_phy_start(tp);

        tg3_hwmon_open(tp);

        tg3_full_lock(tp, 0);

        tg3_timer_start(tp);
        tg3_flag_set(tp, INIT_COMPLETE);
        tg3_enable_ints(tp);

        if (init)
                tg3_ptp_init(tp);
        else
                tg3_ptp_resume(tp);


        tg3_full_unlock(tp);

        netif_tx_start_all_queues(dev);

        /*
         * Reset loopback feature if it was turned on while the device was down
         * make sure that it's installed properly now.
         */
        if (dev->features & NETIF_F_LOOPBACK)
                tg3_set_loopback(dev, dev->features);

        return 0;

out_free_irq:
        for (i = tp->irq_cnt - 1; i >= 0; i--) {
                struct tg3_napi *tnapi = &tp->napi[i];
                free_irq(tnapi->irq_vec, tnapi);
        }

out_napi_fini:
        tg3_napi_disable(tp);
        tg3_napi_fini(tp);
        tg3_free_consistent(tp);

out_ints_fini:
        tg3_ints_fini(tp);

        return err;
}
11455
/* Bring the device fully down, reversing tg3_start(): stop the reset
 * task, timers and PHY, halt the chip under the full lock, then free
 * IRQs, interrupt vectors, NAPI contexts and DMA memory.  The ordering
 * here mirrors the bring-up sequence in reverse.
 */
static void tg3_stop(struct tg3 *tp)
{
        int i;

        tg3_reset_task_cancel(tp);
        tg3_netif_stop(tp);

        tg3_timer_stop(tp);

        tg3_hwmon_close(tp);

        tg3_phy_stop(tp);

        tg3_full_lock(tp, 1);

        tg3_disable_ints(tp);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tg3_free_rings(tp);
        tg3_flag_clear(tp, INIT_COMPLETE);

        tg3_full_unlock(tp);

        /* Free per-vector IRQs in reverse order of acquisition. */
        for (i = tp->irq_cnt - 1; i >= 0; i--) {
                struct tg3_napi *tnapi = &tp->napi[i];
                free_irq(tnapi->irq_vec, tnapi);
        }

        tg3_ints_fini(tp);

        tg3_napi_fini(tp);

        tg3_free_consistent(tp);
}
11490
/* ndo_open handler.  Loads any required firmware (degrading EEE or TSO
 * capability if the load fails, depending on the chip), powers the
 * device up, starts it via tg3_start(), and registers the PTP clock
 * when the hardware supports it.
 */
static int tg3_open(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        int err;

        if (tp->fw_needed) {
                err = tg3_request_firmware(tp);
                if (tg3_asic_rev(tp) == ASIC_REV_57766) {
                        /* On 57766 the firmware only affects EEE; toggle the
                         * capability flag rather than failing the open.
                         */
                        if (err) {
                                netdev_warn(tp->dev, "EEE capability disabled\n");
                                tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
                        } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
                                netdev_warn(tp->dev, "EEE capability restored\n");
                                tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
                        }
                } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
                        /* 5701 A0 cannot run without its firmware. */
                        if (err)
                                return err;
                } else if (err) {
                        /* Other chips: firmware only enables TSO; run
                         * without it if the load failed.
                         */
                        netdev_warn(tp->dev, "TSO capability disabled\n");
                        tg3_flag_clear(tp, TSO_CAPABLE);
                } else if (!tg3_flag(tp, TSO_CAPABLE)) {
                        netdev_notice(tp->dev, "TSO capability restored\n");
                        tg3_flag_set(tp, TSO_CAPABLE);
                }
        }

        tg3_carrier_off(tp);

        err = tg3_power_up(tp);
        if (err)
                return err;

        tg3_full_lock(tp, 0);

        tg3_disable_ints(tp);
        tg3_flag_clear(tp, INIT_COMPLETE);

        tg3_full_unlock(tp);

        err = tg3_start(tp,
                        !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
                        true, true);
        if (err) {
                /* Bring-up failed: drop auxiliary power and put the
                 * device back to D3hot.
                 */
                tg3_frob_aux_power(tp, false);
                pci_set_power_state(tp->pdev, PCI_D3hot);
        }

        if (tg3_flag(tp, PTP_CAPABLE)) {
                tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
                                                   &tp->pdev->dev);
                /* PTP registration failure is not fatal to the open. */
                if (IS_ERR(tp->ptp_clock))
                        tp->ptp_clock = NULL;
        }

        return err;
}
11548
/* ndo_stop handler.  Unregisters the PTP clock, stops the device,
 * clears the saved statistics baselines, and prepares the hardware
 * for power-down.  Always returns 0.
 */
static int tg3_close(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        tg3_ptp_fini(tp);

        tg3_stop(tp);

        /* Clear stats across close / open calls */
        memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
        memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));

        tg3_power_down_prepare(tp);

        tg3_carrier_off(tp);

        return 0;
}
11567
11568 static inline u64 get_stat64(tg3_stat64_t *val)
11569 {
11570        return ((u64)val->high << 32) | ((u64)val->low);
11571 }
11572
/* Return the cumulative rx CRC error count.
 *
 * On 5700/5701 copper parts the MAC statistics block is not used;
 * instead the PHY's CRC counter is read (and CRC counting enabled via
 * MII_TG3_TEST1) and accumulated in tp->phy_crc_errors.  All other
 * configurations read the hardware rx_fcs_errors counter.
 */
static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
            (tg3_asic_rev(tp) == ASIC_REV_5700 ||
             tg3_asic_rev(tp) == ASIC_REV_5701)) {
                u32 val;

                if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
                        tg3_writephy(tp, MII_TG3_TEST1,
                                     val | MII_TG3_TEST1_CRC_EN);
                        /* Reading the counter also clears it in the PHY. */
                        tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
                } else
                        val = 0;

                tp->phy_crc_errors += val;

                return tp->phy_crc_errors;
        }

        return get_stat64(&hw_stats->rx_fcs_errors);
}
11596
/* Add the live hardware counter for @member to the value saved across
 * the last close/open cycle.  Relies on locals named estats,
 * old_estats and hw_stats being in scope at the expansion site.
 */
#define ESTAT_ADD(member) \
        estats->member =        old_estats->member + \
                                get_stat64(&hw_stats->member)
11600
/* Fill @estats with every ethtool statistic: the live hardware counter
 * plus the value accumulated before the last close (estats_prev), so
 * totals survive close/open cycles.  See the ESTAT_ADD macro above.
 */
static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
{
        struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        /* Receive-side counters. */
        ESTAT_ADD(rx_octets);
        ESTAT_ADD(rx_fragments);
        ESTAT_ADD(rx_ucast_packets);
        ESTAT_ADD(rx_mcast_packets);
        ESTAT_ADD(rx_bcast_packets);
        ESTAT_ADD(rx_fcs_errors);
        ESTAT_ADD(rx_align_errors);
        ESTAT_ADD(rx_xon_pause_rcvd);
        ESTAT_ADD(rx_xoff_pause_rcvd);
        ESTAT_ADD(rx_mac_ctrl_rcvd);
        ESTAT_ADD(rx_xoff_entered);
        ESTAT_ADD(rx_frame_too_long_errors);
        ESTAT_ADD(rx_jabbers);
        ESTAT_ADD(rx_undersize_packets);
        ESTAT_ADD(rx_in_length_errors);
        ESTAT_ADD(rx_out_length_errors);
        ESTAT_ADD(rx_64_or_less_octet_packets);
        ESTAT_ADD(rx_65_to_127_octet_packets);
        ESTAT_ADD(rx_128_to_255_octet_packets);
        ESTAT_ADD(rx_256_to_511_octet_packets);
        ESTAT_ADD(rx_512_to_1023_octet_packets);
        ESTAT_ADD(rx_1024_to_1522_octet_packets);
        ESTAT_ADD(rx_1523_to_2047_octet_packets);
        ESTAT_ADD(rx_2048_to_4095_octet_packets);
        ESTAT_ADD(rx_4096_to_8191_octet_packets);
        ESTAT_ADD(rx_8192_to_9022_octet_packets);

        /* Transmit-side counters. */
        ESTAT_ADD(tx_octets);
        ESTAT_ADD(tx_collisions);
        ESTAT_ADD(tx_xon_sent);
        ESTAT_ADD(tx_xoff_sent);
        ESTAT_ADD(tx_flow_control);
        ESTAT_ADD(tx_mac_errors);
        ESTAT_ADD(tx_single_collisions);
        ESTAT_ADD(tx_mult_collisions);
        ESTAT_ADD(tx_deferred);
        ESTAT_ADD(tx_excessive_collisions);
        ESTAT_ADD(tx_late_collisions);
        ESTAT_ADD(tx_collide_2times);
        ESTAT_ADD(tx_collide_3times);
        ESTAT_ADD(tx_collide_4times);
        ESTAT_ADD(tx_collide_5times);
        ESTAT_ADD(tx_collide_6times);
        ESTAT_ADD(tx_collide_7times);
        ESTAT_ADD(tx_collide_8times);
        ESTAT_ADD(tx_collide_9times);
        ESTAT_ADD(tx_collide_10times);
        ESTAT_ADD(tx_collide_11times);
        ESTAT_ADD(tx_collide_12times);
        ESTAT_ADD(tx_collide_13times);
        ESTAT_ADD(tx_collide_14times);
        ESTAT_ADD(tx_collide_15times);
        ESTAT_ADD(tx_ucast_packets);
        ESTAT_ADD(tx_mcast_packets);
        ESTAT_ADD(tx_bcast_packets);
        ESTAT_ADD(tx_carrier_sense_errors);
        ESTAT_ADD(tx_discards);
        ESTAT_ADD(tx_errors);

        /* DMA and receive-list state machine counters. */
        ESTAT_ADD(dma_writeq_full);
        ESTAT_ADD(dma_write_prioq_full);
        ESTAT_ADD(rxbds_empty);
        ESTAT_ADD(rx_discards);
        ESTAT_ADD(rx_errors);
        ESTAT_ADD(rx_threshold_hit);

        ESTAT_ADD(dma_readq_full);
        ESTAT_ADD(dma_read_prioq_full);
        ESTAT_ADD(tx_comp_queue_full);

        /* Host coalescing and interrupt counters. */
        ESTAT_ADD(ring_set_send_prod_index);
        ESTAT_ADD(ring_status_update);
        ESTAT_ADD(nic_irqs);
        ESTAT_ADD(nic_avoided_irqs);
        ESTAT_ADD(nic_tx_threshold_hit);

        ESTAT_ADD(mbuf_lwm_thresh_hit);
}
11684
/* Translate the hardware statistics block into the standard netdev
 * rtnl_link_stats64 counters, adding in the values saved before the
 * last close (net_stats_prev) so totals survive close/open cycles.
 */
static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
        struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        stats->rx_packets = old_stats->rx_packets +
                get_stat64(&hw_stats->rx_ucast_packets) +
                get_stat64(&hw_stats->rx_mcast_packets) +
                get_stat64(&hw_stats->rx_bcast_packets);

        stats->tx_packets = old_stats->tx_packets +
                get_stat64(&hw_stats->tx_ucast_packets) +
                get_stat64(&hw_stats->tx_mcast_packets) +
                get_stat64(&hw_stats->tx_bcast_packets);

        stats->rx_bytes = old_stats->rx_bytes +
                get_stat64(&hw_stats->rx_octets);
        stats->tx_bytes = old_stats->tx_bytes +
                get_stat64(&hw_stats->tx_octets);

        stats->rx_errors = old_stats->rx_errors +
                get_stat64(&hw_stats->rx_errors);
        stats->tx_errors = old_stats->tx_errors +
                get_stat64(&hw_stats->tx_errors) +
                get_stat64(&hw_stats->tx_mac_errors) +
                get_stat64(&hw_stats->tx_carrier_sense_errors) +
                get_stat64(&hw_stats->tx_discards);

        stats->multicast = old_stats->multicast +
                get_stat64(&hw_stats->rx_mcast_packets);
        stats->collisions = old_stats->collisions +
                get_stat64(&hw_stats->tx_collisions);

        stats->rx_length_errors = old_stats->rx_length_errors +
                get_stat64(&hw_stats->rx_frame_too_long_errors) +
                get_stat64(&hw_stats->rx_undersize_packets);

        stats->rx_over_errors = old_stats->rx_over_errors +
                get_stat64(&hw_stats->rxbds_empty);
        stats->rx_frame_errors = old_stats->rx_frame_errors +
                get_stat64(&hw_stats->rx_align_errors);
        stats->tx_aborted_errors = old_stats->tx_aborted_errors +
                get_stat64(&hw_stats->tx_discards);
        stats->tx_carrier_errors = old_stats->tx_carrier_errors +
                get_stat64(&hw_stats->tx_carrier_sense_errors);

        /* CRC errors may come from the PHY on some chips; see
         * tg3_calc_crc_errors().
         */
        stats->rx_crc_errors = old_stats->rx_crc_errors +
                tg3_calc_crc_errors(tp);

        stats->rx_missed_errors = old_stats->rx_missed_errors +
                get_stat64(&hw_stats->rx_discards);

        /* Software-maintained drop counters (not from the hw block). */
        stats->rx_dropped = tp->rx_dropped;
        stats->tx_dropped = tp->tx_dropped;
}
11740
/* ethtool get_regs_len: size of the register dump buffer. */
static int tg3_get_regs_len(struct net_device *dev)
{
        return TG3_REG_BLK_SIZE;
}
11745
/* ethtool get_regs: dump the legacy register block into @_p.  The
 * buffer is zeroed first; if the device is in low-power state the dump
 * is skipped (registers are not accessible) and the buffer stays zero.
 */
static void tg3_get_regs(struct net_device *dev,
                struct ethtool_regs *regs, void *_p)
{
        struct tg3 *tp = netdev_priv(dev);

        regs->version = 0;

        memset(_p, 0, TG3_REG_BLK_SIZE);

        if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
                return;

        tg3_full_lock(tp, 0);

        tg3_dump_legacy_regs(tp, (u32 *)_p);

        tg3_full_unlock(tp);
}
11764
/* ethtool get_eeprom_len: size of the device's NVRAM. */
static int tg3_get_eeprom_len(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        return tp->nvram_size;
}
11771
/* ethtool get_eeprom: read an arbitrary byte range from NVRAM.
 *
 * NVRAM is only readable in aligned 4-byte words, so the request is
 * split into an unaligned head, a run of whole words, and an unaligned
 * tail.  eeprom->len is updated as bytes are copied so a partial count
 * is reported even on mid-transfer failure.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
        struct tg3 *tp = netdev_priv(dev);
        int ret;
        u8  *pd;
        u32 i, offset, len, b_offset, b_count;
        __be32 val;

        if (tg3_flag(tp, NO_NVRAM))
                return -EINVAL;

        offset = eeprom->offset;
        len = eeprom->len;
        eeprom->len = 0;

        eeprom->magic = TG3_EEPROM_MAGIC;

        if (offset & 3) {
                /* adjustments to start on required 4 byte boundary */
                b_offset = offset & 3;
                b_count = 4 - b_offset;
                if (b_count > len) {
                        /* i.e. offset=1 len=2 */
                        b_count = len;
                }
                /* Read the whole word containing the start and copy out
                 * only the requested bytes.
                 */
                ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
                if (ret)
                        return ret;
                memcpy(data, ((char *)&val) + b_offset, b_count);
                len -= b_count;
                offset += b_count;
                eeprom->len += b_count;
        }

        /* read bytes up to the last 4 byte boundary */
        pd = &data[eeprom->len];
        for (i = 0; i < (len - (len & 3)); i += 4) {
                ret = tg3_nvram_read_be32(tp, offset + i, &val);
                if (ret) {
                        eeprom->len += i;
                        return ret;
                }
                memcpy(pd + i, &val, 4);
        }
        eeprom->len += i;

        if (len & 3) {
                /* read last bytes not ending on 4 byte boundary */
                pd = &data[eeprom->len];
                b_count = len & 3;
                b_offset = offset + len - b_count;
                ret = tg3_nvram_read_be32(tp, b_offset, &val);
                if (ret)
                        return ret;
                memcpy(pd, &val, b_count);
                eeprom->len += b_count;
        }
        return 0;
}
11831
/* ethtool set_eeprom: write an arbitrary byte range to NVRAM.
 *
 * NVRAM writes must be 4-byte aligned, so unaligned head/tail bytes
 * are handled by reading the bordering words, merging the user data
 * into a temporary word-aligned buffer, and writing the padded range.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
        struct tg3 *tp = netdev_priv(dev);
        int ret;
        u32 offset, len, b_offset, odd_len;
        u8 *buf;
        __be32 start, end;

        if (tg3_flag(tp, NO_NVRAM) ||
            eeprom->magic != TG3_EEPROM_MAGIC)
                return -EINVAL;

        offset = eeprom->offset;
        len = eeprom->len;

        if ((b_offset = (offset & 3))) {
                /* adjustments to start on required 4 byte boundary */
                ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
                if (ret)
                        return ret;
                len += b_offset;
                offset &= ~3;
                if (len < 4)
                        len = 4;
        }

        odd_len = 0;
        if (len & 3) {
                /* adjustments to end on required 4 byte boundary */
                odd_len = 1;
                len = (len + 3) & ~3;
                ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
                if (ret)
                        return ret;
        }

        buf = data;
        if (b_offset || odd_len) {
                /* Build a word-aligned copy: preserved border words at the
                 * edges, user data in the middle.
                 */
                buf = kmalloc(len, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;
                if (b_offset)
                        memcpy(buf, &start, 4);
                if (odd_len)
                        memcpy(buf+len-4, &end, 4);
                memcpy(buf + b_offset, data, eeprom->len);
        }

        ret = tg3_nvram_write_block(tp, offset, len, buf);

        if (buf != data)
                kfree(buf);

        return ret;
}
11887
/* ethtool get_settings: report supported/advertised link modes, port
 * type, speed/duplex and autoneg state.  Delegates to phylib when the
 * device uses an external PHY driver.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct tg3 *tp = netdev_priv(dev);

        if (tg3_flag(tp, USE_PHYLIB)) {
                struct phy_device *phydev;
                if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                        return -EAGAIN;
                phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
                return phy_ethtool_gset(phydev, cmd);
        }

        cmd->supported = (SUPPORTED_Autoneg);

        if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
                cmd->supported |= (SUPPORTED_1000baseT_Half |
                                   SUPPORTED_1000baseT_Full);

        /* Copper PHYs support 10/100 twisted pair; serdes ports are fibre. */
        if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
                cmd->supported |= (SUPPORTED_100baseT_Half |
                                  SUPPORTED_100baseT_Full |
                                  SUPPORTED_10baseT_Half |
                                  SUPPORTED_10baseT_Full |
                                  SUPPORTED_TP);
                cmd->port = PORT_TP;
        } else {
                cmd->supported |= SUPPORTED_FIBRE;
                cmd->port = PORT_FIBRE;
        }

        cmd->advertising = tp->link_config.advertising;
        if (tg3_flag(tp, PAUSE_AUTONEG)) {
                /* Map the rx/tx flow control configuration onto the
                 * Pause/Asym_Pause advertisement bits.
                 */
                if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
                        if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
                                cmd->advertising |= ADVERTISED_Pause;
                        } else {
                                cmd->advertising |= ADVERTISED_Pause |
                                                    ADVERTISED_Asym_Pause;
                        }
                } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
                        cmd->advertising |= ADVERTISED_Asym_Pause;
                }
        }
        if (netif_running(dev) && tp->link_up) {
                ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
                cmd->duplex = tp->link_config.active_duplex;
                cmd->lp_advertising = tp->link_config.rmt_adv;
                if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
                        if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
                                cmd->eth_tp_mdix = ETH_TP_MDI_X;
                        else
                                cmd->eth_tp_mdix = ETH_TP_MDI;
                }
        } else {
                /* Link down (or interface down): report unknown state. */
                ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
                cmd->duplex = DUPLEX_UNKNOWN;
                cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
        }
        cmd->phy_address = tp->phy_addr;
        cmd->transceiver = XCVR_INTERNAL;
        cmd->autoneg = tp->link_config.autoneg;
        cmd->maxtxpkt = 0;
        cmd->maxrxpkt = 0;
        return 0;
}
11953
/* ethtool .set_settings handler: apply a new link configuration.
 *
 * When the PHY is driven by phylib the request is delegated to
 * phy_ethtool_sset().  Otherwise the requested autoneg/speed/duplex
 * settings are validated against the device's capabilities, stored in
 * tp->link_config, and the PHY is reconfigured if the interface is up.
 * Returns 0 on success or a negative errno.
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		/* Cannot configure until the PHY has been attached. */
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_sset(phydev, cmd);
	}

	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	/* A forced link requires an explicit duplex setting. */
	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		/* Build the set of modes this device can advertise... */
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		/* ...and reject any request outside that set. */
		if (cmd->advertising & ~mask)
			return -EINVAL;

		/* Keep only the speed/duplex bits of the request. */
		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			/* SerDes links only accept forced 1000/full. */
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_UNKNOWN;
		tp->link_config.duplex = DUPLEX_UNKNOWN;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->duplex;
	}

	/* Remember that the user overrode the default configuration. */
	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;

	tg3_warn_mgmt_link_flap(tp);

	if (netif_running(dev))
		tg3_setup_phy(tp, true);

	tg3_full_unlock(tp);

	return 0;
}
12044
12045 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12046 {
12047         struct tg3 *tp = netdev_priv(dev);
12048
12049         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12050         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12051         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12052         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12053 }
12054
12055 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12056 {
12057         struct tg3 *tp = netdev_priv(dev);
12058
12059         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12060                 wol->supported = WAKE_MAGIC;
12061         else
12062                 wol->supported = 0;
12063         wol->wolopts = 0;
12064         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12065                 wol->wolopts = WAKE_MAGIC;
12066         memset(&wol->sopass, 0, sizeof(wol->sopass));
12067 }
12068
12069 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12070 {
12071         struct tg3 *tp = netdev_priv(dev);
12072         struct device *dp = &tp->pdev->dev;
12073
12074         if (wol->wolopts & ~WAKE_MAGIC)
12075                 return -EINVAL;
12076         if ((wol->wolopts & WAKE_MAGIC) &&
12077             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12078                 return -EINVAL;
12079
12080         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12081
12082         spin_lock_bh(&tp->lock);
12083         if (device_may_wakeup(dp))
12084                 tg3_flag_set(tp, WOL_ENABLE);
12085         else
12086                 tg3_flag_clear(tp, WOL_ENABLE);
12087         spin_unlock_bh(&tp->lock);
12088
12089         return 0;
12090 }
12091
12092 static u32 tg3_get_msglevel(struct net_device *dev)
12093 {
12094         struct tg3 *tp = netdev_priv(dev);
12095         return tp->msg_enable;
12096 }
12097
12098 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12099 {
12100         struct tg3 *tp = netdev_priv(dev);
12101         tp->msg_enable = value;
12102 }
12103
/* ethtool .nway_reset handler: restart link autonegotiation.
 *
 * Fails with -EAGAIN if the interface is down (or the phylib PHY is
 * not attached yet) and with -EINVAL on SerDes PHYs or when autoneg
 * is not currently enabled.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	tg3_warn_mgmt_link_flap(tp);

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* BMCR is read twice and the first result is discarded.
		 * NOTE(review): presumably a dummy read to flush a stale
		 * value from the PHY -- confirm before removing.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			/* Restart autoneg; forcing ANENABLE on also moves a
			 * parallel-detected link back into autoneg mode.
			 */
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
12139
12140 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12141 {
12142         struct tg3 *tp = netdev_priv(dev);
12143
12144         ering->rx_max_pending = tp->rx_std_ring_mask;
12145         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12146                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12147         else
12148                 ering->rx_jumbo_max_pending = 0;
12149
12150         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12151
12152         ering->rx_pending = tp->rx_pending;
12153         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12154                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12155         else
12156                 ering->rx_jumbo_pending = 0;
12157
12158         ering->tx_pending = tp->napi[0].tx_pending;
12159 }
12160
/* ethtool .set_ringparam handler: resize the RX/TX rings.
 *
 * Validates the requested sizes against the hardware limits, then (if
 * the interface is up) quiesces the device, applies the new sizes and
 * restarts the hardware.  Returns 0 on success or a negative errno.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;

	/* The TX ring must exceed MAX_SKB_FRAGS entries (three times
	 * that on TSO_BUG chips).
	 */
	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		/* Quiesce the PHY and the datapath before resizing. */
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips can only post up to 64 standard RX descriptors. */
	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	/* Every TX queue uses the same ring size. */
	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, false);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
12206
12207 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12208 {
12209         struct tg3 *tp = netdev_priv(dev);
12210
12211         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12212
12213         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12214                 epause->rx_pause = 1;
12215         else
12216                 epause->rx_pause = 0;
12217
12218         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12219                 epause->tx_pause = 1;
12220         else
12221                 epause->tx_pause = 0;
12222 }
12223
/* ethtool .set_pauseparam handler: configure RX/TX flow control.
 *
 * On phylib-driven PHYs the pause advertisement bits are pushed into
 * the PHY and autonegotiation is restarted when they change; on the
 * legacy path the settings are stored and the hardware is restarted
 * to apply them.  Returns 0 on success or a negative errno.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE)
		tg3_warn_mgmt_link_flap(tp);

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

		/* Asymmetric pause (rx != tx) requires Asym_Pause support. */
		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		/* Translate the rx/tx request into pause advertisement
		 * bits and the driver's flow-control flags.
		 */
		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			/* PHY not attached yet: just record the new
			 * advertisement for when it connects.
			 */
			tp->link_config.advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		/* Restart the hardware so the new settings take effect. */
		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, false);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	/* Remember that the user overrode the default configuration. */
	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;

	return err;
}
12330
12331 static int tg3_get_sset_count(struct net_device *dev, int sset)
12332 {
12333         switch (sset) {
12334         case ETH_SS_TEST:
12335                 return TG3_NUM_TEST;
12336         case ETH_SS_STATS:
12337                 return TG3_NUM_STATS;
12338         default:
12339                 return -EOPNOTSUPP;
12340         }
12341 }
12342
12343 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12344                          u32 *rules __always_unused)
12345 {
12346         struct tg3 *tp = netdev_priv(dev);
12347
12348         if (!tg3_flag(tp, SUPPORT_MSIX))
12349                 return -EOPNOTSUPP;
12350
12351         switch (info->cmd) {
12352         case ETHTOOL_GRXRINGS:
12353                 if (netif_running(tp->dev))
12354                         info->data = tp->rxq_cnt;
12355                 else {
12356                         info->data = num_online_cpus();
12357                         if (info->data > TG3_RSS_MAX_NUM_QS)
12358                                 info->data = TG3_RSS_MAX_NUM_QS;
12359                 }
12360
12361                 /* The first interrupt vector only
12362                  * handles link interrupts.
12363                  */
12364                 info->data -= 1;
12365                 return 0;
12366
12367         default:
12368                 return -EOPNOTSUPP;
12369         }
12370 }
12371
12372 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12373 {
12374         u32 size = 0;
12375         struct tg3 *tp = netdev_priv(dev);
12376
12377         if (tg3_flag(tp, SUPPORT_MSIX))
12378                 size = TG3_RSS_INDIR_TBL_SIZE;
12379
12380         return size;
12381 }
12382
12383 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
12384 {
12385         struct tg3 *tp = netdev_priv(dev);
12386         int i;
12387
12388         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12389                 indir[i] = tp->rss_ind_tbl[i];
12390
12391         return 0;
12392 }
12393
12394 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
12395 {
12396         struct tg3 *tp = netdev_priv(dev);
12397         size_t i;
12398
12399         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12400                 tp->rss_ind_tbl[i] = indir[i];
12401
12402         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12403                 return 0;
12404
12405         /* It is legal to write the indirection
12406          * table while the device is running.
12407          */
12408         tg3_full_lock(tp, 0);
12409         tg3_rss_write_indir_tbl(tp);
12410         tg3_full_unlock(tp);
12411
12412         return 0;
12413 }
12414
12415 static void tg3_get_channels(struct net_device *dev,
12416                              struct ethtool_channels *channel)
12417 {
12418         struct tg3 *tp = netdev_priv(dev);
12419         u32 deflt_qs = netif_get_num_default_rss_queues();
12420
12421         channel->max_rx = tp->rxq_max;
12422         channel->max_tx = tp->txq_max;
12423
12424         if (netif_running(dev)) {
12425                 channel->rx_count = tp->rxq_cnt;
12426                 channel->tx_count = tp->txq_cnt;
12427         } else {
12428                 if (tp->rxq_req)
12429                         channel->rx_count = tp->rxq_req;
12430                 else
12431                         channel->rx_count = min(deflt_qs, tp->rxq_max);
12432
12433                 if (tp->txq_req)
12434                         channel->tx_count = tp->txq_req;
12435                 else
12436                         channel->tx_count = min(deflt_qs, tp->txq_max);
12437         }
12438 }
12439
/* ethtool .set_channels handler: set the number of RX and TX queues.
 *
 * The requested counts are validated against the device maximums and
 * recorded.  If the interface is up, the device is fully stopped and
 * restarted so the new queue counts take effect immediately;
 * otherwise they apply at the next open.
 */
static int tg3_set_channels(struct net_device *dev,
			    struct ethtool_channels *channel)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Multiple queues require MSI-X. */
	if (!tg3_flag(tp, SUPPORT_MSIX))
		return -EOPNOTSUPP;

	if (channel->rx_count > tp->rxq_max ||
	    channel->tx_count > tp->txq_max)
		return -EINVAL;

	tp->rxq_req = channel->rx_count;
	tp->txq_req = channel->tx_count;

	if (!netif_running(dev))
		return 0;

	tg3_stop(tp);

	tg3_carrier_off(tp);

	tg3_start(tp, true, false, false);

	return 0;
}
12466
12467 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12468 {
12469         switch (stringset) {
12470         case ETH_SS_STATS:
12471                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12472                 break;
12473         case ETH_SS_TEST:
12474                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12475                 break;
12476         default:
12477                 WARN_ON(1);     /* we need a WARN() */
12478                 break;
12479         }
12480 }
12481
/* ethtool .set_phys_id handler: blink the adapter LEDs so the port can
 * be physically identified.
 *
 * Returning 1 for ETHTOOL_ID_ACTIVE asks the ethtool core to call back
 * with ID_ON/ID_OFF once per second; ID_INACTIVE restores the normal
 * LED configuration from tp->led_ctrl.
 */
static int tg3_set_phys_id(struct net_device *dev,
			    enum ethtool_phys_id_state state)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(tp->dev))
		return -EAGAIN;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		/* Override the link/traffic LEDs and force them all on. */
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_1000MBPS_ON |
		     LED_CTRL_100MBPS_ON |
		     LED_CTRL_10MBPS_ON |
		     LED_CTRL_TRAFFIC_OVERRIDE |
		     LED_CTRL_TRAFFIC_BLINK |
		     LED_CTRL_TRAFFIC_LED);
		break;

	case ETHTOOL_ID_OFF:
		/* Keep the override active but light no LEDs. */
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_TRAFFIC_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		tw32(MAC_LED_CTRL, tp->led_ctrl);
		break;
	}

	return 0;
}
12516
12517 static void tg3_get_ethtool_stats(struct net_device *dev,
12518                                    struct ethtool_stats *estats, u64 *tmp_stats)
12519 {
12520         struct tg3 *tp = netdev_priv(dev);
12521
12522         if (tp->hw_stats)
12523                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12524         else
12525                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12526 }
12527
/* Read the adapter's VPD data.
 *
 * Prefers an extended-VPD block located via the NVRAM directory; falls
 * back to the fixed VPD area in NVRAM.  Devices without the EEPROM
 * magic are read through the PCI VPD capability instead.  Returns a
 * kmalloc()ed buffer (caller must kfree()) with the length stored in
 * *vpdlen, or NULL on any failure.  Data is kept in NVRAM byte order.
 */
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		/* Scan the NVRAM directory for an extended-VPD entry. */
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			/* Entry found: length (in words) comes from the
			 * entry, the data location from the next word.
			 */
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	/* No extended VPD: fall back to the fixed VPD area. */
	if (!offset || !len) {
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		/* No usable NVRAM: read via the PCI VPD capability,
		 * allowing up to three attempts for interrupted or
		 * timed-out partial reads.
		 */
		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
12603
/* NVRAM image sizes (in bytes) for the EEPROM and selfboot formats
 * verified by tg3_test_nvram() below.
 */
#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
#define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
#define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
#define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12613
/* 'ethtool -t' NVRAM test.
 *
 * Reads the NVRAM image and verifies its integrity: a byte checksum
 * for firmware selfboot images, per-byte parity for hardware selfboot
 * images, and calc_crc() checksums plus the VPD checksum keyword for
 * legacy EEPROM images.  Returns 0 if the contents are consistent,
 * a negative errno otherwise.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	/* Determine the image size from the magic/format/revision. */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			return 0;	/* unknown selfboot format: skip test */
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Read the whole image, preserving NVRAM byte order. */
	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		/* Firmware selfboot: the byte sum of the image must be 0. */
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				/* Bytes 0 and 8 each hold 7 parity bits. */
				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			} else if (i == 16) {
				int l;
				u8 msk;

				/* Bytes 16 and 17 hold 6 + 8 parity bits. */
				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		/* Each data byte together with its stored parity bit must
		 * have an odd total bit count.
		 */
		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	/* Legacy EEPROM image: verify the two calc_crc() checksums. */
	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	/* Finally verify the VPD read-only section's checksum keyword. */
	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			/* Sum from the start of the VPD through the
			 * checksum byte itself; the result must be zero.
			 */
			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}
12792
12793 #define TG3_SERDES_TIMEOUT_SEC  2
12794 #define TG3_COPPER_TIMEOUT_SEC  6
12795
12796 static int tg3_test_link(struct tg3 *tp)
12797 {
12798         int i, max;
12799
12800         if (!netif_running(tp->dev))
12801                 return -ENODEV;
12802
12803         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12804                 max = TG3_SERDES_TIMEOUT_SEC;
12805         else
12806                 max = TG3_COPPER_TIMEOUT_SEC;
12807
12808         for (i = 0; i < max; i++) {
12809                 if (tp->link_up)
12810                         return 0;
12811
12812                 if (msleep_interruptible(1000))
12813                         break;
12814         }
12815
12816         return -EIO;
12817 }
12818
/* Only test the commonly used registers */
static int tg3_test_registers(struct tg3 *tp)
{
	/* For each table entry that applies to this chip class, verify
	 * that the bits in read_mask keep their value and that the bits
	 * in write_mask can be both cleared and set.  The original
	 * register value is restored after each test.  Returns 0 on
	 * success, -EIO on the first mismatch.
	 */
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;	/* chip-class applicability of this entry */
#define TG3_FL_5705     0x1
#define TG3_FL_NOT_5705 0x2
#define TG3_FL_NOT_5788 0x4
#define TG3_FL_NOT_5750 0x8
		u32 read_mask;	/* bits expected to be read-only/stable */
		u32 write_mask;	/* bits expected to be read/write */
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		/* Table terminator: offset 0xffff ends the scan below. */
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	/* Classify the chip once so the per-entry flag checks are cheap. */
	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries that do not apply to this chip class. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		/* Restore the register before moving on to the next entry. */
		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	/* Put the failing register back the way we found it. */
	tw32(offset, save_val);
	return -EIO;
}
13039
13040 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13041 {
13042         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13043         int i;
13044         u32 j;
13045
13046         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13047                 for (j = 0; j < len; j += 4) {
13048                         u32 val;
13049
13050                         tg3_write_mem(tp, offset + j, test_pattern[i]);
13051                         tg3_read_mem(tp, offset + j, &val);
13052                         if (val != test_pattern[i])
13053                                 return -EIO;
13054                 }
13055         }
13056         return 0;
13057 }
13058
13059 static int tg3_test_memory(struct tg3 *tp)
13060 {
13061         static struct mem_entry {
13062                 u32 offset;
13063                 u32 len;
13064         } mem_tbl_570x[] = {
13065                 { 0x00000000, 0x00b50},
13066                 { 0x00002000, 0x1c000},
13067                 { 0xffffffff, 0x00000}
13068         }, mem_tbl_5705[] = {
13069                 { 0x00000100, 0x0000c},
13070                 { 0x00000200, 0x00008},
13071                 { 0x00004000, 0x00800},
13072                 { 0x00006000, 0x01000},
13073                 { 0x00008000, 0x02000},
13074                 { 0x00010000, 0x0e000},
13075                 { 0xffffffff, 0x00000}
13076         }, mem_tbl_5755[] = {
13077                 { 0x00000200, 0x00008},
13078                 { 0x00004000, 0x00800},
13079                 { 0x00006000, 0x00800},
13080                 { 0x00008000, 0x02000},
13081                 { 0x00010000, 0x0c000},
13082                 { 0xffffffff, 0x00000}
13083         }, mem_tbl_5906[] = {
13084                 { 0x00000200, 0x00008},
13085                 { 0x00004000, 0x00400},
13086                 { 0x00006000, 0x00400},
13087                 { 0x00008000, 0x01000},
13088                 { 0x00010000, 0x01000},
13089                 { 0xffffffff, 0x00000}
13090         }, mem_tbl_5717[] = {
13091                 { 0x00000200, 0x00008},
13092                 { 0x00010000, 0x0a000},
13093                 { 0x00020000, 0x13c00},
13094                 { 0xffffffff, 0x00000}
13095         }, mem_tbl_57765[] = {
13096                 { 0x00000200, 0x00008},
13097                 { 0x00004000, 0x00800},
13098                 { 0x00006000, 0x09800},
13099                 { 0x00010000, 0x0a000},
13100                 { 0xffffffff, 0x00000}
13101         };
13102         struct mem_entry *mem_tbl;
13103         int err = 0;
13104         int i;
13105
13106         if (tg3_flag(tp, 5717_PLUS))
13107                 mem_tbl = mem_tbl_5717;
13108         else if (tg3_flag(tp, 57765_CLASS) ||
13109                  tg3_asic_rev(tp) == ASIC_REV_5762)
13110                 mem_tbl = mem_tbl_57765;
13111         else if (tg3_flag(tp, 5755_PLUS))
13112                 mem_tbl = mem_tbl_5755;
13113         else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13114                 mem_tbl = mem_tbl_5906;
13115         else if (tg3_flag(tp, 5705_PLUS))
13116                 mem_tbl = mem_tbl_5705;
13117         else
13118                 mem_tbl = mem_tbl_570x;
13119
13120         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13121                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13122                 if (err)
13123                         break;
13124         }
13125
13126         return err;
13127 }
13128
/* Loopback TSO test parameters: the segment size used for the test
 * and the lengths of the canned IP/TCP headers below.
 */
#define TG3_TSO_MSS             500

#define TG3_TSO_IP_HDR_LEN      20
#define TG3_TSO_TCP_HDR_LEN     20
#define TG3_TSO_TCP_OPT_LEN     12

/* Pre-built frame contents for the TSO loopback test, starting at the
 * Ethernet type field.  tg3_run_loopback() copies this in after the
 * MAC addresses and patches the IPv4 total-length field at run time.
 */
static const u8 tg3_tso_header[] = {
0x08, 0x00,			/* ethertype: IPv4 */
0x45, 0x00, 0x00, 0x00,		/* IPv4: ver 4, IHL 5; tot_len patched later */
0x00, 0x00, 0x40, 0x00,		/* id 0, flags: DF */
0x40, 0x06, 0x00, 0x00,		/* TTL 64, proto TCP, csum 0 */
0x0a, 0x00, 0x00, 0x01,		/* src 10.0.0.1 */
0x0a, 0x00, 0x00, 0x02,		/* dst 10.0.0.2 */
0x0d, 0x00, 0xe0, 0x00,		/* TCP ports */
0x00, 0x00, 0x01, 0x00,		/* seq */
0x00, 0x00, 0x02, 0x00,		/* ack */
0x80, 0x10, 0x10, 0x00,		/* data offset 8 (20 + 12 opt bytes), flags */
0x14, 0x09, 0x00, 0x00,		/* window, checksum 0 */
0x01, 0x01, 0x08, 0x0a,		/* options: NOP, NOP, timestamp (kind 8) */
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,		/* timestamp option payload */
};
13151
/* Transmit one test frame (or a TSO burst) of pktsz bytes while the
 * device is in loopback mode and verify it comes back intact on the rx
 * ring.  Returns 0 when the frame(s) round-trip with the expected
 * payload, a negative errno on allocation/mapping failure or any
 * mismatch.  Caller must have the device configured for loopback.
 */
static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
{
	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
	u32 budget;
	struct sk_buff *skb;
	u8 *tx_data, *rx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;
	struct tg3_napi *tnapi, *rnapi;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	/* Default to vector 0; with RSS/TSS enabled the first rx/tx
	 * ring lives on vector 1 instead.
	 */
	tnapi = &tp->napi[0];
	rnapi = &tp->napi[0];
	if (tp->irq_cnt > 1) {
		if (tg3_flag(tp, ENABLE_RSS))
			rnapi = &tp->napi[1];
		if (tg3_flag(tp, ENABLE_TSS))
			tnapi = &tp->napi[1];
	}
	coal_now = tnapi->coal_now | rnapi->coal_now;

	err = -EIO;

	tx_len = pktsz;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	/* Destination MAC = our own address, source left zeroed. */
	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

	if (tso_loopback) {
		/* Build the canned IP/TCP headers and the TSO descriptor
		 * flags appropriate for this chip's TSO generation.
		 */
		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
			      TG3_TSO_TCP_OPT_LEN;

		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
		       sizeof(tg3_tso_header));
		mss = TG3_TSO_MSS;

		/* Expect one rx completion per generated segment. */
		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

		/* Set the total length field in the IP header */
		iph->tot_len = htons((u16)(mss + hdr_len));

		base_flags = (TXD_FLAG_CPU_PRE_DMA |
			      TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			/* HW TSO chips compute the TCP checksum; it must
			 * start out zeroed.
			 */
			struct tcphdr *th;
			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
			th = (struct tcphdr *)&tx_data[val];
			th->check = 0;
		} else
			base_flags |= TXD_FLAG_TCPUDP_CSUM;

		/* Encode the header length into mss/base_flags in the
		 * layout each TSO hardware generation expects.
		 */
		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 tg3_asic_rev(tp) == ASIC_REV_5705) {
			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
		} else {
			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
		}

		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
	} else {
		num_pkts = 1;
		data_off = ETH_HLEN;

		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
		    tx_len > VLAN_ETH_FRAME_LEN)
			base_flags |= TXD_FLAG_JMB_PKT;
	}

	/* Fill the payload with a predictable ramp so the receive side
	 * can verify every byte.
	 */
	for (i = data_off; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	val = tnapi->tx_prod;
	tnapi->tx_buffers[val].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       rnapi->coal_now);

	udelay(10);

	/* Snapshot the rx producer so we can detect the arrivals below. */
	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

	budget = tg3_tx_avail(tnapi);
	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
			    base_flags | TXD_FLAG_END, mss, 0)) {
		tnapi->tx_buffers[val].skb = NULL;
		dev_kfree_skb(skb);
		return -EIO;
	}

	tnapi->tx_prod++;

	/* Sync BD data before updating mailbox */
	wmb();

	/* Kick the hardware; the mailbox read flushes the posted write. */
	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
	tr32_mailbox(tnapi->prodmbox);

	udelay(10);

	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 35; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       coal_now);

		udelay(10);

		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
		rx_idx = rnapi->hw_status->idx[0].rx_producer;
		if ((tx_idx == tnapi->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
	dev_kfree_skb(skb);

	/* Fail if tx did not complete or the expected number of rx
	 * packets never arrived.
	 */
	if (tx_idx != tnapi->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Walk every rx completion produced by the test and verify the
	 * descriptor and payload; val tracks the expected ramp value
	 * across segments.
	 */
	val = data_off;
	while (rx_idx != rx_start_idx) {
		desc = &rnapi->rx_rcb[rx_start_idx++];
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

		/* Any rx error other than the odd-nibble MII quirk fails
		 * the test.
		 */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
			goto out;

		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
			 - ETH_FCS_LEN;

		if (!tso_loopback) {
			if (rx_len != tx_len)
				goto out;

			/* The frame must land on the ring its size
			 * dictates: std for normal, jumbo for oversize.
			 */
			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
				if (opaque_key != RXD_OPAQUE_RING_STD)
					goto out;
			} else {
				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
					goto out;
			}
		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
			/* TSO case: a computed checksum that is not
			 * 0xffff indicates corruption.
			 */
			goto out;
		}

		if (opaque_key == RXD_OPAQUE_RING_STD) {
			rx_data = tpr->rx_std_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
					     mapping);
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
					     mapping);
		} else
			goto out;

		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
					    PCI_DMA_FROMDEVICE);

		/* Byte-compare the payload against the transmit ramp. */
		rx_data += TG3_RX_OFFSET(tp);
		for (i = data_off; i < rx_len; i++, val++) {
			if (*(rx_data + i) != (u8) (val & 0xff))
				goto out;
		}
	}

	err = 0;

	/* tg3_free_rings will unmap and free the rx_data */
out:
	return err;
}
13359
/* Per-mode loopback failure bits reported in the ethtool test data. */
#define TG3_STD_LOOPBACK_FAILED         1
#define TG3_JMB_LOOPBACK_FAILED         2
#define TG3_TSO_LOOPBACK_FAILED         4
#define TG3_LOOPBACK_FAILED \
        (TG3_STD_LOOPBACK_FAILED | \
         TG3_JMB_LOOPBACK_FAILED | \
         TG3_TSO_LOOPBACK_FAILED)

/* Run MAC, PHY and (optionally) external loopback tests, recording
 * per-mode failure bits in data[TG3_*_LOOPB_TEST].  Returns 0 when
 * every attempted mode passed, -EIO otherwise.  Resets the hardware
 * via tg3_reset_hw(); EEE advertisement is suppressed for the
 * duration and restored on exit.
 */
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;
	u32 jmb_pkt_sz = 9000;

	if (tp->dma_limit)
		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;

	/* Save the EEE capability bit and clear it while testing. */
	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	if (!netif_running(tp->dev)) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	err = tg3_reset_hw(tp, true);
	if (err) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;

		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	/* PHY loopback only makes sense when this driver controls the
	 * PHY directly (not a serdes device, not phylib-managed).
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec.  Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	/* Any failure bit recorded above means the overall test failed. */
	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;

done:
	/* Restore the saved EEE capability flag. */
	tp->phy_flags |= eee_cap;

	return err;
}
13482
/* ethtool self-test entry point.  Runs the NVRAM, link, register, memory,
 * loopback and interrupt tests, recording a per-test pass/fail flag in
 * data[] and setting ETH_TEST_FL_FAILED in etest->flags on any failure.
 * When ETH_TEST_FL_OFFLINE is requested the chip is halted for the tests
 * and restarted afterwards, so the device is fully reset as a side effect.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		/* Wake the chip before testing; if power-up fails, mark
		 * every test as failed and bail out.
		 */
		if (tg3_power_up(tp)) {
			etest->flags |= ETH_TEST_FL_FAILED;
			memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
			return;
		}
		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_NVRAM_TEST] = 1;
	}
	/* The link test is skipped for external loopback runs. */
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_LINK_TEST] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);
		/* Quiesce the chip: halt it, then stop the on-chip CPUs
		 * (TX CPU only exists on pre-5705 parts).  The NVRAM lock
		 * is taken around the CPU halts and released if acquired.
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_REGISTER_TEST] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_MEMORY_TEST] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		if (tg3_test_loopback(tp, data, doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		/* tg3_test_interrupt() runs with the full lock dropped. */
		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_INTERRUPT_TEST] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Restore normal operation after the offline tests. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, true);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down_prepare(tp);

}
13571
13572 static int tg3_hwtstamp_ioctl(struct net_device *dev,
13573                               struct ifreq *ifr, int cmd)
13574 {
13575         struct tg3 *tp = netdev_priv(dev);
13576         struct hwtstamp_config stmpconf;
13577
13578         if (!tg3_flag(tp, PTP_CAPABLE))
13579                 return -EINVAL;
13580
13581         if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13582                 return -EFAULT;
13583
13584         if (stmpconf.flags)
13585                 return -EINVAL;
13586
13587         switch (stmpconf.tx_type) {
13588         case HWTSTAMP_TX_ON:
13589                 tg3_flag_set(tp, TX_TSTAMP_EN);
13590                 break;
13591         case HWTSTAMP_TX_OFF:
13592                 tg3_flag_clear(tp, TX_TSTAMP_EN);
13593                 break;
13594         default:
13595                 return -ERANGE;
13596         }
13597
13598         switch (stmpconf.rx_filter) {
13599         case HWTSTAMP_FILTER_NONE:
13600                 tp->rxptpctl = 0;
13601                 break;
13602         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13603                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13604                                TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13605                 break;
13606         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13607                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13608                                TG3_RX_PTP_CTL_SYNC_EVNT;
13609                 break;
13610         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13611                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13612                                TG3_RX_PTP_CTL_DELAY_REQ;
13613                 break;
13614         case HWTSTAMP_FILTER_PTP_V2_EVENT:
13615                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13616                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13617                 break;
13618         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13619                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13620                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13621                 break;
13622         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13623                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13624                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13625                 break;
13626         case HWTSTAMP_FILTER_PTP_V2_SYNC:
13627                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13628                                TG3_RX_PTP_CTL_SYNC_EVNT;
13629                 break;
13630         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13631                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13632                                TG3_RX_PTP_CTL_SYNC_EVNT;
13633                 break;
13634         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13635                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13636                                TG3_RX_PTP_CTL_SYNC_EVNT;
13637                 break;
13638         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13639                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13640                                TG3_RX_PTP_CTL_DELAY_REQ;
13641                 break;
13642         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13643                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13644                                TG3_RX_PTP_CTL_DELAY_REQ;
13645                 break;
13646         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13647                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13648                                TG3_RX_PTP_CTL_DELAY_REQ;
13649                 break;
13650         default:
13651                 return -ERANGE;
13652         }
13653
13654         if (netif_running(dev) && tp->rxptpctl)
13655                 tw32(TG3_RX_PTP_CTL,
13656                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13657
13658         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13659                 -EFAULT : 0;
13660 }
13661
/* net_device_ops ioctl handler: MII register access (SIOCGMIIPHY,
 * SIOCGMIIREG, SIOCSMIIREG) plus hardware timestamping configuration
 * (SIOCSHWTSTAMP).  When phylib manages the PHY, MII ioctls are
 * delegated wholesale to phy_mii_ioctl().
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		/* tp->lock serializes PHY accesses with other users. */
		spin_lock_bh(&tp->lock);
		err = __tg3_readphy(tp, data->phy_id & 0x1f,
				    data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = __tg3_writephy(tp, data->phy_id & 0x1f,
				     data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	case SIOCSHWTSTAMP:
		return tg3_hwtstamp_ioctl(dev, ifr, cmd);

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
13723
13724 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13725 {
13726         struct tg3 *tp = netdev_priv(dev);
13727
13728         memcpy(ec, &tp->coal, sizeof(*ec));
13729         return 0;
13730 }
13731
13732 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13733 {
13734         struct tg3 *tp = netdev_priv(dev);
13735         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13736         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13737
13738         if (!tg3_flag(tp, 5705_PLUS)) {
13739                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13740                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13741                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13742                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13743         }
13744
13745         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13746             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13747             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13748             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13749             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13750             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13751             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13752             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13753             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13754             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13755                 return -EINVAL;
13756
13757         /* No rx interrupts will be generated if both are zero */
13758         if ((ec->rx_coalesce_usecs == 0) &&
13759             (ec->rx_max_coalesced_frames == 0))
13760                 return -EINVAL;
13761
13762         /* No tx interrupts will be generated if both are zero */
13763         if ((ec->tx_coalesce_usecs == 0) &&
13764             (ec->tx_max_coalesced_frames == 0))
13765                 return -EINVAL;
13766
13767         /* Only copy relevant parameters, ignore all others. */
13768         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13769         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13770         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13771         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13772         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13773         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13774         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13775         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13776         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13777
13778         if (netif_running(dev)) {
13779                 tg3_full_lock(tp, 0);
13780                 __tg3_set_coalesce(tp, &tp->coal);
13781                 tg3_full_unlock(tp);
13782         }
13783         return 0;
13784 }
13785
13786 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
13787 {
13788         struct tg3 *tp = netdev_priv(dev);
13789
13790         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13791                 netdev_warn(tp->dev, "Board does not support EEE!\n");
13792                 return -EOPNOTSUPP;
13793         }
13794
13795         if (edata->advertised != tp->eee.advertised) {
13796                 netdev_warn(tp->dev,
13797                             "Direct manipulation of EEE advertisement is not supported\n");
13798                 return -EINVAL;
13799         }
13800
13801         if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
13802                 netdev_warn(tp->dev,
13803                             "Maximal Tx Lpi timer supported is %#x(u)\n",
13804                             TG3_CPMU_DBTMR1_LNKIDLE_MAX);
13805                 return -EINVAL;
13806         }
13807
13808         tp->eee = *edata;
13809
13810         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
13811         tg3_warn_mgmt_link_flap(tp);
13812
13813         if (netif_running(tp->dev)) {
13814                 tg3_full_lock(tp, 0);
13815                 tg3_setup_eee(tp);
13816                 tg3_phy_reset(tp);
13817                 tg3_full_unlock(tp);
13818         }
13819
13820         return 0;
13821 }
13822
13823 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
13824 {
13825         struct tg3 *tp = netdev_priv(dev);
13826
13827         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13828                 netdev_warn(tp->dev,
13829                             "Board does not support EEE!\n");
13830                 return -EOPNOTSUPP;
13831         }
13832
13833         *edata = tp->eee;
13834         return 0;
13835 }
13836
/* ethtool operations exported by the tg3 driver. */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
	.get_rxfh_indir		= tg3_get_rxfh_indir,
	.set_rxfh_indir		= tg3_set_rxfh_indir,
	.get_channels		= tg3_get_channels,
	.set_channels		= tg3_set_channels,
	.get_ts_info		= tg3_get_ts_info,
	.get_eee		= tg3_get_eee,
	.set_eee		= tg3_set_eee,
};
13873
13874 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13875                                                 struct rtnl_link_stats64 *stats)
13876 {
13877         struct tg3 *tp = netdev_priv(dev);
13878
13879         spin_lock_bh(&tp->lock);
13880         if (!tp->hw_stats) {
13881                 spin_unlock_bh(&tp->lock);
13882                 return &tp->net_stats_prev;
13883         }
13884
13885         tg3_get_nstats(tp, stats);
13886         spin_unlock_bh(&tp->lock);
13887
13888         return stats;
13889 }
13890
/* ndo_set_rx_mode: program the chip's rx filtering under the full lock.
 * A down interface has nothing to program.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
13902
13903 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13904                                int new_mtu)
13905 {
13906         dev->mtu = new_mtu;
13907
13908         if (new_mtu > ETH_DATA_LEN) {
13909                 if (tg3_flag(tp, 5780_CLASS)) {
13910                         netdev_update_features(dev);
13911                         tg3_flag_clear(tp, TSO_CAPABLE);
13912                 } else {
13913                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
13914                 }
13915         } else {
13916                 if (tg3_flag(tp, 5780_CLASS)) {
13917                         tg3_flag_set(tp, TSO_CAPABLE);
13918                         netdev_update_features(dev);
13919                 }
13920                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13921         }
13922 }
13923
/* ndo_change_mtu: validate the new MTU and apply it.  If the device is
 * down only the bookkeeping is updated; otherwise the chip is halted,
 * reconfigured for the new frame size and restarted.  Returns 0 or a
 * negative errno from the hardware restart.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;
	bool reset_phy = false;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	/* irq_sync == 1: interrupts were quiesced by tg3_netif_stop(). */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_57766)
		reset_phy = true;

	err = tg3_restart_hw(tp, reset_phy);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
13969
/* net_device_ops for the tg3 driver: open/stop/xmit entry points plus
 * stats, rx-mode, MAC address, ioctl, MTU and feature handling.
 */
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
13987
13988 static void tg3_get_eeprom_size(struct tg3 *tp)
13989 {
13990         u32 cursize, val, magic;
13991
13992         tp->nvram_size = EEPROM_CHIP_SIZE;
13993
13994         if (tg3_nvram_read(tp, 0, &magic) != 0)
13995                 return;
13996
13997         if ((magic != TG3_EEPROM_MAGIC) &&
13998             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13999             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14000                 return;
14001
14002         /*
14003          * Size the chip by reading offsets at increasing powers of two.
14004          * When we encounter our validation signature, we know the addressing
14005          * has wrapped around, and thus have our chip size.
14006          */
14007         cursize = 0x10;
14008
14009         while (cursize < tp->nvram_size) {
14010                 if (tg3_nvram_read(tp, cursize, &val) != 0)
14011                         return;
14012
14013                 if (val == magic)
14014                         break;
14015
14016                 cursize <<= 1;
14017         }
14018
14019         tp->nvram_size = cursize;
14020 }
14021
14022 static void tg3_get_nvram_size(struct tg3 *tp)
14023 {
14024         u32 val;
14025
14026         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14027                 return;
14028
14029         /* Selfboot format */
14030         if (val != TG3_EEPROM_MAGIC) {
14031                 tg3_get_eeprom_size(tp);
14032                 return;
14033         }
14034
14035         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14036                 if (val != 0) {
14037                         /* This is confusing.  We want to operate on the
14038                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
14039                          * call will read from NVRAM and byteswap the data
14040                          * according to the byteswapping settings for all
14041                          * other register accesses.  This ensures the data we
14042                          * want will always reside in the lower 16-bits.
14043                          * However, the data in NVRAM is in LE format, which
14044                          * means the data from the NVRAM read will always be
14045                          * opposite the endianness of the CPU.  The 16-bit
14046                          * byteswap then brings the data to CPU endianness.
14047                          */
14048                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14049                         return;
14050                 }
14051         }
14052         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14053 }
14054
/* Decode NVRAM_CFG1 for older (5750 / 5780-class and earlier) devices:
 * record the NVRAM vendor, page size and buffering mode in *tp.
 */
static void tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		/* No flash interface: disable the compatibility bypass
		 * path for NVRAM accesses.
		 */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		/* Per-vendor part identification and page sizing. */
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		/* Other chips default to buffered Atmel AT45DB0X1B. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
14105
14106 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14107 {
14108         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14109         case FLASH_5752PAGE_SIZE_256:
14110                 tp->nvram_pagesize = 256;
14111                 break;
14112         case FLASH_5752PAGE_SIZE_512:
14113                 tp->nvram_pagesize = 512;
14114                 break;
14115         case FLASH_5752PAGE_SIZE_1K:
14116                 tp->nvram_pagesize = 1024;
14117                 break;
14118         case FLASH_5752PAGE_SIZE_2K:
14119                 tp->nvram_pagesize = 2048;
14120                 break;
14121         case FLASH_5752PAGE_SIZE_4K:
14122                 tp->nvram_pagesize = 4096;
14123                 break;
14124         case FLASH_5752PAGE_SIZE_264:
14125                 tp->nvram_pagesize = 264;
14126                 break;
14127         case FLASH_5752PAGE_SIZE_528:
14128                 tp->nvram_pagesize = 528;
14129                 break;
14130         }
14131 }
14132
/* Decode NVRAM_CFG1 on 5752 devices: note TPM protection, record the
 * vendor / buffering / flash-vs-eeprom mode, then derive the page size.
 */
static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
14173
/* Decode NVRAM_CFG1 on 5755 devices: note TPM protection, then record
 * the vendor, page size and total NVRAM size.  Protected parts report
 * a smaller usable size than the raw chip capacity.
 */
static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		/* Size depends on the specific Atmel part and whether
		 * the TPM-protected region is reserved.
		 */
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		/* ST M45PE10/20/40: protection halves the usable size. */
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
14229
/* Decode NVRAM_CFG1 on 5787 devices and record the NVRAM vendor,
 * buffering mode and page size in *tp.
 */
static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* EEPROM parts: disable the compatibility bypass path. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
14267
/* Decode NVRAM geometry for 5761-class devices.  Translates the vendor
 * strap in NVRAM_CFG1 into vendor/flags, then derives the device size
 * from the specific flash variant -- unless the part is TPM-protected,
 * in which case the size comes from the lockout register.
 */
static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		/* Atmel parts on 5761 skip page-address translation. */
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		/* TPM-protected: usable size is what the lockout
		 * register exposes, not the raw part size.
		 */
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		/* Size follows directly from the flash variant. */
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
14342
14343 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14344 {
14345         tp->nvram_jedecnum = JEDEC_ATMEL;
14346         tg3_flag_set(tp, NVRAM_BUFFERED);
14347         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14348 }
14349
/* Decode NVRAM geometry for 57780-class (and 57765-class) devices.
 * EEPROM straps return early; flash straps fall through to the common
 * page-size probe at the bottom.  Unknown straps mean no NVRAM.
 */
static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		/* Buffered EEPROM: fixed geometry, and route accesses
		 * through the buffered-NVRAM state machine.
		 */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Size is implied by the exact AT45DB variant. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Size is implied by the exact M45PE variant. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Only 264/528-byte-page parts need page-address translation. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
14421
14422
/* Decode NVRAM geometry for 5717/5719 devices.  Same shape as the
 * other tg3_get_*_nvram_info() decoders: EEPROM straps return early,
 * flash straps set vendor/flags and (where the strap fixes it) a size,
 * then fall through to the common page-size probe.
 */
static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		/* Buffered EEPROM: fixed geometry, and route accesses
		 * through the buffered-NVRAM state machine.
		 */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Only 264/528-byte-page parts need page-address translation. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
14500
/* Decode NVRAM geometry for 5720/5762 devices.  For the 5762, some
 * pinstrap encodings are first remapped onto their 5720 equivalents,
 * then the common decode below fills in vendor, flags and size.  The
 * 5762 additionally requires a valid image signature at offset 0.
 */
static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
			tg3_flag_set(tp, NO_NVRAM);
			return;
		}

		/* Remap 5762-specific straps to their 5720 equivalents
		 * so the switch below handles both ASICs.
		 */
		switch (nvmpinstrp) {
		case FLASH_5762_EEPROM_HD:
			nvmpinstrp = FLASH_5720_EEPROM_HD;
			break;
		case FLASH_5762_EEPROM_LD:
			nvmpinstrp = FLASH_5720_EEPROM_LD;
			break;
		case FLASH_5720VENDOR_M_ST_M45PE20:
			/* This pinstrap supports multiple sizes, so force it
			 * to read the actual size from location 0xf0.
			 */
			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
			break;
		}
	}

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		/* Buffered EEPROM: page size depends on HD vs LD strap;
		 * route accesses through the buffered-NVRAM machinery.
		 */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			/* On 5762, leave the size 0 so it is probed
			 * from the device later.
			 */
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			/* On 5762, leave the size 0 so it is probed
			 * from the device later.
			 */
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Only 264/528-byte-page parts need page-address translation. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 val;

		if (tg3_nvram_read(tp, 0, &val))
			return;

		/* Treat the NVRAM as absent unless word 0 carries a
		 * recognized image signature.
		 */
		if (val != TG3_EEPROM_MAGIC &&
		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
			tg3_flag_set(tp, NO_NVRAM);
	}
}
14647
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
/* Initialize NVRAM/EEPROM access state: reset the EEPROM FSM, enable
 * seeprom access, dispatch to the per-ASIC strap decoder under the
 * NVRAM lock, and fall back to size probing when the decoder could
 * not determine the size.
 */
static void tg3_nvram_init(struct tg3 *tp)
{
	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	/* Reset the EEPROM access state machine and program the
	 * default clock period.
	 */
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		/* Dispatch to the per-ASIC NVRAM strap decoder. */
		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
			 tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			 tg3_asic_rev(tp) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
			 tg3_asic_rev(tp) == ASIC_REV_5762)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		/* Decoders that could not fix the size leave it 0;
		 * probe it from the device in that case.
		 */
		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		/* 5700/5701: no NVRAM interface, use the EEPROM path. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
14722
/* Maps a PCI subsystem vendor/device pair to the PHY id expected on
 * that board.  A phy_id of 0 appears to mean "no fixed copper PHY"
 * -- NOTE(review): confirm against the callers of
 * tg3_lookup_by_subsys().
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};
14727
/* Known board subsystem IDs and their PHY ids, searched linearly by
 * tg3_lookup_by_subsys().
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
14791
14792 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14793 {
14794         int i;
14795
14796         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14797                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14798                      tp->pdev->subsystem_vendor) &&
14799                     (subsys_id_to_phy_id[i].subsys_devid ==
14800                      tp->pdev->subsystem_device))
14801                         return &subsys_id_to_phy_id[i];
14802         }
14803         return NULL;
14804 }
14805
/* Read static hardware configuration out of the NIC SRAM shadow of
 * the EEPROM/NVRAM: PHY id, serdes vs copper, LED mode, write-protect,
 * WoL capability/enable, ASF/APE enables and assorted workaround
 * flags.  Falls back to safe defaults (PHY_1 LEDs, write-protected,
 * WoL-capable) when the SRAM signature is absent, and finally pushes
 * the WoL state to the PM core.
 */
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* 5906 exposes its config via the VCPU shadow register
		 * instead of the NIC SRAM area read below.
		 */
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		/* CFG_2 is only valid on newer ASICs with a sane
		 * bootcode version word.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			/* Repack the SRAM encoding into the driver's
			 * phy_id layout.
			 */
			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		/* 5750+ parts carry the LED mode in CFG_2 (with the
		 * extended Shasta modes); older parts use CFG.
		 */
		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		/* Board-specific LED overrides. */
		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			/* Two Arima boards set the WP bit but must stay
			 * writable.
			 */
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		/* Serdes boards only keep WoL when the config says so. */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
			    !tg3_flag(tp, 57765_PLUS) &&
			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
				tg3_flag_set(tp, ASPM_WORKAROUND);
			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	}
done:
	/* Push the final WoL capability/enable state to the PM core. */
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
15007
/* Read one 32-bit word from the APE-attached OTP (one-time-programmable)
 * region.  @offset is scaled by 8 for the hardware's addressing scheme
 * (NOTE(review): presumably the OTP is bit-addressed — confirm against
 * the APE documentation).  On success, stores the word in *val and
 * returns 0; returns -EBUSY if the OTP engine never reports completion,
 * or the error from tg3_nvram_lock().
 */
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;

	/* Serialize against other NVRAM users (including firmware). */
	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	/* Program the address, then issue a read command. */
	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	/* Read back, presumably to flush the posted write before delaying. */
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
	udelay(10);

	/* Poll for command completion, 100 x 10us = up to ~1 ms. */
	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	/* Disable the OTP engine again regardless of outcome. */
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}
15040
/* Issue a command to the on-chip OTP controller and busy-wait for it to
 * complete.  Returns 0 on success, -EBUSY if the command does not finish
 * within ~1 ms.  Callers set up OTP_MODE/OTP_ADDRESS beforehand.
 */
static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	/* Pulse the START bit: write cmd with START set, then cmd alone. */
	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}
15059
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 * Returns 0 on any OTP command failure (callers treat 0 as "no config").
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	/* Route OTP accesses through the GRC register interface. */
	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	/* First read: word containing the top half of the config. */
	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	/* Second read: word containing the bottom half. */
	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	/* Merge: low 16 bits of the first word form the high half of the
	 * result, high 16 bits of the second word form the low half.
	 */
	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
15089
15090 static void tg3_phy_init_link_config(struct tg3 *tp)
15091 {
15092         u32 adv = ADVERTISED_Autoneg;
15093
15094         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15095                 adv |= ADVERTISED_1000baseT_Half |
15096                        ADVERTISED_1000baseT_Full;
15097
15098         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15099                 adv |= ADVERTISED_100baseT_Half |
15100                        ADVERTISED_100baseT_Full |
15101                        ADVERTISED_10baseT_Half |
15102                        ADVERTISED_10baseT_Full |
15103                        ADVERTISED_TP;
15104         else
15105                 adv |= ADVERTISED_FIBRE;
15106
15107         tp->link_config.advertising = adv;
15108         tp->link_config.speed = SPEED_UNKNOWN;
15109         tp->link_config.duplex = DUPLEX_UNKNOWN;
15110         tp->link_config.autoneg = AUTONEG_ENABLE;
15111         tp->link_config.active_speed = SPEED_UNKNOWN;
15112         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15113
15114         tp->old_link = -1;
15115 }
15116
/* Identify the attached PHY and initialize PHY-related flags and the
 * default link configuration.  Returns 0 on success or a negative errno.
 *
 * The PHY ID is taken, in order of preference, from: the MII PHYSID
 * registers (only when neither ASF nor APE firmware owns the PHY), the
 * value already set by tg3_get_eeprom_hw_cfg(), or a hardcoded
 * subsystem-ID lookup table.
 */
static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	/* Each PCI function uses its own APE lock to arbitrate PHY
	 * access with the management firmware.
	 */
	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	/* Without ASF firmware, drop the power-down link capabilities for
	 * plain gigabit copper PHYs (NOTE(review): presumably because
	 * nothing keeps the PHY alive across power-down — confirm).
	 */
	if (!tg3_flag(tp, ENABLE_ASF) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Pack OUI + model + revision into the driver's internal
		 * PHY ID layout.
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (p) {
				tp->phy_id = p->phy_id;
			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* For now we saw the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to an BCM4785 and there are
				 * probably more. Just assume that the phy is
				 * supported when it is connected to a SSB core
				 * for now.
				 */
				return -ENODEV;
			}

			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	/* Mark EEE capability on the copper ASIC revisions that support
	 * it, and pre-load the default EEE parameters.
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

		tp->eee.supported = SUPPORTED_100baseT_Full |
				    SUPPORTED_1000baseT_Full;
		tp->eee.advertised = ADVERTISED_100baseT_Full |
				     ADVERTISED_1000baseT_Full;
		tp->eee.eee_enabled = 1;
		tp->eee.tx_lpi_enabled = 1;
		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
	}

	tg3_phy_init_link_config(tp);

	/* If no firmware owns the PHY, reset and autoneg it now unless
	 * link is already up.
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		/* BMSR link status is latched; read twice so the second
		 * read reflects the current state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		/* NOTE(review): called twice — presumably a deliberate
		 * retry for this PHY; confirm before changing.
		 */
		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
15271
/* Parse the PCI VPD (Vital Product Data) block to fill in
 * tp->board_part_number and, for some OEM cards, the start of
 * tp->fw_ver.  Falls back to hardcoded part names keyed off the ASIC
 * revision and PCI device ID when no usable VPD is found.
 */
static void tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i = 0;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	/* Locate the read-only VPD resource. */
	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	/* Reject a RO section that claims to run past the VPD buffer. */
	if (block_end > vpdlen)
		goto out_not_found;

	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		/* "1028" — NOTE(review): presumably Dell's PCI vendor ID;
		 * only these cards carry a firmware version in V0.
		 */
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		/* Copy the vendor-specific firmware version, clamped to
		 * the fw_ver buffer, with a " bc " suffix for the
		 * bootcode version appended later.
		 */
		if (len >= sizeof(tp->fw_ver))
			len = sizeof(tp->fw_ver) - 1;
		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
			 &vpd_data[j]);
	}

partno:
	/* Board part number ("PN" keyword). */
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	/* No VPD part number: synthesize one from the device ID. */
	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
15395
15396 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15397 {
15398         u32 val;
15399
15400         if (tg3_nvram_read(tp, offset, &val) ||
15401             (val & 0xfc000000) != 0x0c000000 ||
15402             tg3_nvram_read(tp, offset + 4, &val) ||
15403             val != 0)
15404                 return 0;
15405
15406         return 1;
15407 }
15408
/* Append the NVRAM bootcode version to tp->fw_ver.  Newer images store
 * a 16-byte version string (located via a pointer at header offset 8);
 * older images only carry packed major/minor fields, rendered as
 * "vM.mm".  Silently returns on any NVRAM read failure.
 */
static void tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	/* Image offset at NVRAM word 0xc, load address at word 0x4. */
	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	/* New-style header: 0x0c signature and a zero second word
	 * (same check as tg3_fw_img_is_valid()).
	 */
	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	/* Append after whatever tg3_read_vpd() already put in fw_ver. */
	dst_off = strlen(tp->fw_ver);

	if (newver) {
		/* Need 16 bytes of room for the raw version string. */
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		/* ver_offset is relative to the image load address. */
		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
15460
15461 static void tg3_read_hwsb_ver(struct tg3 *tp)
15462 {
15463         u32 val, major, minor;
15464
15465         /* Use native endian representation */
15466         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15467                 return;
15468
15469         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15470                 TG3_NVM_HWSB_CFG1_MAJSFT;
15471         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15472                 TG3_NVM_HWSB_CFG1_MINSFT;
15473
15474         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15475 }
15476
/* Append the self-boot firmware version to tp->fw_ver as
 * "sb vM.mm[letter]", where the optional trailing letter encodes the
 * build number ('a' == build 1).  @val is the NVRAM magic word already
 * read by the caller; its revision field selects where the
 * edition/build word lives in the EEPROM.
 */
static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	/* Only format 1 images carry a version word we know how to read. */
	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	/* Each image revision keeps its edition word at a different offset. */
	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		/* Unknown revision: leave just the "sb" tag. */
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;

	/* Sanity limits: two-digit minor, build letter within 'a'..'z'. */
	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
15531
15532 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15533 {
15534         u32 val, offset, start;
15535         int i, vlen;
15536
15537         for (offset = TG3_NVM_DIR_START;
15538              offset < TG3_NVM_DIR_END;
15539              offset += TG3_NVM_DIRENT_SIZE) {
15540                 if (tg3_nvram_read(tp, offset, &val))
15541                         return;
15542
15543                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15544                         break;
15545         }
15546
15547         if (offset == TG3_NVM_DIR_END)
15548                 return;
15549
15550         if (!tg3_flag(tp, 5705_PLUS))
15551                 start = 0x08000000;
15552         else if (tg3_nvram_read(tp, offset - 4, &start))
15553                 return;
15554
15555         if (tg3_nvram_read(tp, offset + 4, &offset) ||
15556             !tg3_fw_img_is_valid(tp, offset) ||
15557             tg3_nvram_read(tp, offset + 8, &val))
15558                 return;
15559
15560         offset += val - start;
15561
15562         vlen = strlen(tp->fw_ver);
15563
15564         tp->fw_ver[vlen++] = ',';
15565         tp->fw_ver[vlen++] = ' ';
15566
15567         for (i = 0; i < 4; i++) {
15568                 __be32 v;
15569                 if (tg3_nvram_read_be32(tp, offset, &v))
15570                         return;
15571
15572                 offset += sizeof(v);
15573
15574                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15575                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15576                         break;
15577                 }
15578
15579                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15580                 vlen += sizeof(v);
15581         }
15582 }
15583
15584 static void tg3_probe_ncsi(struct tg3 *tp)
15585 {
15586         u32 apedata;
15587
15588         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15589         if (apedata != APE_SEG_SIG_MAGIC)
15590                 return;
15591
15592         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15593         if (!(apedata & APE_FW_STATUS_READY))
15594                 return;
15595
15596         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15597                 tg3_flag_set(tp, APE_HAS_NCSI);
15598 }
15599
15600 static void tg3_read_dash_ver(struct tg3 *tp)
15601 {
15602         int vlen;
15603         u32 apedata;
15604         char *fwtype;
15605
15606         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15607
15608         if (tg3_flag(tp, APE_HAS_NCSI))
15609                 fwtype = "NCSI";
15610         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15611                 fwtype = "SMASH";
15612         else
15613                 fwtype = "DASH";
15614
15615         vlen = strlen(tp->fw_ver);
15616
15617         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15618                  fwtype,
15619                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15620                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15621                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15622                  (apedata & APE_FW_VERSION_BLDMSK));
15623 }
15624
/* 5762-only: append a version stamp read from the OTP region to
 * tp->fw_ver as " .NN".  No-op for other ASICs or if the OTP magic
 * does not validate.
 */
static void tg3_read_otp_ver(struct tg3 *tp)
{
	u32 val, val2;

	if (tg3_asic_rev(tp) != ASIC_REV_5762)
		return;

	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
	    TG3_OTP_MAGIC0_VALID(val)) {
		u64 val64 = (u64) val << 32 | val2;
		u32 ver = 0;
		int i, vlen;

		/* Walk up to 7 bytes LSB-first; keep the last non-zero
		 * byte seen before a zero terminator as the version.
		 */
		for (i = 0; i < 7; i++) {
			if ((val64 & 0xff) == 0)
				break;
			ver = val64 & 0xff;
			val64 >>= 8;
		}
		vlen = strlen(tp->fw_ver);
		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
	}
}
15649
15650 static void tg3_read_fw_ver(struct tg3 *tp)
15651 {
15652         u32 val;
15653         bool vpd_vers = false;
15654
15655         if (tp->fw_ver[0] != 0)
15656                 vpd_vers = true;
15657
15658         if (tg3_flag(tp, NO_NVRAM)) {
15659                 strcat(tp->fw_ver, "sb");
15660                 tg3_read_otp_ver(tp);
15661                 return;
15662         }
15663
15664         if (tg3_nvram_read(tp, 0, &val))
15665                 return;
15666
15667         if (val == TG3_EEPROM_MAGIC)
15668                 tg3_read_bc_ver(tp);
15669         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15670                 tg3_read_sb_ver(tp, val);
15671         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15672                 tg3_read_hwsb_ver(tp);
15673
15674         if (tg3_flag(tp, ENABLE_ASF)) {
15675                 if (tg3_flag(tp, ENABLE_APE)) {
15676                         tg3_probe_ncsi(tp);
15677                         if (!vpd_vers)
15678                                 tg3_read_dash_ver(tp);
15679                 } else if (!vpd_vers) {
15680                         tg3_read_mgmtfw_ver(tp);
15681                 }
15682         }
15683
15684         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15685 }
15686
15687 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15688 {
15689         if (tg3_flag(tp, LRG_PROD_RING_CAP))
15690                 return TG3_RX_RET_MAX_SIZE_5717;
15691         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15692                 return TG3_RX_RET_MAX_SIZE_5700;
15693         else
15694                 return TG3_RX_RET_MAX_SIZE_5705;
15695 }
15696
/* Host bridges known to need the driver's write-reorder handling
 * (NOTE(review): presumably bridges that reorder posted PCI writes —
 * see the code that matches against this table).
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
15703
/* Find the other PCI function of a dual-port device (e.g. 5704) in the
 * same slot.  Returns the peer's pci_dev, or tp->pdev itself when the
 * device is configured single-port.  The returned pointer carries no
 * extra reference (see comment below).
 */
static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;	/* slot base devfn */

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);	/* pci_dev_put(NULL) is a no-op */
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 *
	 * NOTE(review): if the loop completes without breaking and the
	 * last pci_get_slot() returned tp->pdev (function 7), its
	 * reference was already dropped inside the loop and is dropped
	 * again here — apparently benign in practice, but worth
	 * confirming.
	 */
	pci_dev_put(peer);

	return peer;
}
15731
15732 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15733 {
15734         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15735         if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15736                 u32 reg;
15737
15738                 /* All devices that use the alternate
15739                  * ASIC REV location have a CPMU.
15740                  */
15741                 tg3_flag_set(tp, CPMU_PRESENT);
15742
15743                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15744                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15745                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15746                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15747                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15748                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15749                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15750                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
15751                         reg = TG3PCI_GEN2_PRODID_ASICREV;
15752                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15753                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15754                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15755                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15756                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15757                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15758                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15759                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15760                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15761                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15762                         reg = TG3PCI_GEN15_PRODID_ASICREV;
15763                 else
15764                         reg = TG3PCI_PRODID_ASICREV;
15765
15766                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15767         }
15768
15769         /* Wrong chip ID in 5752 A0. This code can be removed later
15770          * as A0 is not in production.
15771          */
15772         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15773                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15774
15775         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15776                 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15777
15778         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15779             tg3_asic_rev(tp) == ASIC_REV_5719 ||
15780             tg3_asic_rev(tp) == ASIC_REV_5720)
15781                 tg3_flag_set(tp, 5717_PLUS);
15782
15783         if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15784             tg3_asic_rev(tp) == ASIC_REV_57766)
15785                 tg3_flag_set(tp, 57765_CLASS);
15786
15787         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15788              tg3_asic_rev(tp) == ASIC_REV_5762)
15789                 tg3_flag_set(tp, 57765_PLUS);
15790
15791         /* Intentionally exclude ASIC_REV_5906 */
15792         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15793             tg3_asic_rev(tp) == ASIC_REV_5787 ||
15794             tg3_asic_rev(tp) == ASIC_REV_5784 ||
15795             tg3_asic_rev(tp) == ASIC_REV_5761 ||
15796             tg3_asic_rev(tp) == ASIC_REV_5785 ||
15797             tg3_asic_rev(tp) == ASIC_REV_57780 ||
15798             tg3_flag(tp, 57765_PLUS))
15799                 tg3_flag_set(tp, 5755_PLUS);
15800
15801         if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15802             tg3_asic_rev(tp) == ASIC_REV_5714)
15803                 tg3_flag_set(tp, 5780_CLASS);
15804
15805         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15806             tg3_asic_rev(tp) == ASIC_REV_5752 ||
15807             tg3_asic_rev(tp) == ASIC_REV_5906 ||
15808             tg3_flag(tp, 5755_PLUS) ||
15809             tg3_flag(tp, 5780_CLASS))
15810                 tg3_flag_set(tp, 5750_PLUS);
15811
15812         if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15813             tg3_flag(tp, 5750_PLUS))
15814                 tg3_flag_set(tp, 5705_PLUS);
15815 }
15816
15817 static bool tg3_10_100_only_device(struct tg3 *tp,
15818                                    const struct pci_device_id *ent)
15819 {
15820         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15821
15822         if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15823              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15824             (tp->phy_flags & TG3_PHYFLG_IS_FET))
15825                 return true;
15826
15827         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15828                 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15829                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15830                                 return true;
15831                 } else {
15832                         return true;
15833                 }
15834         }
15835
15836         return false;
15837 }
15838
15839 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15840 {
15841         u32 misc_ctrl_reg;
15842         u32 pci_state_reg, grc_misc_cfg;
15843         u32 val;
15844         u16 pci_cmd;
15845         int err;
15846
15847         /* Force memory write invalidate off.  If we leave it on,
15848          * then on 5700_BX chips we have to enable a workaround.
15849          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15850          * to match the cacheline size.  The Broadcom driver have this
15851          * workaround but turns MWI off all the times so never uses
15852          * it.  This seems to suggest that the workaround is insufficient.
15853          */
15854         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15855         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15856         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15857
15858         /* Important! -- Make sure register accesses are byteswapped
15859          * correctly.  Also, for those chips that require it, make
15860          * sure that indirect register accesses are enabled before
15861          * the first operation.
15862          */
15863         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15864                               &misc_ctrl_reg);
15865         tp->misc_host_ctrl |= (misc_ctrl_reg &
15866                                MISC_HOST_CTRL_CHIPREV);
15867         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15868                                tp->misc_host_ctrl);
15869
15870         tg3_detect_asic_rev(tp, misc_ctrl_reg);
15871
15872         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15873          * we need to disable memory and use config. cycles
15874          * only to access all registers. The 5702/03 chips
15875          * can mistakenly decode the special cycles from the
15876          * ICH chipsets as memory write cycles, causing corruption
15877          * of register and memory space. Only certain ICH bridges
15878          * will drive special cycles with non-zero data during the
15879          * address phase which can fall within the 5703's address
15880          * range. This is not an ICH bug as the PCI spec allows
15881          * non-zero address during special cycles. However, only
15882          * these ICH bridges are known to drive non-zero addresses
15883          * during special cycles.
15884          *
15885          * Since special cycles do not cross PCI bridges, we only
15886          * enable this workaround if the 5703 is on the secondary
15887          * bus of these ICH bridges.
15888          */
15889         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15890             (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15891                 static struct tg3_dev_id {
15892                         u32     vendor;
15893                         u32     device;
15894                         u32     rev;
15895                 } ich_chipsets[] = {
15896                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15897                           PCI_ANY_ID },
15898                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15899                           PCI_ANY_ID },
15900                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15901                           0xa },
15902                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15903                           PCI_ANY_ID },
15904                         { },
15905                 };
15906                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15907                 struct pci_dev *bridge = NULL;
15908
15909                 while (pci_id->vendor != 0) {
15910                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
15911                                                 bridge);
15912                         if (!bridge) {
15913                                 pci_id++;
15914                                 continue;
15915                         }
15916                         if (pci_id->rev != PCI_ANY_ID) {
15917                                 if (bridge->revision > pci_id->rev)
15918                                         continue;
15919                         }
15920                         if (bridge->subordinate &&
15921                             (bridge->subordinate->number ==
15922                              tp->pdev->bus->number)) {
15923                                 tg3_flag_set(tp, ICH_WORKAROUND);
15924                                 pci_dev_put(bridge);
15925                                 break;
15926                         }
15927                 }
15928         }
15929
15930         if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15931                 static struct tg3_dev_id {
15932                         u32     vendor;
15933                         u32     device;
15934                 } bridge_chipsets[] = {
15935                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15936                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15937                         { },
15938                 };
15939                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15940                 struct pci_dev *bridge = NULL;
15941
15942                 while (pci_id->vendor != 0) {
15943                         bridge = pci_get_device(pci_id->vendor,
15944                                                 pci_id->device,
15945                                                 bridge);
15946                         if (!bridge) {
15947                                 pci_id++;
15948                                 continue;
15949                         }
15950                         if (bridge->subordinate &&
15951                             (bridge->subordinate->number <=
15952                              tp->pdev->bus->number) &&
15953                             (bridge->subordinate->busn_res.end >=
15954                              tp->pdev->bus->number)) {
15955                                 tg3_flag_set(tp, 5701_DMA_BUG);
15956                                 pci_dev_put(bridge);
15957                                 break;
15958                         }
15959                 }
15960         }
15961
15962         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15963          * DMA addresses > 40-bit. This bridge may have other additional
15964          * 57xx devices behind it in some 4-port NIC designs for example.
15965          * Any tg3 device found behind the bridge will also need the 40-bit
15966          * DMA workaround.
15967          */
15968         if (tg3_flag(tp, 5780_CLASS)) {
15969                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15970                 tp->msi_cap = tp->pdev->msi_cap;
15971         } else {
15972                 struct pci_dev *bridge = NULL;
15973
15974                 do {
15975                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15976                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
15977                                                 bridge);
15978                         if (bridge && bridge->subordinate &&
15979                             (bridge->subordinate->number <=
15980                              tp->pdev->bus->number) &&
15981                             (bridge->subordinate->busn_res.end >=
15982                              tp->pdev->bus->number)) {
15983                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15984                                 pci_dev_put(bridge);
15985                                 break;
15986                         }
15987                 } while (bridge);
15988         }
15989
15990         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15991             tg3_asic_rev(tp) == ASIC_REV_5714)
15992                 tp->pdev_peer = tg3_find_peer(tp);
15993
15994         /* Determine TSO capabilities */
15995         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
15996                 ; /* Do nothing. HW bug. */
15997         else if (tg3_flag(tp, 57765_PLUS))
15998                 tg3_flag_set(tp, HW_TSO_3);
15999         else if (tg3_flag(tp, 5755_PLUS) ||
16000                  tg3_asic_rev(tp) == ASIC_REV_5906)
16001                 tg3_flag_set(tp, HW_TSO_2);
16002         else if (tg3_flag(tp, 5750_PLUS)) {
16003                 tg3_flag_set(tp, HW_TSO_1);
16004                 tg3_flag_set(tp, TSO_BUG);
16005                 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16006                     tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16007                         tg3_flag_clear(tp, TSO_BUG);
16008         } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16009                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
16010                    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16011                 tg3_flag_set(tp, FW_TSO);
16012                 tg3_flag_set(tp, TSO_BUG);
16013                 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16014                         tp->fw_needed = FIRMWARE_TG3TSO5;
16015                 else
16016                         tp->fw_needed = FIRMWARE_TG3TSO;
16017         }
16018
16019         /* Selectively allow TSO based on operating conditions */
16020         if (tg3_flag(tp, HW_TSO_1) ||
16021             tg3_flag(tp, HW_TSO_2) ||
16022             tg3_flag(tp, HW_TSO_3) ||
16023             tg3_flag(tp, FW_TSO)) {
16024                 /* For firmware TSO, assume ASF is disabled.
16025                  * We'll disable TSO later if we discover ASF
16026                  * is enabled in tg3_get_eeprom_hw_cfg().
16027                  */
16028                 tg3_flag_set(tp, TSO_CAPABLE);
16029         } else {
16030                 tg3_flag_clear(tp, TSO_CAPABLE);
16031                 tg3_flag_clear(tp, TSO_BUG);
16032                 tp->fw_needed = NULL;
16033         }
16034
16035         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16036                 tp->fw_needed = FIRMWARE_TG3;
16037
16038         if (tg3_asic_rev(tp) == ASIC_REV_57766)
16039                 tp->fw_needed = FIRMWARE_TG357766;
16040
16041         tp->irq_max = 1;
16042
16043         if (tg3_flag(tp, 5750_PLUS)) {
16044                 tg3_flag_set(tp, SUPPORT_MSI);
16045                 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16046                     tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16047                     (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16048                      tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16049                      tp->pdev_peer == tp->pdev))
16050                         tg3_flag_clear(tp, SUPPORT_MSI);
16051
16052                 if (tg3_flag(tp, 5755_PLUS) ||
16053                     tg3_asic_rev(tp) == ASIC_REV_5906) {
16054                         tg3_flag_set(tp, 1SHOT_MSI);
16055                 }
16056
16057                 if (tg3_flag(tp, 57765_PLUS)) {
16058                         tg3_flag_set(tp, SUPPORT_MSIX);
16059                         tp->irq_max = TG3_IRQ_MAX_VECS;
16060                 }
16061         }
16062
16063         tp->txq_max = 1;
16064         tp->rxq_max = 1;
16065         if (tp->irq_max > 1) {
16066                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16067                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16068
16069                 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16070                     tg3_asic_rev(tp) == ASIC_REV_5720)
16071                         tp->txq_max = tp->irq_max - 1;
16072         }
16073
16074         if (tg3_flag(tp, 5755_PLUS) ||
16075             tg3_asic_rev(tp) == ASIC_REV_5906)
16076                 tg3_flag_set(tp, SHORT_DMA_BUG);
16077
16078         if (tg3_asic_rev(tp) == ASIC_REV_5719)
16079                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16080
16081         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16082             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16083             tg3_asic_rev(tp) == ASIC_REV_5720 ||
16084             tg3_asic_rev(tp) == ASIC_REV_5762)
16085                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16086
16087         if (tg3_flag(tp, 57765_PLUS) &&
16088             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16089                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16090
16091         if (!tg3_flag(tp, 5705_PLUS) ||
16092             tg3_flag(tp, 5780_CLASS) ||
16093             tg3_flag(tp, USE_JUMBO_BDFLAG))
16094                 tg3_flag_set(tp, JUMBO_CAPABLE);
16095
16096         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16097                               &pci_state_reg);
16098
16099         if (pci_is_pcie(tp->pdev)) {
16100                 u16 lnkctl;
16101
16102                 tg3_flag_set(tp, PCI_EXPRESS);
16103
16104                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16105                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16106                         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16107                                 tg3_flag_clear(tp, HW_TSO_2);
16108                                 tg3_flag_clear(tp, TSO_CAPABLE);
16109                         }
16110                         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16111                             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16112                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16113                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16114                                 tg3_flag_set(tp, CLKREQ_BUG);
16115                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16116                         tg3_flag_set(tp, L1PLLPD_EN);
16117                 }
16118         } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16119                 /* BCM5785 devices are effectively PCIe devices, and should
16120                  * follow PCIe codepaths, but do not have a PCIe capabilities
16121                  * section.
16122                  */
16123                 tg3_flag_set(tp, PCI_EXPRESS);
16124         } else if (!tg3_flag(tp, 5705_PLUS) ||
16125                    tg3_flag(tp, 5780_CLASS)) {
16126                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16127                 if (!tp->pcix_cap) {
16128                         dev_err(&tp->pdev->dev,
16129                                 "Cannot find PCI-X capability, aborting\n");
16130                         return -EIO;
16131                 }
16132
16133                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16134                         tg3_flag_set(tp, PCIX_MODE);
16135         }
16136
16137         /* If we have an AMD 762 or VIA K8T800 chipset, write
16138          * reordering to the mailbox registers done by the host
16139          * controller can cause major troubles.  We read back from
16140          * every mailbox register write to force the writes to be
16141          * posted to the chip in order.
16142          */
16143         if (pci_dev_present(tg3_write_reorder_chipsets) &&
16144             !tg3_flag(tp, PCI_EXPRESS))
16145                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16146
16147         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16148                              &tp->pci_cacheline_sz);
16149         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16150                              &tp->pci_lat_timer);
16151         if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16152             tp->pci_lat_timer < 64) {
16153                 tp->pci_lat_timer = 64;
16154                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16155                                       tp->pci_lat_timer);
16156         }
16157
16158         /* Important! -- It is critical that the PCI-X hw workaround
16159          * situation is decided before the first MMIO register access.
16160          */
16161         if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16162                 /* 5700 BX chips need to have their TX producer index
16163                  * mailboxes written twice to workaround a bug.
16164                  */
16165                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16166
16167                 /* If we are in PCI-X mode, enable register write workaround.
16168                  *
16169                  * The workaround is to use indirect register accesses
16170                  * for all chip writes not to mailbox registers.
16171                  */
16172                 if (tg3_flag(tp, PCIX_MODE)) {
16173                         u32 pm_reg;
16174
16175                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16176
16177                         /* The chip can have it's power management PCI config
16178                          * space registers clobbered due to this bug.
16179                          * So explicitly force the chip into D0 here.
16180                          */
16181                         pci_read_config_dword(tp->pdev,
16182                                               tp->pm_cap + PCI_PM_CTRL,
16183                                               &pm_reg);
16184                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16185                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16186                         pci_write_config_dword(tp->pdev,
16187                                                tp->pm_cap + PCI_PM_CTRL,
16188                                                pm_reg);
16189
16190                         /* Also, force SERR#/PERR# in PCI command. */
16191                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16192                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16193                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16194                 }
16195         }
16196
16197         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16198                 tg3_flag_set(tp, PCI_HIGH_SPEED);
16199         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16200                 tg3_flag_set(tp, PCI_32BIT);
16201
16202         /* Chip-specific fixup from Broadcom driver */
16203         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16204             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16205                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16206                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16207         }
16208
16209         /* Default fast path register access methods */
16210         tp->read32 = tg3_read32;
16211         tp->write32 = tg3_write32;
16212         tp->read32_mbox = tg3_read32;
16213         tp->write32_mbox = tg3_write32;
16214         tp->write32_tx_mbox = tg3_write32;
16215         tp->write32_rx_mbox = tg3_write32;
16216
16217         /* Various workaround register access methods */
16218         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16219                 tp->write32 = tg3_write_indirect_reg32;
16220         else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16221                  (tg3_flag(tp, PCI_EXPRESS) &&
16222                   tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16223                 /*
16224                  * Back to back register writes can cause problems on these
16225                  * chips, the workaround is to read back all reg writes
16226                  * except those to mailbox regs.
16227                  *
16228                  * See tg3_write_indirect_reg32().
16229                  */
16230                 tp->write32 = tg3_write_flush_reg32;
16231         }
16232
16233         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16234                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16235                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16236                         tp->write32_rx_mbox = tg3_write_flush_reg32;
16237         }
16238
16239         if (tg3_flag(tp, ICH_WORKAROUND)) {
16240                 tp->read32 = tg3_read_indirect_reg32;
16241                 tp->write32 = tg3_write_indirect_reg32;
16242                 tp->read32_mbox = tg3_read_indirect_mbox;
16243                 tp->write32_mbox = tg3_write_indirect_mbox;
16244                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16245                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16246
16247                 iounmap(tp->regs);
16248                 tp->regs = NULL;
16249
16250                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16251                 pci_cmd &= ~PCI_COMMAND_MEMORY;
16252                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16253         }
16254         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16255                 tp->read32_mbox = tg3_read32_mbox_5906;
16256                 tp->write32_mbox = tg3_write32_mbox_5906;
16257                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16258                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16259         }
16260
16261         if (tp->write32 == tg3_write_indirect_reg32 ||
16262             (tg3_flag(tp, PCIX_MODE) &&
16263              (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16264               tg3_asic_rev(tp) == ASIC_REV_5701)))
16265                 tg3_flag_set(tp, SRAM_USE_CONFIG);
16266
16267         /* The memory arbiter has to be enabled in order for SRAM accesses
16268          * to succeed.  Normally on powerup the tg3 chip firmware will make
16269          * sure it is enabled, but other entities such as system netboot
16270          * code might disable it.
16271          */
16272         val = tr32(MEMARB_MODE);
16273         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16274
16275         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16276         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16277             tg3_flag(tp, 5780_CLASS)) {
16278                 if (tg3_flag(tp, PCIX_MODE)) {
16279                         pci_read_config_dword(tp->pdev,
16280                                               tp->pcix_cap + PCI_X_STATUS,
16281                                               &val);
16282                         tp->pci_fn = val & 0x7;
16283                 }
16284         } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16285                    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16286                    tg3_asic_rev(tp) == ASIC_REV_5720) {
16287                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16288                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16289                         val = tr32(TG3_CPMU_STATUS);
16290
16291                 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16292                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16293                 else
16294                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16295                                      TG3_CPMU_STATUS_FSHFT_5719;
16296         }
16297
16298         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16299                 tp->write32_tx_mbox = tg3_write_flush_reg32;
16300                 tp->write32_rx_mbox = tg3_write_flush_reg32;
16301         }
16302
16303         /* Get eeprom hw config before calling tg3_set_power_state().
16304          * In particular, the TG3_FLAG_IS_NIC flag must be
16305          * determined before calling tg3_set_power_state() so that
16306          * we know whether or not to switch out of Vaux power.
16307          * When the flag is set, it means that GPIO1 is used for eeprom
16308          * write protect and also implies that it is a LOM where GPIOs
16309          * are not used to switch power.
16310          */
16311         tg3_get_eeprom_hw_cfg(tp);
16312
16313         if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16314                 tg3_flag_clear(tp, TSO_CAPABLE);
16315                 tg3_flag_clear(tp, TSO_BUG);
16316                 tp->fw_needed = NULL;
16317         }
16318
16319         if (tg3_flag(tp, ENABLE_APE)) {
16320                 /* Allow reads and writes to the
16321                  * APE register and memory space.
16322                  */
16323                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16324                                  PCISTATE_ALLOW_APE_SHMEM_WR |
16325                                  PCISTATE_ALLOW_APE_PSPACE_WR;
16326                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16327                                        pci_state_reg);
16328
16329                 tg3_ape_lock_init(tp);
16330         }
16331
16332         /* Set up tp->grc_local_ctrl before calling
16333          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16334          * will bring 5700's external PHY out of reset.
16335          * It is also used as eeprom write protect on LOMs.
16336          */
16337         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16338         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16339             tg3_flag(tp, EEPROM_WRITE_PROT))
16340                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16341                                        GRC_LCLCTRL_GPIO_OUTPUT1);
16342         /* Unused GPIO3 must be driven as output on 5752 because there
16343          * are no pull-up resistors on unused GPIO pins.
16344          */
16345         else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16346                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16347
16348         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16349             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16350             tg3_flag(tp, 57765_CLASS))
16351                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16352
16353         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16354             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16355                 /* Turn off the debug UART. */
16356                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16357                 if (tg3_flag(tp, IS_NIC))
16358                         /* Keep VMain power. */
16359                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16360                                               GRC_LCLCTRL_GPIO_OUTPUT0;
16361         }
16362
16363         if (tg3_asic_rev(tp) == ASIC_REV_5762)
16364                 tp->grc_local_ctrl |=
16365                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16366
16367         /* Switch out of Vaux if it is a NIC */
16368         tg3_pwrsrc_switch_to_vmain(tp);
16369
16370         /* Derive initial jumbo mode from MTU assigned in
16371          * ether_setup() via the alloc_etherdev() call
16372          */
16373         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16374                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16375
16376         /* Determine WakeOnLan speed to use. */
16377         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16378             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16379             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16380             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16381                 tg3_flag_clear(tp, WOL_SPEED_100MB);
16382         } else {
16383                 tg3_flag_set(tp, WOL_SPEED_100MB);
16384         }
16385
16386         if (tg3_asic_rev(tp) == ASIC_REV_5906)
16387                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16388
16389         /* A few boards don't want Ethernet@WireSpeed phy feature */
16390         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16391             (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16392              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16393              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16394             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16395             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16396                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16397
16398         if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16399             tg3_chip_rev(tp) == CHIPREV_5704_AX)
16400                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16401         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16402                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16403
16404         if (tg3_flag(tp, 5705_PLUS) &&
16405             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16406             tg3_asic_rev(tp) != ASIC_REV_5785 &&
16407             tg3_asic_rev(tp) != ASIC_REV_57780 &&
16408             !tg3_flag(tp, 57765_PLUS)) {
16409                 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16410                     tg3_asic_rev(tp) == ASIC_REV_5787 ||
16411                     tg3_asic_rev(tp) == ASIC_REV_5784 ||
16412                     tg3_asic_rev(tp) == ASIC_REV_5761) {
16413                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16414                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16415                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16416                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16417                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16418                 } else
16419                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16420         }
16421
16422         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16423             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16424                 tp->phy_otp = tg3_read_otp_phycfg(tp);
16425                 if (tp->phy_otp == 0)
16426                         tp->phy_otp = TG3_OTP_DEFAULT;
16427         }
16428
16429         if (tg3_flag(tp, CPMU_PRESENT))
16430                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16431         else
16432                 tp->mi_mode = MAC_MI_MODE_BASE;
16433
16434         tp->coalesce_mode = 0;
16435         if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16436             tg3_chip_rev(tp) != CHIPREV_5700_BX)
16437                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16438
16439         /* Set these bits to enable statistics workaround. */
16440         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16441             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16442             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16443                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16444                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16445         }
16446
16447         if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16448             tg3_asic_rev(tp) == ASIC_REV_57780)
16449                 tg3_flag_set(tp, USE_PHYLIB);
16450
16451         err = tg3_mdio_init(tp);
16452         if (err)
16453                 return err;
16454
16455         /* Initialize data/descriptor byte/word swapping. */
16456         val = tr32(GRC_MODE);
16457         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16458             tg3_asic_rev(tp) == ASIC_REV_5762)
16459                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16460                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
16461                         GRC_MODE_B2HRX_ENABLE |
16462                         GRC_MODE_HTX2B_ENABLE |
16463                         GRC_MODE_HOST_STACKUP);
16464         else
16465                 val &= GRC_MODE_HOST_STACKUP;
16466
16467         tw32(GRC_MODE, val | tp->grc_mode);
16468
16469         tg3_switch_clocks(tp);
16470
16471         /* Clear this out for sanity. */
16472         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16473
16474         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16475                               &pci_state_reg);
16476         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16477             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16478                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16479                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16480                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16481                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16482                         void __iomem *sram_base;
16483
16484                         /* Write some dummy words into the SRAM status block
16485                          * area, see if it reads back correctly.  If the return
16486                          * value is bad, force enable the PCIX workaround.
16487                          */
16488                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16489
16490                         writel(0x00000000, sram_base);
16491                         writel(0x00000000, sram_base + 4);
16492                         writel(0xffffffff, sram_base + 4);
16493                         if (readl(sram_base) != 0x00000000)
16494                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16495                 }
16496         }
16497
16498         udelay(50);
16499         tg3_nvram_init(tp);
16500
16501         /* If the device has an NVRAM, no need to load patch firmware */
16502         if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16503             !tg3_flag(tp, NO_NVRAM))
16504                 tp->fw_needed = NULL;
16505
16506         grc_misc_cfg = tr32(GRC_MISC_CFG);
16507         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16508
16509         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16510             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16511              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16512                 tg3_flag_set(tp, IS_5788);
16513
16514         if (!tg3_flag(tp, IS_5788) &&
16515             tg3_asic_rev(tp) != ASIC_REV_5700)
16516                 tg3_flag_set(tp, TAGGED_STATUS);
16517         if (tg3_flag(tp, TAGGED_STATUS)) {
16518                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16519                                       HOSTCC_MODE_CLRTICK_TXBD);
16520
16521                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16522                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16523                                        tp->misc_host_ctrl);
16524         }
16525
16526         /* Preserve the APE MAC_MODE bits */
16527         if (tg3_flag(tp, ENABLE_APE))
16528                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16529         else
16530                 tp->mac_mode = 0;
16531
16532         if (tg3_10_100_only_device(tp, ent))
16533                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16534
16535         err = tg3_phy_probe(tp);
16536         if (err) {
16537                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16538                 /* ... but do not return immediately ... */
16539                 tg3_mdio_fini(tp);
16540         }
16541
16542         tg3_read_vpd(tp);
16543         tg3_read_fw_ver(tp);
16544
16545         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16546                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16547         } else {
16548                 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16549                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16550                 else
16551                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16552         }
16553
16554         /* 5700 {AX,BX} chips have a broken status block link
16555          * change bit implementation, so we must use the
16556          * status register in those cases.
16557          */
16558         if (tg3_asic_rev(tp) == ASIC_REV_5700)
16559                 tg3_flag_set(tp, USE_LINKCHG_REG);
16560         else
16561                 tg3_flag_clear(tp, USE_LINKCHG_REG);
16562
16563         /* The led_ctrl is set during tg3_phy_probe, here we might
16564          * have to force the link status polling mechanism based
16565          * upon subsystem IDs.
16566          */
16567         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16568             tg3_asic_rev(tp) == ASIC_REV_5701 &&
16569             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16570                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16571                 tg3_flag_set(tp, USE_LINKCHG_REG);
16572         }
16573
16574         /* For all SERDES we poll the MAC status register. */
16575         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16576                 tg3_flag_set(tp, POLL_SERDES);
16577         else
16578                 tg3_flag_clear(tp, POLL_SERDES);
16579
16580         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16581         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16582         if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16583             tg3_flag(tp, PCIX_MODE)) {
16584                 tp->rx_offset = NET_SKB_PAD;
16585 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16586                 tp->rx_copy_thresh = ~(u16)0;
16587 #endif
16588         }
16589
16590         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16591         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16592         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16593
16594         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16595
16596         /* Increment the rx prod index on the rx std ring by at most
16597          * 8 for these chips to workaround hw errata.
16598          */
16599         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16600             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16601             tg3_asic_rev(tp) == ASIC_REV_5755)
16602                 tp->rx_std_max_post = 8;
16603
16604         if (tg3_flag(tp, ASPM_WORKAROUND))
16605                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16606                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
16607
16608         return err;
16609 }
16610
16611 #ifdef CONFIG_SPARC
16612 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16613 {
16614         struct net_device *dev = tp->dev;
16615         struct pci_dev *pdev = tp->pdev;
16616         struct device_node *dp = pci_device_to_OF_node(pdev);
16617         const unsigned char *addr;
16618         int len;
16619
16620         addr = of_get_property(dp, "local-mac-address", &len);
16621         if (addr && len == 6) {
16622                 memcpy(dev->dev_addr, addr, 6);
16623                 return 0;
16624         }
16625         return -ENODEV;
16626 }
16627
16628 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16629 {
16630         struct net_device *dev = tp->dev;
16631
16632         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
16633         return 0;
16634 }
16635 #endif
16636
/* Determine the device's MAC address, trying sources in decreasing
 * order of trust:
 *   1. OF device tree property (SPARC only)
 *   2. the SSB bus core config (tg3 cores embedded behind an SSB bridge)
 *   3. the firmware MAC address mailbox in NIC SRAM
 *   4. NVRAM
 *   5. whatever the MAC address registers currently hold
 * and on SPARC falls back to the IDPROM address if all of the above
 * yield an invalid address.
 *
 * Returns 0 on success, -EINVAL if no valid address could be found.
 */
static int tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;
	int err;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	if (tg3_flag(tp, IS_SSB_CORE)) {
		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
			return 0;
	}

	/* Pick the NVRAM offset that holds this port/function's address. */
	mac_offset = 0x7c;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		/* Dual-MAC parts store the second MAC's address at 0xcc */
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		/* If the NVRAM arbitration lock cannot be taken, reset the
		 * NVRAM command engine instead of unlocking.
		 */
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	/* 0x484b is ASCII "HK" -- presumably the firmware's signature
	 * marking a valid mailbox; TODO confirm against bootcode docs.
	 */
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			/* Address occupies the low 2 bytes of 'hi' plus
			 * all 4 bytes of 'lo' (big-endian reads).
			 */
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	return 0;
}
16718
16719 #define BOUNDARY_SINGLE_CACHELINE       1
16720 #define BOUNDARY_MULTI_CACHELINE        2
16721
/* Fold the appropriate DMA read/write boundary bits into @val (a
 * DMA_RWCTRL image) based on the host's PCI cache line size and the
 * bus type (PCI / PCI-X / PCIe) of the chip.
 *
 * Returns the updated DMA_RWCTRL value; @val is returned unchanged on
 * chips where the boundary bits have no effect.
 */
static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	/* PCI_CACHE_LINE_SIZE is expressed in 32-bit dwords; 0 means the
	 * register was never programmed, so assume the 1024-byte maximum.
	 */
	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

	/* Pick the burst-alignment policy per host architecture. */
#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		/* 57765+ only have a cache-alignment disable bit. */
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		/* PCIe exposes write-side boundary control only. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		/* Conventional PCI: match the boundary to the cache line
		 * when a single-cacheline policy was requested.
		 */
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
16862
/* Run one transfer through the chip's internal DMA engine against the
 * supplied coherent test buffer.
 *
 * @tp:        device state
 * @buf:       CPU address of the test buffer (unused here; DMA uses
 *             @buf_dma, kept for symmetry with the caller)
 * @buf_dma:   bus/DMA address of the test buffer
 * @size:      transfer length in bytes
 * @to_device: true  = host-to-device (read DMA engine),
 *             false = device-to-host (write DMA engine)
 *
 * Builds a single internal buffer descriptor, copies it into NIC SRAM
 * through the PCI memory window, kicks the matching FTQ and polls the
 * completion FIFO for up to 40 * 100us = 4ms.
 *
 * Returns 0 when the descriptor completed, -ENODEV on timeout.
 */
static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
			   int size, bool to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce the DMA/completion state before the test. */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor word-by-word into NIC SRAM through the
	 * PCI config-space memory window.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Hand the descriptor to the appropriate DMA engine's FTQ. */
	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	/* Poll the completion FIFO until our descriptor shows up. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
16943
16944 #define TEST_BUFFER_SIZE        0x2000
16945
16946 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16947         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16948         { },
16949 };
16950
16951 static int tg3_test_dma(struct tg3 *tp)
16952 {
16953         dma_addr_t buf_dma;
16954         u32 *buf, saved_dma_rwctrl;
16955         int ret = 0;
16956
16957         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16958                                  &buf_dma, GFP_KERNEL);
16959         if (!buf) {
16960                 ret = -ENOMEM;
16961                 goto out_nofree;
16962         }
16963
16964         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16965                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16966
16967         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16968
16969         if (tg3_flag(tp, 57765_PLUS))
16970                 goto out;
16971
16972         if (tg3_flag(tp, PCI_EXPRESS)) {
16973                 /* DMA read watermark not used on PCIE */
16974                 tp->dma_rwctrl |= 0x00180000;
16975         } else if (!tg3_flag(tp, PCIX_MODE)) {
16976                 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16977                     tg3_asic_rev(tp) == ASIC_REV_5750)
16978                         tp->dma_rwctrl |= 0x003f0000;
16979                 else
16980                         tp->dma_rwctrl |= 0x003f000f;
16981         } else {
16982                 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16983                     tg3_asic_rev(tp) == ASIC_REV_5704) {
16984                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16985                         u32 read_water = 0x7;
16986
16987                         /* If the 5704 is behind the EPB bridge, we can
16988                          * do the less restrictive ONE_DMA workaround for
16989                          * better performance.
16990                          */
16991                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16992                             tg3_asic_rev(tp) == ASIC_REV_5704)
16993                                 tp->dma_rwctrl |= 0x8000;
16994                         else if (ccval == 0x6 || ccval == 0x7)
16995                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16996
16997                         if (tg3_asic_rev(tp) == ASIC_REV_5703)
16998                                 read_water = 4;
16999                         /* Set bit 23 to enable PCIX hw bug fix */
17000                         tp->dma_rwctrl |=
17001                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17002                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17003                                 (1 << 23);
17004                 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17005                         /* 5780 always in PCIX mode */
17006                         tp->dma_rwctrl |= 0x00144000;
17007                 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17008                         /* 5714 always in PCIX mode */
17009                         tp->dma_rwctrl |= 0x00148000;
17010                 } else {
17011                         tp->dma_rwctrl |= 0x001b000f;
17012                 }
17013         }
17014         if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17015                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17016
17017         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17018             tg3_asic_rev(tp) == ASIC_REV_5704)
17019                 tp->dma_rwctrl &= 0xfffffff0;
17020
17021         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17022             tg3_asic_rev(tp) == ASIC_REV_5701) {
17023                 /* Remove this if it causes problems for some boards. */
17024                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17025
17026                 /* On 5700/5701 chips, we need to set this bit.
17027                  * Otherwise the chip will issue cacheline transactions
17028                  * to streamable DMA memory with not all the byte
17029                  * enables turned on.  This is an error on several
17030                  * RISC PCI controllers, in particular sparc64.
17031                  *
17032                  * On 5703/5704 chips, this bit has been reassigned
17033                  * a different meaning.  In particular, it is used
17034                  * on those chips to enable a PCI-X workaround.
17035                  */
17036                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17037         }
17038
17039         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17040
17041 #if 0
17042         /* Unneeded, already done by tg3_get_invariants.  */
17043         tg3_switch_clocks(tp);
17044 #endif
17045
17046         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17047             tg3_asic_rev(tp) != ASIC_REV_5701)
17048                 goto out;
17049
17050         /* It is best to perform DMA test with maximum write burst size
17051          * to expose the 5700/5701 write DMA bug.
17052          */
17053         saved_dma_rwctrl = tp->dma_rwctrl;
17054         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17055         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17056
17057         while (1) {
17058                 u32 *p = buf, i;
17059
17060                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17061                         p[i] = i;
17062
17063                 /* Send the buffer to the chip. */
17064                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17065                 if (ret) {
17066                         dev_err(&tp->pdev->dev,
17067                                 "%s: Buffer write failed. err = %d\n",
17068                                 __func__, ret);
17069                         break;
17070                 }
17071
17072 #if 0
17073                 /* validate data reached card RAM correctly. */
17074                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17075                         u32 val;
17076                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
17077                         if (le32_to_cpu(val) != p[i]) {
17078                                 dev_err(&tp->pdev->dev,
17079                                         "%s: Buffer corrupted on device! "
17080                                         "(%d != %d)\n", __func__, val, i);
17081                                 /* ret = -ENODEV here? */
17082                         }
17083                         p[i] = 0;
17084                 }
17085 #endif
17086                 /* Now read it back. */
17087                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17088                 if (ret) {
17089                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17090                                 "err = %d\n", __func__, ret);
17091                         break;
17092                 }
17093
17094                 /* Verify it. */
17095                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17096                         if (p[i] == i)
17097                                 continue;
17098
17099                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17100                             DMA_RWCTRL_WRITE_BNDRY_16) {
17101                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17102                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17103                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17104                                 break;
17105                         } else {
17106                                 dev_err(&tp->pdev->dev,
17107                                         "%s: Buffer corrupted on read back! "
17108                                         "(%d != %d)\n", __func__, p[i], i);
17109                                 ret = -ENODEV;
17110                                 goto out;
17111                         }
17112                 }
17113
17114                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17115                         /* Success. */
17116                         ret = 0;
17117                         break;
17118                 }
17119         }
17120         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17121             DMA_RWCTRL_WRITE_BNDRY_16) {
17122                 /* DMA test passed without adjusting DMA boundary,
17123                  * now look for chipsets that are known to expose the
17124                  * DMA bug without failing the test.
17125                  */
17126                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17127                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17128                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17129                 } else {
17130                         /* Safe to use the calculated DMA boundary. */
17131                         tp->dma_rwctrl = saved_dma_rwctrl;
17132                 }
17133
17134                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17135         }
17136
17137 out:
17138         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17139 out_nofree:
17140         return ret;
17141 }
17142
17143 static void tg3_init_bufmgr_config(struct tg3 *tp)
17144 {
17145         if (tg3_flag(tp, 57765_PLUS)) {
17146                 tp->bufmgr_config.mbuf_read_dma_low_water =
17147                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17148                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17149                         DEFAULT_MB_MACRX_LOW_WATER_57765;
17150                 tp->bufmgr_config.mbuf_high_water =
17151                         DEFAULT_MB_HIGH_WATER_57765;
17152
17153                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17154                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17155                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17156                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17157                 tp->bufmgr_config.mbuf_high_water_jumbo =
17158                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17159         } else if (tg3_flag(tp, 5705_PLUS)) {
17160                 tp->bufmgr_config.mbuf_read_dma_low_water =
17161                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17162                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17163                         DEFAULT_MB_MACRX_LOW_WATER_5705;
17164                 tp->bufmgr_config.mbuf_high_water =
17165                         DEFAULT_MB_HIGH_WATER_5705;
17166                 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17167                         tp->bufmgr_config.mbuf_mac_rx_low_water =
17168                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
17169                         tp->bufmgr_config.mbuf_high_water =
17170                                 DEFAULT_MB_HIGH_WATER_5906;
17171                 }
17172
17173                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17174                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17175                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17176                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17177                 tp->bufmgr_config.mbuf_high_water_jumbo =
17178                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17179         } else {
17180                 tp->bufmgr_config.mbuf_read_dma_low_water =
17181                         DEFAULT_MB_RDMA_LOW_WATER;
17182                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17183                         DEFAULT_MB_MACRX_LOW_WATER;
17184                 tp->bufmgr_config.mbuf_high_water =
17185                         DEFAULT_MB_HIGH_WATER;
17186
17187                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17188                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17189                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17190                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17191                 tp->bufmgr_config.mbuf_high_water_jumbo =
17192                         DEFAULT_MB_HIGH_WATER_JUMBO;
17193         }
17194
17195         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17196         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17197 }
17198
17199 static char *tg3_phy_string(struct tg3 *tp)
17200 {
17201         switch (tp->phy_id & TG3_PHY_ID_MASK) {
17202         case TG3_PHY_ID_BCM5400:        return "5400";
17203         case TG3_PHY_ID_BCM5401:        return "5401";
17204         case TG3_PHY_ID_BCM5411:        return "5411";
17205         case TG3_PHY_ID_BCM5701:        return "5701";
17206         case TG3_PHY_ID_BCM5703:        return "5703";
17207         case TG3_PHY_ID_BCM5704:        return "5704";
17208         case TG3_PHY_ID_BCM5705:        return "5705";
17209         case TG3_PHY_ID_BCM5750:        return "5750";
17210         case TG3_PHY_ID_BCM5752:        return "5752";
17211         case TG3_PHY_ID_BCM5714:        return "5714";
17212         case TG3_PHY_ID_BCM5780:        return "5780";
17213         case TG3_PHY_ID_BCM5755:        return "5755";
17214         case TG3_PHY_ID_BCM5787:        return "5787";
17215         case TG3_PHY_ID_BCM5784:        return "5784";
17216         case TG3_PHY_ID_BCM5756:        return "5722/5756";
17217         case TG3_PHY_ID_BCM5906:        return "5906";
17218         case TG3_PHY_ID_BCM5761:        return "5761";
17219         case TG3_PHY_ID_BCM5718C:       return "5718C";
17220         case TG3_PHY_ID_BCM5718S:       return "5718S";
17221         case TG3_PHY_ID_BCM57765:       return "57765";
17222         case TG3_PHY_ID_BCM5719C:       return "5719C";
17223         case TG3_PHY_ID_BCM5720C:       return "5720C";
17224         case TG3_PHY_ID_BCM5762:        return "5762C";
17225         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
17226         case 0:                 return "serdes";
17227         default:                return "unknown";
17228         }
17229 }
17230
17231 static char *tg3_bus_string(struct tg3 *tp, char *str)
17232 {
17233         if (tg3_flag(tp, PCI_EXPRESS)) {
17234                 strcpy(str, "PCI Express");
17235                 return str;
17236         } else if (tg3_flag(tp, PCIX_MODE)) {
17237                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17238
17239                 strcpy(str, "PCIX:");
17240
17241                 if ((clock_ctrl == 7) ||
17242                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17243                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17244                         strcat(str, "133MHz");
17245                 else if (clock_ctrl == 0)
17246                         strcat(str, "33MHz");
17247                 else if (clock_ctrl == 2)
17248                         strcat(str, "50MHz");
17249                 else if (clock_ctrl == 4)
17250                         strcat(str, "66MHz");
17251                 else if (clock_ctrl == 6)
17252                         strcat(str, "100MHz");
17253         } else {
17254                 strcpy(str, "PCI:");
17255                 if (tg3_flag(tp, PCI_HIGH_SPEED))
17256                         strcat(str, "66MHz");
17257                 else
17258                         strcat(str, "33MHz");
17259         }
17260         if (tg3_flag(tp, PCI_32BIT))
17261                 strcat(str, ":32-bit");
17262         else
17263                 strcat(str, ":64-bit");
17264         return str;
17265 }
17266
17267 static void tg3_init_coal(struct tg3 *tp)
17268 {
17269         struct ethtool_coalesce *ec = &tp->coal;
17270
17271         memset(ec, 0, sizeof(*ec));
17272         ec->cmd = ETHTOOL_GCOALESCE;
17273         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17274         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17275         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17276         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17277         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17278         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17279         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17280         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17281         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17282
17283         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17284                                  HOSTCC_MODE_CLRTICK_TXBD)) {
17285                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17286                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17287                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17288                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17289         }
17290
17291         if (tg3_flag(tp, 5705_PLUS)) {
17292                 ec->rx_coalesce_usecs_irq = 0;
17293                 ec->tx_coalesce_usecs_irq = 0;
17294                 ec->stats_block_coalesce_usecs = 0;
17295         }
17296 }
17297
/* tg3_init_one - PCI probe entry point.
 *
 * Enables and maps the device, reads chip invariants, configures DMA
 * masks and offload features, runs the DMA engine self-test, lays out
 * the per-vector interrupt/consumer/producer mailboxes, and finally
 * registers the net_device.  Returns 0 on success or a negative
 * errno; on failure, resources are released in reverse order through
 * the err_out_* labels.
 */
static int tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pdev->pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;
	/* Treat interrupts as disabled until the device is opened. */
	tp->irq_sync = 1;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* SSB-attached GigE cores need several behavioral quirks
	 * flagged before any register access.
	 */
	if (pdev_is_ssb_gige_core(pdev)) {
		tg3_flag_set(tp, IS_SSB_CORE);
		if (ssb_gige_must_flush_posted_writes(pdev))
			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
		if (ssb_gige_one_dma_at_once(pdev))
			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
		if (ssb_gige_have_roboswitch(pdev))
			tg3_flag_set(tp, ROBOSWITCH);
		if (ssb_gige_is_rgmii(pdev))
			tg3_flag_set(tp, RGMII_MODE);
	}

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	/* These devices carry an APE management processor whose
	 * register block lives in BAR 2.
	 */
	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp, ent);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	/* Fall back to a 32-bit streaming mask if the wide mask was
	 * rejected (or was never requested).
	 */
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
		    tg3_asic_rev(tp) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
	 * loopback for the remaining devices.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	/* Lay out the mailbox registers used by each interrupt vector. */
	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		/* Interrupt mailboxes: 8-byte stride for the first
		 * vectors, 4-byte stride after that.
		 */
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, PTP_CAPABLE);

	tg3_timer_init(tp);

	tg3_carrier_off(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tg3_chip_rev_id(tp),
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	/* Snapshot config space for later pci_restore_state() calls. */
	pci_save_state(pdev);

	return 0;

	/* Error unwind: release in reverse order of acquisition. */
err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
17678
17679 static void tg3_remove_one(struct pci_dev *pdev)
17680 {
17681         struct net_device *dev = pci_get_drvdata(pdev);
17682
17683         if (dev) {
17684                 struct tg3 *tp = netdev_priv(dev);
17685
17686                 release_firmware(tp->fw);
17687
17688                 tg3_reset_task_cancel(tp);
17689
17690                 if (tg3_flag(tp, USE_PHYLIB)) {
17691                         tg3_phy_fini(tp);
17692                         tg3_mdio_fini(tp);
17693                 }
17694
17695                 unregister_netdev(dev);
17696                 if (tp->aperegs) {
17697                         iounmap(tp->aperegs);
17698                         tp->aperegs = NULL;
17699                 }
17700                 if (tp->regs) {
17701                         iounmap(tp->regs);
17702                         tp->regs = NULL;
17703                 }
17704                 free_netdev(dev);
17705                 pci_release_regions(pdev);
17706                 pci_disable_device(pdev);
17707                 pci_set_drvdata(pdev, NULL);
17708         }
17709 }
17710
17711 #ifdef CONFIG_PM_SLEEP
/* System-sleep suspend callback.
 *
 * Quiesces a running interface (reset task, PHY, NAPI, timer,
 * interrupts), detaches it from the stack, halts the chip and
 * prepares it for low power.  If tg3_power_down_prepare() fails the
 * hardware is restarted so the interface stays usable, and the
 * original error is still returned.
 */
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Nothing to quiesce if the interface is down. */
	if (!netif_running(dev))
		return 0;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		/* Power-down preparation failed: restart the hardware
		 * and reattach the interface, but report the failure.
		 */
		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, true);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
17764
/* System-sleep resume callback: reattach the interface and restart
 * the hardware, timer, NAPI and PHY if the device was running when
 * suspended.
 */
static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	tg3_flag_set(tp, INIT_COMPLETE);
	/* Skip the PHY reset when the PHY was asked to keep the link
	 * up across the power-down.
	 */
	err = tg3_restart_hw(tp,
			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
17799 #endif /* CONFIG_PM_SLEEP */
17800
/* Power-management callbacks (see tg3_suspend()/tg3_resume() above). */
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17802
/* PCI shutdown callback (reboot/power-off): detach and close the
 * interface under RTNL; on an actual power-off also put the chip
 * into its low-power state.
 */
static void tg3_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (netif_running(dev))
		dev_close(dev);

	/* Only power the chip down for a real power-off, not a reboot. */
	if (system_state == SYSTEM_POWER_OFF)
		tg3_power_down(tp);

	rtnl_unlock();
}
17819
17820 /**
17821  * tg3_io_error_detected - called when PCI error is detected
17822  * @pdev: Pointer to PCI device
17823  * @state: The current pci connection state
17824  *
17825  * This function is called after a PCI bus error affecting
17826  * this device has been detected.
17827  */
17828 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17829                                               pci_channel_state_t state)
17830 {
17831         struct net_device *netdev = pci_get_drvdata(pdev);
17832         struct tg3 *tp = netdev_priv(netdev);
17833         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17834
17835         netdev_info(netdev, "PCI I/O error detected\n");
17836
17837         rtnl_lock();
17838
17839         /* We probably don't have netdev yet */
17840         if (!netdev || !netif_running(netdev))
17841                 goto done;
17842
17843         tg3_phy_stop(tp);
17844
17845         tg3_netif_stop(tp);
17846
17847         tg3_timer_stop(tp);
17848
17849         /* Want to make sure that the reset task doesn't run */
17850         tg3_reset_task_cancel(tp);
17851
17852         netif_device_detach(netdev);
17853
17854         /* Clean up software state, even if MMIO is blocked */
17855         tg3_full_lock(tp, 0);
17856         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17857         tg3_full_unlock(tp);
17858
17859 done:
17860         if (state == pci_channel_io_perm_failure) {
17861                 if (netdev) {
17862                         tg3_napi_enable(tp);
17863                         dev_close(netdev);
17864                 }
17865                 err = PCI_ERS_RESULT_DISCONNECT;
17866         } else {
17867                 pci_disable_device(pdev);
17868         }
17869
17870         rtnl_unlock();
17871
17872         return err;
17873 }
17874
/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	/* Restore config space, then re-snapshot it for the next reset. */
	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	/* If the interface is not up, re-enabling the device is all
	 * the recovery that is needed.
	 */
	if (!netdev || !netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	/* Recovery failed on a running interface: close it.  NAPI is
	 * re-enabled first so dev_close() can quiesce it.
	 */
	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
		tg3_napi_enable(tp);
		dev_close(netdev);
	}
	rtnl_unlock();

	return rc;
}
17923
17924 /**
17925  * tg3_io_resume - called when traffic can start flowing again.
17926  * @pdev: Pointer to PCI device
17927  *
17928  * This callback is called when the error recovery driver tells
17929  * us that its OK to resume normal operation.
17930  */
17931 static void tg3_io_resume(struct pci_dev *pdev)
17932 {
17933         struct net_device *netdev = pci_get_drvdata(pdev);
17934         struct tg3 *tp = netdev_priv(netdev);
17935         int err;
17936
17937         rtnl_lock();
17938
17939         if (!netif_running(netdev))
17940                 goto done;
17941
17942         tg3_full_lock(tp, 0);
17943         tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
17944         tg3_flag_set(tp, INIT_COMPLETE);
17945         err = tg3_restart_hw(tp, true);
17946         if (err) {
17947                 tg3_full_unlock(tp);
17948                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17949                 goto done;
17950         }
17951
17952         netif_device_attach(netdev);
17953
17954         tg3_timer_start(tp);
17955
17956         tg3_netif_start(tp);
17957
17958         tg3_full_unlock(tp);
17959
17960         tg3_phy_start(tp);
17961
17962 done:
17963         rtnl_unlock();
17964 }
17965
/* PCI error recovery callbacks: detect an error, reset the slot, then
 * resume traffic (see tg3_io_error_detected/slot_reset/resume above).
 */
static const struct pci_error_handlers tg3_err_handler = {
	.error_detected = tg3_io_error_detected,
	.slot_reset     = tg3_io_slot_reset,
	.resume         = tg3_io_resume
};
17971
/* PCI driver glue: probe/remove hooks, power management, shutdown and
 * error-recovery handlers for all devices matched by tg3_pci_tbl.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= &tg3_pm_ops,
	.shutdown	= tg3_shutdown,
};

/* Expands to module init/exit that register/unregister tg3_driver. */
module_pci_driver(tg3_driver);