/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
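
/* Usage note (restating the macros above): call sites name flags tersely,
 * so tg3_flag(tp, ENABLE_APE) expands to
 * _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags).
 */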
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			133
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"Jul 29, 2013"
#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)
#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
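
/* Illustrative sanity check (not in the original driver): NEXT_TX's
 * '& (TG3_TX_RING_SIZE - 1)' rewrite of '% TG3_TX_RING_SIZE' is only
 * valid for power-of-two ring sizes, which this check enforces.
 */
#if (TG3_TX_RING_SIZE & (TG3_TX_RING_SIZE - 1)) != 0
#error "TG3_TX_RING_SIZE must be a power of two for the NEXT_TX mask trick"
#endif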
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif
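
/* How the threshold above is typically consulted on receive (a sketch,
 * not the actual tg3_rx() code): small frames are copied into a fresh
 * skb, which doubles as the 5701 alignment workaround, while larger
 * frames hand the DMA buffer up the stack directly.
 *
 *	if (len > TG3_RX_COPY_THRESH(tp))
 *		... attach the existing DMA buffer to the skb ...
 *	else
 *		... allocate a small skb and memcpy the frame ...
 */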
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
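
/* Worked example (illustrative): with the default tx_pending of
 * TG3_DEF_TX_RING_PENDING = 511, the queue is woken once 511 / 4 = 127
 * descriptors are free again.
 */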
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
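
/* Note (editorial, illustrative): PCI_DEVICE_SUB() entries such as the
 * Lenovo 5787M one above also match on subsystem IDs, and the PCI core
 * takes the first matching entry, so the more specific entries must
 * precede the bare PCI_DEVICE() entry for the same device ID, as they
 * do here.
 */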
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },
	{ "tx_flow_control" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rx_threshold_hit" },
	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },
	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },
	{ "mbuf_lwm_thresh_hit" },
};
#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)

#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
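
/* Example usage (illustrative): GPIO power switching through
 * GRC_LOCAL_CTRL is one of the cases the comment before _tw32_flush()
 * calls out, so such call sites use the waiting flavor:
 *
 *	tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
 *		    TG3_GRC_LCLCTL_PWRSW_DELAY);
 */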
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, ret = 0;
	u32 status, req, gnt, bit, off;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
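
/* Typical pairing (illustrative; tg3_ape_event_lock() below is a real
 * in-driver caller): acquire the lock, touch the shared resource, then
 * release it.
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return -EBUSY;
 *	... access APE shared memory ...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 */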
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}
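
/* Handshake summary (restating the two functions above): wait for the APE
 * to clear any prior EVENT_PENDING, post the new event word in
 * TG3_APE_EVENT_STATUS, then ring the APE_EVENT_1 doorbell.
 */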
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}
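
/* Illustrative read-modify-write over MDIO, a pattern used throughout
 * this file (hypothetical call site, not original code):
 *
 *	u32 val;
 *
 *	if (!tg3_readphy(tp, MII_BMCR, &val))
 *		tg3_writephy(tp, MII_BMCR, val | BMCR_ANRESTART);
 */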
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fall through */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}
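
/* Worked example (illustrative): if ~1000 usec of the 2500 usec window
 * remain, delay_cnt becomes (1000 >> 3) + 1 = 126, and the loop above
 * polls for up to 126 * 8 = 1008 usec before giving up on the ACK.
 */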
/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}
/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}

	tp->link_up = netif_carrier_ok(tp->dev);
}
static u32 tg3_decode_flowctrl_1000T(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_PAUSE_CAP) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_PAUSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_PAUSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u32 tg3_decode_flowctrl_1000X(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_1000XPAUSE) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_1000XPSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_1000XPSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
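
/* Resolution implemented above, restated (illustrative):
 *	- both sides advertise 1000XPAUSE              -> TX and RX pause
 *	- both advertise 1000XPSE_ASYM, and
 *	    the local side also advertises 1000XPAUSE  -> RX pause only
 *	    the remote side also advertises 1000XPAUSE -> TX pause only
 *	- anything else                                -> no pause
 */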
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 tg3_asic_rev(tp) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fall through */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		phydev->advertising = tp->link_config.advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}
static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}

static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}
2297 static void tg3_phy_apply_otp(struct tg3 *tp)
2306 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2309 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2310 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2311 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2313 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2314 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2315 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2317 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2318 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2319 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2321 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2322 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2324 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2325 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2327 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2328 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2329 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2331 tg3_phy_toggle_auxctl_smdsp(tp, false);
2334 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2337 struct ethtool_eee *dest = &tp->eee;
2339 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2345 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2348 /* Pull eee_active */
2349 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2350 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2351 dest->eee_active = 1;
2353 dest->eee_active = 0;
2355 /* Pull lp advertised settings */
2356 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2358 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2360 /* Pull advertised and eee_enabled settings */
2361 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2363 dest->eee_enabled = !!val;
2364 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2366 /* Pull tx_lpi_enabled */
2367 val = tr32(TG3_CPMU_EEE_MODE);
2368 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2370 /* Pull lpi timer value */
2371 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
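/* Summary (added note): tg3_eee_pull_config() rebuilds a struct
 * ethtool_eee purely from live state - the PHY's clause-45 AN MMD
 * registers plus the CPMU EEE registers. Callers either pass their
 * own buffer to compare against tp->eee (see tg3_phy_eee_config_ok())
 * or, as in tg3_phy_eee_adjust() below, pass NULL so the default
 * destination tp->eee is refreshed in place.
 */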
2374 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2378 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2383 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2385 tp->link_config.active_duplex == DUPLEX_FULL &&
2386 (tp->link_config.active_speed == SPEED_100 ||
2387 tp->link_config.active_speed == SPEED_1000)) {
2390 if (tp->link_config.active_speed == SPEED_1000)
2391 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2393 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2395 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2397 tg3_eee_pull_config(tp, NULL);
2398 if (tp->eee.eee_active)
2402 if (!tp->setlpicnt) {
2403 if (current_link_up &&
2404 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2405 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2406 tg3_phy_toggle_auxctl_smdsp(tp, false);
2409 val = tr32(TG3_CPMU_EEE_MODE);
2410 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2414 static void tg3_phy_eee_enable(struct tg3 *tp)
2418 if (tp->link_config.active_speed == SPEED_1000 &&
2419 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2420 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2421 tg3_flag(tp, 57765_CLASS)) &&
2422 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2423 val = MII_TG3_DSP_TAP26_ALNOKO |
2424 MII_TG3_DSP_TAP26_RMRXSTO;
2425 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2426 tg3_phy_toggle_auxctl_smdsp(tp, false);
2429 val = tr32(TG3_CPMU_EEE_MODE);
2430 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2433 static int tg3_wait_macro_done(struct tg3 *tp)
2440 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2441 if ((tmp32 & 0x1000) == 0)
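/* Overview of the routine below (added note, a reading of the code):
 * each of the PHY's four DSP channel blocks is selected via
 * MII_TG3_DSP_ADDRESS (chan * 0x2000 | 0x0200); 0x0002 is written to
 * MII_TG3_DSP_CONTROL, six test words are streamed through
 * MII_TG3_DSP_RW_PORT, and 0x0202 kicks the macro. The block is then
 * re-selected and read back (0x0082/0x0802) and compared word for
 * word; any mismatch pokes DSP address 0x000b and sets *resetp so
 * the caller resets the PHY and retries.
 */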
2451 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2453 static const u32 test_pat[4][6] = {
2454 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2455 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2456 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2457 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2461 for (chan = 0; chan < 4; chan++) {
2464 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2465 (chan * 0x2000) | 0x0200);
2466 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2468 for (i = 0; i < 6; i++)
2469 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2472 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2473 if (tg3_wait_macro_done(tp)) {
2478 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2479 (chan * 0x2000) | 0x0200);
2480 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2481 if (tg3_wait_macro_done(tp)) {
2486 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2487 if (tg3_wait_macro_done(tp)) {
2492 for (i = 0; i < 6; i += 2) {
2495 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2496 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2497 tg3_wait_macro_done(tp)) {
2503 if (low != test_pat[chan][i] ||
2504 high != test_pat[chan][i+1]) {
2505 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2506 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2507 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2517 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2521 for (chan = 0; chan < 4; chan++) {
2524 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2525 (chan * 0x2000) | 0x0200);
2526 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2527 for (i = 0; i < 6; i++)
2528 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2529 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2530 if (tg3_wait_macro_done(tp))
2537 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2539 u32 reg32, phy9_orig;
2540 int retries, do_phy_reset, err;
2546 err = tg3_bmcr_reset(tp);
2552 /* Disable transmitter and interrupt. */
2553 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2557 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2559 /* Set full-duplex, 1000 Mbps. */
2560 tg3_writephy(tp, MII_BMCR,
2561 BMCR_FULLDPLX | BMCR_SPEED1000);
2563 /* Set to master mode. */
2564 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2567 tg3_writephy(tp, MII_CTRL1000,
2568 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2570 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2574 /* Block the PHY control access. */
2575 tg3_phydsp_write(tp, 0x8005, 0x0800);
2577 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2580 } while (--retries);
2582 err = tg3_phy_reset_chanpat(tp);
2586 tg3_phydsp_write(tp, 0x8005, 0x0000);
2588 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2589 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2591 tg3_phy_toggle_auxctl_smdsp(tp, false);
2593 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2595 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2597 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2604 static void tg3_carrier_off(struct tg3 *tp)
2606 netif_carrier_off(tp->dev);
2607 tp->link_up = false;
2610 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2612 if (tg3_flag(tp, ENABLE_ASF))
2613 netdev_warn(tp->dev,
2614 "Management side-band traffic will be interrupted during phy settings change\n");
2617 /* This will reset the tigon3 PHY if there is no valid link,
2618 * or unconditionally if the FORCE argument is non-zero.
2620 static int tg3_phy_reset(struct tg3 *tp)
2625 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2626 val = tr32(GRC_MISC_CFG);
2627 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2630 err = tg3_readphy(tp, MII_BMSR, &val);
2631 err |= tg3_readphy(tp, MII_BMSR, &val);
2635 if (netif_running(tp->dev) && tp->link_up) {
2636 netif_carrier_off(tp->dev);
2637 tg3_link_report(tp);
2640 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2641 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2642 tg3_asic_rev(tp) == ASIC_REV_5705) {
2643 err = tg3_phy_reset_5703_4_5(tp);
2650 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2651 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2652 cpmuctrl = tr32(TG3_CPMU_CTRL);
2653 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2655 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2658 err = tg3_bmcr_reset(tp);
2662 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2663 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2664 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2666 tw32(TG3_CPMU_CTRL, cpmuctrl);
2669 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2670 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2671 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2672 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2673 CPMU_LSPD_1000MB_MACCLK_12_5) {
2674 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2676 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2680 if (tg3_flag(tp, 5717_PLUS) &&
2681 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2684 tg3_phy_apply_otp(tp);
2686 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2687 tg3_phy_toggle_apd(tp, true);
2689 tg3_phy_toggle_apd(tp, false);
2692 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2693 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2694 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2695 tg3_phydsp_write(tp, 0x000a, 0x0323);
2696 tg3_phy_toggle_auxctl_smdsp(tp, false);
2699 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2700 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2701 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2704 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2705 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2706 tg3_phydsp_write(tp, 0x000a, 0x310b);
2707 tg3_phydsp_write(tp, 0x201f, 0x9506);
2708 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2709 tg3_phy_toggle_auxctl_smdsp(tp, false);
2711 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2712 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2713 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2714 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2715 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2716 tg3_writephy(tp, MII_TG3_TEST1,
2717 MII_TG3_TEST1_TRIM_EN | 0x4);
2719 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2721 tg3_phy_toggle_auxctl_smdsp(tp, false);
2725 /* Set Extended packet length bit (bit 14) on all chips that
2726 * support jumbo frames. */
2727 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2728 /* Cannot do read-modify-write on 5401 */
2729 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2730 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2731 /* Set bit 14 with read-modify-write to preserve other bits */
2732 err = tg3_phy_auxctl_read(tp,
2733 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2735 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2736 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2739 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2740 * jumbo frames transmission.
2742 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2743 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2744 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2745 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2748 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2749 /* adjust output voltage */
2750 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2753 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2754 tg3_phydsp_write(tp, 0xffb, 0x4000);
2756 tg3_phy_toggle_automdix(tp, true);
2757 tg3_phy_set_wirespeed(tp);
2761 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2762 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2763 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2764 TG3_GPIO_MSG_NEED_VAUX)
2765 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2766 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2767 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2768 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2769 (TG3_GPIO_MSG_DRVR_PRES << 12))
2771 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2772 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2773 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2774 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2775 (TG3_GPIO_MSG_NEED_VAUX << 12))
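/* Each PCI function owns a 4-bit nibble of the status word built from
 * the flags above. An illustrative update for function 2 (a sketch
 * of what tg3_set_function_status() does below):
 *
 *	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * 2;
 *	status &= ~(TG3_GPIO_MSG_MASK << shift);
 *	status |= TG3_GPIO_MSG_NEED_VAUX << shift;
 */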
2777 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2781 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2782 tg3_asic_rev(tp) == ASIC_REV_5719)
2783 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2785 status = tr32(TG3_CPMU_DRV_STATUS);
2787 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2788 status &= ~(TG3_GPIO_MSG_MASK << shift);
2789 status |= (newstat << shift);
2791 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2792 tg3_asic_rev(tp) == ASIC_REV_5719)
2793 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2795 tw32(TG3_CPMU_DRV_STATUS, status);
2797 return status >> TG3_APE_GPIO_MSG_SHIFT;
2800 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2802 if (!tg3_flag(tp, IS_NIC))
2805 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2806 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2807 tg3_asic_rev(tp) == ASIC_REV_5720) {
2808 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2811 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2813 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2814 TG3_GRC_LCLCTL_PWRSW_DELAY);
2816 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2818 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2819 TG3_GRC_LCLCTL_PWRSW_DELAY);
2825 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2829 if (!tg3_flag(tp, IS_NIC) ||
2830 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2831 tg3_asic_rev(tp) == ASIC_REV_5701)
2834 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2836 tw32_wait_f(GRC_LOCAL_CTRL,
2837 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2838 TG3_GRC_LCLCTL_PWRSW_DELAY);
2840 tw32_wait_f(GRC_LOCAL_CTRL,
2842 TG3_GRC_LCLCTL_PWRSW_DELAY);
2844 tw32_wait_f(GRC_LOCAL_CTRL,
2845 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2846 TG3_GRC_LCLCTL_PWRSW_DELAY);
2849 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2851 if (!tg3_flag(tp, IS_NIC))
2854 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2855 tg3_asic_rev(tp) == ASIC_REV_5701) {
2856 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2857 (GRC_LCLCTRL_GPIO_OE0 |
2858 GRC_LCLCTRL_GPIO_OE1 |
2859 GRC_LCLCTRL_GPIO_OE2 |
2860 GRC_LCLCTRL_GPIO_OUTPUT0 |
2861 GRC_LCLCTRL_GPIO_OUTPUT1),
2862 TG3_GRC_LCLCTL_PWRSW_DELAY);
2863 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2864 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2865 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2866 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2867 GRC_LCLCTRL_GPIO_OE1 |
2868 GRC_LCLCTRL_GPIO_OE2 |
2869 GRC_LCLCTRL_GPIO_OUTPUT0 |
2870 GRC_LCLCTRL_GPIO_OUTPUT1 |
2872 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2873 TG3_GRC_LCLCTL_PWRSW_DELAY);
2875 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2876 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2877 TG3_GRC_LCLCTL_PWRSW_DELAY);
2879 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2880 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2881 TG3_GRC_LCLCTL_PWRSW_DELAY);
2884 u32 grc_local_ctrl = 0;
2886 /* Workaround to prevent overdrawing Amps. */
2887 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2888 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2889 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2891 TG3_GRC_LCLCTL_PWRSW_DELAY);
2894 /* On 5753 and variants, GPIO2 cannot be used. */
2895 no_gpio2 = tp->nic_sram_data_cfg &
2896 NIC_SRAM_DATA_CFG_NO_GPIO2;
2898 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2899 GRC_LCLCTRL_GPIO_OE1 |
2900 GRC_LCLCTRL_GPIO_OE2 |
2901 GRC_LCLCTRL_GPIO_OUTPUT1 |
2902 GRC_LCLCTRL_GPIO_OUTPUT2;
2904 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2905 GRC_LCLCTRL_GPIO_OUTPUT2);
2907 tw32_wait_f(GRC_LOCAL_CTRL,
2908 tp->grc_local_ctrl | grc_local_ctrl,
2909 TG3_GRC_LCLCTL_PWRSW_DELAY);
2911 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2913 tw32_wait_f(GRC_LOCAL_CTRL,
2914 tp->grc_local_ctrl | grc_local_ctrl,
2915 TG3_GRC_LCLCTL_PWRSW_DELAY);
2918 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2919 tw32_wait_f(GRC_LOCAL_CTRL,
2920 tp->grc_local_ctrl | grc_local_ctrl,
2921 TG3_GRC_LCLCTL_PWRSW_DELAY);
2926 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2930 /* Serialize power state transitions */
2931 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2934 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2935 msg = TG3_GPIO_MSG_NEED_VAUX;
2937 msg = tg3_set_function_status(tp, msg);
2939 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2942 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2943 tg3_pwrsrc_switch_to_vaux(tp);
2945 tg3_pwrsrc_die_with_vmain(tp);
2948 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
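/* Net effect (added note): every PCI function posts its DRVR_PRES /
 * NEED_VAUX votes through tg3_set_function_status(); if any other
 * function's driver is still present we leave the power source alone,
 * otherwise the last function standing switches to Vaux or Vmain
 * based on the accumulated NEED_VAUX votes, so the switch happens
 * exactly once per device.
 */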
2951 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2953 bool need_vaux = false;
2955 /* The GPIOs do something completely different on 57765. */
2956 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2959 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2960 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2961 tg3_asic_rev(tp) == ASIC_REV_5720) {
2962 tg3_frob_aux_power_5717(tp, include_wol ?
2963 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2967 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2968 struct net_device *dev_peer;
2970 dev_peer = pci_get_drvdata(tp->pdev_peer);
2972 /* remove_one() may have been run on the peer. */
2974 struct tg3 *tp_peer = netdev_priv(dev_peer);
2976 if (tg3_flag(tp_peer, INIT_COMPLETE))
2979 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2980 tg3_flag(tp_peer, ENABLE_ASF))
2985 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2986 tg3_flag(tp, ENABLE_ASF))
2990 tg3_pwrsrc_switch_to_vaux(tp);
2992 tg3_pwrsrc_die_with_vmain(tp);
2995 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2997 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2999 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3000 if (speed != SPEED_10)
3002 } else if (speed == SPEED_10)
3008 static bool tg3_phy_power_bug(struct tg3 *tp)
3010 switch (tg3_asic_rev(tp)) {
3015 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3024 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3033 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3037 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3040 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3041 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3042 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3043 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3046 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3047 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3048 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3053 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3055 val = tr32(GRC_MISC_CFG);
3056 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3059 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3061 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3064 tg3_writephy(tp, MII_ADVERTISE, 0);
3065 tg3_writephy(tp, MII_BMCR,
3066 BMCR_ANENABLE | BMCR_ANRESTART);
3068 tg3_writephy(tp, MII_TG3_FET_TEST,
3069 phytest | MII_TG3_FET_SHADOW_EN);
3070 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3071 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3073 MII_TG3_FET_SHDW_AUXMODE4,
3076 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3079 } else if (do_low_power) {
3080 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3081 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3083 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3084 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3085 MII_TG3_AUXCTL_PCTL_VREG_11V;
3086 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3089 /* The PHY should not be powered down on some chips because of bugs. */
3092 if (tg3_phy_power_bug(tp))
3095 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3096 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3097 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3098 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3099 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3100 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3103 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3106 /* tp->lock is held. */
3107 static int tg3_nvram_lock(struct tg3 *tp)
3109 if (tg3_flag(tp, NVRAM)) {
3112 if (tp->nvram_lock_cnt == 0) {
3113 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3114 for (i = 0; i < 8000; i++) {
3115 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3120 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3124 tp->nvram_lock_cnt++;
3129 /* tp->lock is held. */
3130 static void tg3_nvram_unlock(struct tg3 *tp)
3132 if (tg3_flag(tp, NVRAM)) {
3133 if (tp->nvram_lock_cnt > 0)
3134 tp->nvram_lock_cnt--;
3135 if (tp->nvram_lock_cnt == 0)
3136 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
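/* The two helpers above nest via nvram_lock_cnt, so the usual caller
 * pattern is simply (sketch):
 *
 *	if (!tg3_nvram_lock(tp)) {
 *		... touch NVRAM ...
 *		tg3_nvram_unlock(tp);
 *	}
 */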
3140 /* tp->lock is held. */
3141 static void tg3_enable_nvram_access(struct tg3 *tp)
3143 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3144 u32 nvaccess = tr32(NVRAM_ACCESS);
3146 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3150 /* tp->lock is held. */
3151 static void tg3_disable_nvram_access(struct tg3 *tp)
3153 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3154 u32 nvaccess = tr32(NVRAM_ACCESS);
3156 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3160 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3161 u32 offset, u32 *val)
3166 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3169 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3170 EEPROM_ADDR_DEVID_MASK |
3172 tw32(GRC_EEPROM_ADDR,
3174 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3175 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3176 EEPROM_ADDR_ADDR_MASK) |
3177 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3179 for (i = 0; i < 1000; i++) {
3180 tmp = tr32(GRC_EEPROM_ADDR);
3182 if (tmp & EEPROM_ADDR_COMPLETE)
3186 if (!(tmp & EEPROM_ADDR_COMPLETE))
3189 tmp = tr32(GRC_EEPROM_DATA);
3192 * The data will always be opposite the native endian
3193 * format. Perform a blind byteswap to compensate.
3200 #define NVRAM_CMD_TIMEOUT 10000
3202 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3206 tw32(NVRAM_CMD, nvram_cmd);
3207 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3209 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3215 if (i == NVRAM_CMD_TIMEOUT)
3221 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3223 if (tg3_flag(tp, NVRAM) &&
3224 tg3_flag(tp, NVRAM_BUFFERED) &&
3225 tg3_flag(tp, FLASH) &&
3226 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3227 (tp->nvram_jedecnum == JEDEC_ATMEL))
3229 addr = ((addr / tp->nvram_pagesize) <<
3230 ATMEL_AT45DB0X1B_PAGE_POS) +
3231 (addr % tp->nvram_pagesize);
3236 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3238 if (tg3_flag(tp, NVRAM) &&
3239 tg3_flag(tp, NVRAM_BUFFERED) &&
3240 tg3_flag(tp, FLASH) &&
3241 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3242 (tp->nvram_jedecnum == JEDEC_ATMEL))
3244 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3245 tp->nvram_pagesize) +
3246 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
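/* Background (added note): Atmel AT45DB0x1B-style parts address flash
 * as (page << ATMEL_AT45DB0X1B_PAGE_POS) | byte-within-page rather
 * than as a flat byte offset, so the two helpers above convert in
 * both directions. E.g. with a 264-byte page, flat offset 600 is
 * page 2, byte 72 (600 = 2 * 264 + 72).
 */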
3251 /* NOTE: Data read in from NVRAM is byteswapped according to
3252 * the byteswapping settings for all other register accesses.
3253 * tg3 devices are BE devices, so on a BE machine, the data
3254 * returned will be exactly as it is seen in NVRAM. On a LE
3255 * machine, the 32-bit value will be byteswapped.
3257 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3261 if (!tg3_flag(tp, NVRAM))
3262 return tg3_nvram_read_using_eeprom(tp, offset, val);
3264 offset = tg3_nvram_phys_addr(tp, offset);
3266 if (offset > NVRAM_ADDR_MSK)
3269 ret = tg3_nvram_lock(tp);
3273 tg3_enable_nvram_access(tp);
3275 tw32(NVRAM_ADDR, offset);
3276 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3277 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3280 *val = tr32(NVRAM_RDDATA);
3282 tg3_disable_nvram_access(tp);
3284 tg3_nvram_unlock(tp);
3289 /* Ensures NVRAM data is in bytestream format. */
3290 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3293 int res = tg3_nvram_read(tp, offset, &v);
3295 *val = cpu_to_be32(v);
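/* Illustrative use (not driver code): on a host of either endianness,
 *
 *	__be32 v;
 *	if (!tg3_nvram_read_be32(tp, off, &v))
 *		memcpy(dst, &v, sizeof(v));
 *
 * leaves dst[] holding the four bytes exactly as they appear in the
 * NVRAM part (off and dst are placeholders).
 */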
3299 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3300 u32 offset, u32 len, u8 *buf)
3305 for (i = 0; i < len; i += 4) {
3311 memcpy(&data, buf + i, 4);
3314 * The SEEPROM interface expects the data to always be opposite
3315 * the native endian format. We accomplish this by reversing
3316 * all the operations that would have been performed on the
3317 * data from a call to tg3_nvram_read_be32().
3319 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3321 val = tr32(GRC_EEPROM_ADDR);
3322 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3324 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3326 tw32(GRC_EEPROM_ADDR, val |
3327 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3328 (addr & EEPROM_ADDR_ADDR_MASK) |
3332 for (j = 0; j < 1000; j++) {
3333 val = tr32(GRC_EEPROM_ADDR);
3335 if (val & EEPROM_ADDR_COMPLETE)
3339 if (!(val & EEPROM_ADDR_COMPLETE)) {
3348 /* offset and length are dword aligned */
3349 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3353 u32 pagesize = tp->nvram_pagesize;
3354 u32 pagemask = pagesize - 1;
3358 tmp = kmalloc(pagesize, GFP_KERNEL);
3364 u32 phy_addr, page_off, size;
3366 phy_addr = offset & ~pagemask;
3368 for (j = 0; j < pagesize; j += 4) {
3369 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3370 (__be32 *) (tmp + j));
3377 page_off = offset & pagemask;
3384 memcpy(tmp + page_off, buf, size);
3386 offset = offset + (pagesize - page_off);
3388 tg3_enable_nvram_access(tp);
3391 * Before we can erase the flash page, we need
3392 * to issue a special "write enable" command.
3394 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3396 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3399 /* Erase the target page */
3400 tw32(NVRAM_ADDR, phy_addr);
3402 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3403 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3405 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3408 /* Issue another write enable to start the write. */
3409 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3411 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3414 for (j = 0; j < pagesize; j += 4) {
3417 data = *((__be32 *) (tmp + j));
3419 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3421 tw32(NVRAM_ADDR, phy_addr + j);
3423 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3427 nvram_cmd |= NVRAM_CMD_FIRST;
3428 else if (j == (pagesize - 4))
3429 nvram_cmd |= NVRAM_CMD_LAST;
3431 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3439 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3440 tg3_nvram_exec_cmd(tp, nvram_cmd);
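/* Recap of the unbuffered path above (added note): writes are
 * page-granular - read the whole page into a bounce buffer, patch the
 * affected bytes, issue WREN plus a page ERASE, then stream the page
 * back out with NVRAM_CMD_FIRST on the first word and NVRAM_CMD_LAST
 * on the final one, finishing with WRDI to drop write enable.
 */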
3447 /* offset and length are dword aligned */
3448 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3453 for (i = 0; i < len; i += 4, offset += 4) {
3454 u32 page_off, phy_addr, nvram_cmd;
3457 memcpy(&data, buf + i, 4);
3458 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3460 page_off = offset % tp->nvram_pagesize;
3462 phy_addr = tg3_nvram_phys_addr(tp, offset);
3464 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3466 if (page_off == 0 || i == 0)
3467 nvram_cmd |= NVRAM_CMD_FIRST;
3468 if (page_off == (tp->nvram_pagesize - 4))
3469 nvram_cmd |= NVRAM_CMD_LAST;
3472 nvram_cmd |= NVRAM_CMD_LAST;
3474 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3475 !tg3_flag(tp, FLASH) ||
3476 !tg3_flag(tp, 57765_PLUS))
3477 tw32(NVRAM_ADDR, phy_addr);
3479 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3480 !tg3_flag(tp, 5755_PLUS) &&
3481 (tp->nvram_jedecnum == JEDEC_ST) &&
3482 (nvram_cmd & NVRAM_CMD_FIRST)) {
3485 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3486 ret = tg3_nvram_exec_cmd(tp, cmd);
3490 if (!tg3_flag(tp, FLASH)) {
3491 /* We always do complete word writes to eeprom. */
3492 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3495 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3502 /* offset and length are dword aligned */
3503 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3507 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3508 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3509 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3513 if (!tg3_flag(tp, NVRAM)) {
3514 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3518 ret = tg3_nvram_lock(tp);
3522 tg3_enable_nvram_access(tp);
3523 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3524 tw32(NVRAM_WRITE1, 0x406);
3526 grc_mode = tr32(GRC_MODE);
3527 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3529 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3530 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3533 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3537 grc_mode = tr32(GRC_MODE);
3538 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3540 tg3_disable_nvram_access(tp);
3541 tg3_nvram_unlock(tp);
3544 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3545 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3552 #define RX_CPU_SCRATCH_BASE 0x30000
3553 #define RX_CPU_SCRATCH_SIZE 0x04000
3554 #define TX_CPU_SCRATCH_BASE 0x34000
3555 #define TX_CPU_SCRATCH_SIZE 0x04000
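/* I.e. (from the defines above) RX CPU scratch spans 0x30000-0x33fff
 * and TX CPU scratch 0x34000-0x37fff, 16 KiB each.
 */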
3557 /* tp->lock is held. */
3558 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3561 const int iters = 10000;
3563 for (i = 0; i < iters; i++) {
3564 tw32(cpu_base + CPU_STATE, 0xffffffff);
3565 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3566 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3568 if (pci_channel_offline(tp->pdev))
3572 return (i == iters) ? -EBUSY : 0;
3575 /* tp->lock is held. */
3576 static int tg3_rxcpu_pause(struct tg3 *tp)
3578 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3580 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3581 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3587 /* tp->lock is held. */
3588 static int tg3_txcpu_pause(struct tg3 *tp)
3590 return tg3_pause_cpu(tp, TX_CPU_BASE);
3593 /* tp->lock is held. */
3594 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3596 tw32(cpu_base + CPU_STATE, 0xffffffff);
3597 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3600 /* tp->lock is held. */
3601 static void tg3_rxcpu_resume(struct tg3 *tp)
3603 tg3_resume_cpu(tp, RX_CPU_BASE);
3606 /* tp->lock is held. */
3607 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3611 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3613 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3614 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3616 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3619 if (cpu_base == RX_CPU_BASE) {
3620 rc = tg3_rxcpu_pause(tp);
3623 * There is only an Rx CPU for the 5750 derivative in the BCM4785.
3626 if (tg3_flag(tp, IS_SSB_CORE))
3629 rc = tg3_txcpu_pause(tp);
3633 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3634 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3638 /* Clear firmware's nvram arbitration. */
3639 if (tg3_flag(tp, NVRAM))
3640 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3644 static int tg3_fw_data_len(struct tg3 *tp,
3645 const struct tg3_firmware_hdr *fw_hdr)
3649 /* Non-fragmented firmware has one firmware header followed by a
3650 * contiguous chunk of data to be written. The length field in that
3651 * header is not the length of the data to be written but the complete
3652 * length of the bss. The data length is determined based on
3653 * tp->fw->size minus headers.
3655 * Fragmented firmware has a main header followed by multiple
3656 * fragments. Each fragment is identical to non-fragmented firmware,
3657 * with a firmware header followed by a contiguous chunk of data. In
3658 * the main header, the length field is unused and set to 0xffffffff.
3659 * In each fragment header the length is the entire size of that
3660 * fragment, i.e. fragment data + header length. The data length is
3661 * therefore the length field in the header minus TG3_FW_HDR_LEN.
3663 if (tp->fw_len == 0xffffffff)
3664 fw_len = be32_to_cpu(fw_hdr->len);
3666 fw_len = tp->fw->size;
3668 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
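/* Worked example (illustrative): a fragment whose header len field
 * reads 0x40c carries (0x40c - TG3_FW_HDR_LEN) / 4 data words, i.e.
 * 256 words assuming the usual 12-byte header of version, base_addr
 * and len.
 */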
3671 /* tp->lock is held. */
3672 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3673 u32 cpu_scratch_base, int cpu_scratch_size,
3674 const struct tg3_firmware_hdr *fw_hdr)
3677 void (*write_op)(struct tg3 *, u32, u32);
3678 int total_len = tp->fw->size;
3680 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3682 "%s: Trying to load TX cpu firmware which is 5705\n",
3687 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3688 write_op = tg3_write_mem;
3690 write_op = tg3_write_indirect_reg32;
3692 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3693 /* It is possible that bootcode is still loading at this point.
3694 * Get the nvram lock first before halting the cpu.
3696 int lock_err = tg3_nvram_lock(tp);
3697 err = tg3_halt_cpu(tp, cpu_base);
3699 tg3_nvram_unlock(tp);
3703 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3704 write_op(tp, cpu_scratch_base + i, 0);
3705 tw32(cpu_base + CPU_STATE, 0xffffffff);
3706 tw32(cpu_base + CPU_MODE,
3707 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3709 /* Subtract additional main header for fragmented firmware and
3710 * advance to the first fragment
3712 total_len -= TG3_FW_HDR_LEN;
3717 u32 *fw_data = (u32 *)(fw_hdr + 1);
3718 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3719 write_op(tp, cpu_scratch_base +
3720 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3722 be32_to_cpu(fw_data[i]));
3724 total_len -= be32_to_cpu(fw_hdr->len);
3726 /* Advance to next fragment */
3727 fw_hdr = (struct tg3_firmware_hdr *)
3728 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3729 } while (total_len > 0);
3737 /* tp->lock is held. */
3738 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3741 const int iters = 5;
3743 tw32(cpu_base + CPU_STATE, 0xffffffff);
3744 tw32_f(cpu_base + CPU_PC, pc);
3746 for (i = 0; i < iters; i++) {
3747 if (tr32(cpu_base + CPU_PC) == pc)
3749 tw32(cpu_base + CPU_STATE, 0xffffffff);
3750 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3751 tw32_f(cpu_base + CPU_PC, pc);
3755 return (i == iters) ? -EBUSY : 0;
3758 /* tp->lock is held. */
3759 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3761 const struct tg3_firmware_hdr *fw_hdr;
3764 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3766 /* Firmware blob starts with version numbers, followed by
3767 * start address and length. We are setting complete length.
3768 * length = end_address_of_bss - start_address_of_text.
3769 * Remainder is the blob to be loaded contiguously
3770 * from start address. */
3772 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3773 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3778 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3779 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3784 /* Now start up only the RX cpu. */
3785 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3786 be32_to_cpu(fw_hdr->base_addr));
3788 netdev_err(tp->dev, "%s failed to set RX CPU PC: is %08x, "
3789 "should be %08x\n", __func__,
3790 tr32(RX_CPU_BASE + CPU_PC),
3791 be32_to_cpu(fw_hdr->base_addr));
3795 tg3_rxcpu_resume(tp);
3800 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3802 const int iters = 1000;
3806 /* Wait for boot code to complete initialization and enter service
3807 * loop. It is then safe to download service patches
3809 for (i = 0; i < iters; i++) {
3810 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3817 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3821 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3823 netdev_warn(tp->dev,
3824 "Other patches exist. Not downloading EEE patch\n");
3831 /* tp->lock is held. */
3832 static void tg3_load_57766_firmware(struct tg3 *tp)
3834 struct tg3_firmware_hdr *fw_hdr;
3836 if (!tg3_flag(tp, NO_NVRAM))
3839 if (tg3_validate_rxcpu_state(tp))
3845 /* This firmware blob has a different format from older firmware
3846 * releases, as described below. The main difference is that it has
3847 * fragmented data to be written to non-contiguous locations.
3849 * At the beginning there is a firmware header identical to other
3850 * firmware, consisting of version, base addr and length. The length
3851 * here is unused and set to 0xffffffff.
3853 * This is followed by a series of firmware fragments, each
3854 * individually identical to previous firmware, i.e. a firmware
3855 * header followed by the data for that fragment. The version
3856 * field of the individual fragment header is unused.
3859 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3860 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3863 if (tg3_rxcpu_pause(tp))
3866 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3867 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3869 tg3_rxcpu_resume(tp);
3872 /* tp->lock is held. */
3873 static int tg3_load_tso_firmware(struct tg3 *tp)
3875 const struct tg3_firmware_hdr *fw_hdr;
3876 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3879 if (!tg3_flag(tp, FW_TSO))
3882 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3884 /* Firmware blob starts with version numbers, followed by
3885 * start address and length. We are setting complete length.
3886 * length = end_address_of_bss - start_address_of_text.
3887 * Remainder is the blob to be loaded contiguously
3888 * from start address. */
3890 cpu_scratch_size = tp->fw_len;
3892 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3893 cpu_base = RX_CPU_BASE;
3894 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3896 cpu_base = TX_CPU_BASE;
3897 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3898 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3901 err = tg3_load_firmware_cpu(tp, cpu_base,
3902 cpu_scratch_base, cpu_scratch_size,
3907 /* Now start up the cpu. */
3908 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3909 be32_to_cpu(fw_hdr->base_addr));
3912 "%s fails to set CPU PC, is %08x should be %08x\n",
3913 __func__, tr32(cpu_base + CPU_PC),
3914 be32_to_cpu(fw_hdr->base_addr));
3918 tg3_resume_cpu(tp, cpu_base);
3923 /* tp->lock is held. */
3924 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3926 u32 addr_high, addr_low;
3929 addr_high = ((tp->dev->dev_addr[0] << 8) |
3930 tp->dev->dev_addr[1]);
3931 addr_low = ((tp->dev->dev_addr[2] << 24) |
3932 (tp->dev->dev_addr[3] << 16) |
3933 (tp->dev->dev_addr[4] << 8) |
3934 (tp->dev->dev_addr[5] << 0));
3935 for (i = 0; i < 4; i++) {
3936 if (i == 1 && skip_mac_1)
3938 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3939 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3942 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3943 tg3_asic_rev(tp) == ASIC_REV_5704) {
3944 for (i = 0; i < 12; i++) {
3945 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3946 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3950 addr_high = (tp->dev->dev_addr[0] +
3951 tp->dev->dev_addr[1] +
3952 tp->dev->dev_addr[2] +
3953 tp->dev->dev_addr[3] +
3954 tp->dev->dev_addr[4] +
3955 tp->dev->dev_addr[5]) &
3956 TX_BACKOFF_SEED_MASK;
3957 tw32(MAC_TX_BACKOFF_SEED, addr_high);
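/* Register packing used above (illustrative): for MAC address
 * 00:10:18:aa:bb:cc, MAC_ADDR_x_HIGH holds 0x00000010 and
 * MAC_ADDR_x_LOW 0x18aabbcc, while the TX backoff seed is just the
 * sum of all six bytes masked with TX_BACKOFF_SEED_MASK.
 */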
3960 static void tg3_enable_register_access(struct tg3 *tp)
3963 * Make sure register accesses (indirect or otherwise) will function
3966 pci_write_config_dword(tp->pdev,
3967 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3970 static int tg3_power_up(struct tg3 *tp)
3974 tg3_enable_register_access(tp);
3976 err = pci_set_power_state(tp->pdev, PCI_D0);
3978 /* Switch out of Vaux if it is a NIC */
3979 tg3_pwrsrc_switch_to_vmain(tp);
3981 netdev_err(tp->dev, "Transition to D0 failed\n");
3987 static int tg3_setup_phy(struct tg3 *, bool);
3989 static int tg3_power_down_prepare(struct tg3 *tp)
3992 bool device_should_wake, do_low_power;
3994 tg3_enable_register_access(tp);
3996 /* Restore the CLKREQ setting. */
3997 if (tg3_flag(tp, CLKREQ_BUG))
3998 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3999 PCI_EXP_LNKCTL_CLKREQ_EN);
4001 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4002 tw32(TG3PCI_MISC_HOST_CTRL,
4003 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4005 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4006 tg3_flag(tp, WOL_ENABLE);
4008 if (tg3_flag(tp, USE_PHYLIB)) {
4009 do_low_power = false;
4010 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4011 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4012 struct phy_device *phydev;
4013 u32 phyid, advertising;
4015 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
4017 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4019 tp->link_config.speed = phydev->speed;
4020 tp->link_config.duplex = phydev->duplex;
4021 tp->link_config.autoneg = phydev->autoneg;
4022 tp->link_config.advertising = phydev->advertising;
4024 advertising = ADVERTISED_TP |
4026 ADVERTISED_Autoneg |
4027 ADVERTISED_10baseT_Half;
4029 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4030 if (tg3_flag(tp, WOL_SPEED_100MB))
4032 ADVERTISED_100baseT_Half |
4033 ADVERTISED_100baseT_Full |
4034 ADVERTISED_10baseT_Full;
4036 advertising |= ADVERTISED_10baseT_Full;
4039 phydev->advertising = advertising;
4041 phy_start_aneg(phydev);
4043 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4044 if (phyid != PHY_ID_BCMAC131) {
4045 phyid &= PHY_BCM_OUI_MASK;
4046 if (phyid == PHY_BCM_OUI_1 ||
4047 phyid == PHY_BCM_OUI_2 ||
4048 phyid == PHY_BCM_OUI_3)
4049 do_low_power = true;
4053 do_low_power = true;
4055 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4056 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4058 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4059 tg3_setup_phy(tp, false);
4062 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4065 val = tr32(GRC_VCPU_EXT_CTRL);
4066 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4067 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4071 for (i = 0; i < 200; i++) {
4072 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4073 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4078 if (tg3_flag(tp, WOL_CAP))
4079 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4080 WOL_DRV_STATE_SHUTDOWN |
4084 if (device_should_wake) {
4087 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4089 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4090 tg3_phy_auxctl_write(tp,
4091 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4092 MII_TG3_AUXCTL_PCTL_WOL_EN |
4093 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4094 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4098 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4099 mac_mode = MAC_MODE_PORT_MODE_GMII;
4100 else if (tp->phy_flags &
4101 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4102 if (tp->link_config.active_speed == SPEED_1000)
4103 mac_mode = MAC_MODE_PORT_MODE_GMII;
4105 mac_mode = MAC_MODE_PORT_MODE_MII;
4107 mac_mode = MAC_MODE_PORT_MODE_MII;
4109 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4110 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4111 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4112 SPEED_100 : SPEED_10;
4113 if (tg3_5700_link_polarity(tp, speed))
4114 mac_mode |= MAC_MODE_LINK_POLARITY;
4116 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4119 mac_mode = MAC_MODE_PORT_MODE_TBI;
4122 if (!tg3_flag(tp, 5750_PLUS))
4123 tw32(MAC_LED_CTRL, tp->led_ctrl);
4125 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4126 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4127 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4128 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4130 if (tg3_flag(tp, ENABLE_APE))
4131 mac_mode |= MAC_MODE_APE_TX_EN |
4132 MAC_MODE_APE_RX_EN |
4133 MAC_MODE_TDE_ENABLE;
4135 tw32_f(MAC_MODE, mac_mode);
4138 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4142 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4143 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4144 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4147 base_val = tp->pci_clock_ctrl;
4148 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4149 CLOCK_CTRL_TXCLK_DISABLE);
4151 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4152 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4153 } else if (tg3_flag(tp, 5780_CLASS) ||
4154 tg3_flag(tp, CPMU_PRESENT) ||
4155 tg3_asic_rev(tp) == ASIC_REV_5906) {
4157 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4158 u32 newbits1, newbits2;
4160 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4161 tg3_asic_rev(tp) == ASIC_REV_5701) {
4162 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4163 CLOCK_CTRL_TXCLK_DISABLE |
4165 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4166 } else if (tg3_flag(tp, 5705_PLUS)) {
4167 newbits1 = CLOCK_CTRL_625_CORE;
4168 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4170 newbits1 = CLOCK_CTRL_ALTCLK;
4171 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4174 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4177 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4180 if (!tg3_flag(tp, 5705_PLUS)) {
4183 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4184 tg3_asic_rev(tp) == ASIC_REV_5701) {
4185 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4186 CLOCK_CTRL_TXCLK_DISABLE |
4187 CLOCK_CTRL_44MHZ_CORE);
4189 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4192 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4193 tp->pci_clock_ctrl | newbits3, 40);
4197 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4198 tg3_power_down_phy(tp, do_low_power);
4200 tg3_frob_aux_power(tp, true);
4202 /* Workaround for unstable PLL clock */
4203 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4204 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4205 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4206 u32 val = tr32(0x7d00);
4208 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4210 if (!tg3_flag(tp, ENABLE_ASF)) {
4213 err = tg3_nvram_lock(tp);
4214 tg3_halt_cpu(tp, RX_CPU_BASE);
4216 tg3_nvram_unlock(tp);
4220 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4222 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4227 static void tg3_power_down(struct tg3 *tp)
4229 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4230 pci_set_power_state(tp->pdev, PCI_D3hot);
4233 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4235 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4236 case MII_TG3_AUX_STAT_10HALF:
4238 *duplex = DUPLEX_HALF;
4241 case MII_TG3_AUX_STAT_10FULL:
4243 *duplex = DUPLEX_FULL;
4246 case MII_TG3_AUX_STAT_100HALF:
4248 *duplex = DUPLEX_HALF;
4251 case MII_TG3_AUX_STAT_100FULL:
4253 *duplex = DUPLEX_FULL;
4256 case MII_TG3_AUX_STAT_1000HALF:
4257 *speed = SPEED_1000;
4258 *duplex = DUPLEX_HALF;
4261 case MII_TG3_AUX_STAT_1000FULL:
4262 *speed = SPEED_1000;
4263 *duplex = DUPLEX_FULL;
4267 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4268 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4270 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4274 *speed = SPEED_UNKNOWN;
4275 *duplex = DUPLEX_UNKNOWN;
4280 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4285 new_adv = ADVERTISE_CSMA;
4286 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4287 new_adv |= mii_advertise_flowctrl(flowctrl);
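/* e.g. (illustrative) advertise = ADVERTISED_100baseT_Full with
 * flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX composes new_adv as
 * ADVERTISE_CSMA | ADVERTISE_100FULL | ADVERTISE_PAUSE_CAP here.
 */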
4289 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4293 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4294 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4296 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4297 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4298 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4300 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4305 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4308 tw32(TG3_CPMU_EEE_MODE,
4309 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4311 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4316 /* Advertise 100-BaseTX EEE ability */
4317 if (advertise & ADVERTISED_100baseT_Full)
4318 val |= MDIO_AN_EEE_ADV_100TX;
4319 /* Advertise 1000-BaseT EEE ability */
4320 if (advertise & ADVERTISED_1000baseT_Full)
4321 val |= MDIO_AN_EEE_ADV_1000T;
4323 if (!tp->eee.eee_enabled) {
4325 tp->eee.advertised = 0;
4327 tp->eee.advertised = advertise &
4328 (ADVERTISED_100baseT_Full |
4329 ADVERTISED_1000baseT_Full);
4332 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4336 switch (tg3_asic_rev(tp)) {
4338 case ASIC_REV_57765:
4339 case ASIC_REV_57766:
4341 /* If we advertised any EEE modes above... */
4343 val = MII_TG3_DSP_TAP26_ALNOKO |
4344 MII_TG3_DSP_TAP26_RMRXSTO |
4345 MII_TG3_DSP_TAP26_OPCSINPT;
4346 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4350 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4351 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4352 MII_TG3_DSP_CH34TP2_HIBW01);
4355 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4364 static void tg3_phy_copper_begin(struct tg3 *tp)
4366 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4367 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4370 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4371 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4372 adv = ADVERTISED_10baseT_Half |
4373 ADVERTISED_10baseT_Full;
4374 if (tg3_flag(tp, WOL_SPEED_100MB))
4375 adv |= ADVERTISED_100baseT_Half |
4376 ADVERTISED_100baseT_Full;
4377 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
4378 adv |= ADVERTISED_1000baseT_Half |
4379 ADVERTISED_1000baseT_Full;
4381 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4383 adv = tp->link_config.advertising;
4384 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4385 adv &= ~(ADVERTISED_1000baseT_Half |
4386 ADVERTISED_1000baseT_Full);
4388 fc = tp->link_config.flowctrl;
4391 tg3_phy_autoneg_cfg(tp, adv, fc);
4393 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4394 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4395 /* Normally during power down we want to autonegotiate
4396 * the lowest possible speed for WOL. However, to avoid
4397 * link flap, we leave it untouched.
4402 tg3_writephy(tp, MII_BMCR,
4403 BMCR_ANENABLE | BMCR_ANRESTART);
4406 u32 bmcr, orig_bmcr;
4408 tp->link_config.active_speed = tp->link_config.speed;
4409 tp->link_config.active_duplex = tp->link_config.duplex;
4411 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4412 /* With autoneg disabled, 5715 only links up when the
4413 * advertisement register has the configured speed enabled.
4416 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4420 switch (tp->link_config.speed) {
4426 bmcr |= BMCR_SPEED100;
4430 bmcr |= BMCR_SPEED1000;
4434 if (tp->link_config.duplex == DUPLEX_FULL)
4435 bmcr |= BMCR_FULLDPLX;
4437 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4438 (bmcr != orig_bmcr)) {
4439 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4440 for (i = 0; i < 1500; i++) {
4444 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4445 tg3_readphy(tp, MII_BMSR, &tmp))
4447 if (!(tmp & BMSR_LSTATUS)) {
4452 tg3_writephy(tp, MII_BMCR, bmcr);
4458 static int tg3_phy_pull_config(struct tg3 *tp)
4463 err = tg3_readphy(tp, MII_BMCR, &val);
4467 if (!(val & BMCR_ANENABLE)) {
4468 tp->link_config.autoneg = AUTONEG_DISABLE;
4469 tp->link_config.advertising = 0;
4470 tg3_flag_clear(tp, PAUSE_AUTONEG);
4474 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4476 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4479 tp->link_config.speed = SPEED_10;
4482 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4485 tp->link_config.speed = SPEED_100;
4487 case BMCR_SPEED1000:
4488 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4489 tp->link_config.speed = SPEED_1000;
4497 if (val & BMCR_FULLDPLX)
4498 tp->link_config.duplex = DUPLEX_FULL;
4500 tp->link_config.duplex = DUPLEX_HALF;
4502 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4508 tp->link_config.autoneg = AUTONEG_ENABLE;
4509 tp->link_config.advertising = ADVERTISED_Autoneg;
4510 tg3_flag_set(tp, PAUSE_AUTONEG);
4512 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4515 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4519 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4520 tp->link_config.advertising |= adv | ADVERTISED_TP;
4522 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4524 tp->link_config.advertising |= ADVERTISED_FIBRE;
4527 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4530 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4531 err = tg3_readphy(tp, MII_CTRL1000, &val);
4535 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4537 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4541 adv = tg3_decode_flowctrl_1000X(val);
4542 tp->link_config.flowctrl = adv;
4544 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4545 adv = mii_adv_to_ethtool_adv_x(val);
4548 tp->link_config.advertising |= adv;
4555 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4559 /* Turn off tap power management. */
4560 /* Set Extended packet length bit */
4561 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4563 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4564 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4565 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4566 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4567 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4574 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4576 struct ethtool_eee eee;
4578 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4581 tg3_eee_pull_config(tp, &eee);
4583 if (tp->eee.eee_enabled) {
4584 if (tp->eee.advertised != eee.advertised ||
4585 tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4586 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4589 /* EEE is disabled but we're advertising */
4597 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4599 u32 advmsk, tgtadv, advertising;
4601 advertising = tp->link_config.advertising;
4602 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4604 advmsk = ADVERTISE_ALL;
4605 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4606 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4607 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4610 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4613 if ((*lcladv & advmsk) != tgtadv)
4616 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4619 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4621 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4625 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4626 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4627 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4628 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4629 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4631 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4634 if (tg3_ctrl != tgtadv)
4641 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4645 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4648 if (tg3_readphy(tp, MII_STAT1000, &val))
4651 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4654 if (tg3_readphy(tp, MII_LPA, rmtadv))
4657 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4658 tp->link_config.rmt_adv = lpeth;
4663 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4665 if (curr_link_up != tp->link_up) {
4667 netif_carrier_on(tp->dev);
4669 netif_carrier_off(tp->dev);
4670 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4671 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4674 tg3_link_report(tp);
4681 static void tg3_clear_mac_status(struct tg3 *tp)
4686 MAC_STATUS_SYNC_CHANGED |
4687 MAC_STATUS_CFG_CHANGED |
4688 MAC_STATUS_MI_COMPLETION |
4689 MAC_STATUS_LNKSTATE_CHANGED);
4693 static void tg3_setup_eee(struct tg3 *tp)
4697 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4698 TG3_CPMU_EEE_LNKIDL_UART_IDL;
4699 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4700 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4702 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4704 tw32_f(TG3_CPMU_EEE_CTRL,
4705 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4707 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4708 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4709 TG3_CPMU_EEEMD_LPI_IN_RX |
4710 TG3_CPMU_EEEMD_EEE_ENABLE;
4712 if (tg3_asic_rev(tp) != ASIC_REV_5717)
4713 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4715 if (tg3_flag(tp, ENABLE_APE))
4716 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4718 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4720 tw32_f(TG3_CPMU_EEE_DBTMR1,
4721 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4722 (tp->eee.tx_lpi_timer & 0xffff));
4724 tw32_f(TG3_CPMU_EEE_DBTMR2,
4725 TG3_CPMU_DBTMR2_APE_TX_2047US |
4726 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4729 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4731 bool current_link_up;
4733 u32 lcl_adv, rmt_adv;
4738 tg3_clear_mac_status(tp);
4740 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4742 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4746 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4748 /* Some third-party PHYs need to be reset on link going
4751 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4752 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4753 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4755 tg3_readphy(tp, MII_BMSR, &bmsr);
4756 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4757 !(bmsr & BMSR_LSTATUS))
4763 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4764 tg3_readphy(tp, MII_BMSR, &bmsr);
4765 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4766 !tg3_flag(tp, INIT_COMPLETE))
4769 if (!(bmsr & BMSR_LSTATUS)) {
4770 err = tg3_init_5401phy_dsp(tp);
4774 tg3_readphy(tp, MII_BMSR, &bmsr);
4775 for (i = 0; i < 1000; i++) {
4777 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4778 (bmsr & BMSR_LSTATUS)) {
4784 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4785 TG3_PHY_REV_BCM5401_B0 &&
4786 !(bmsr & BMSR_LSTATUS) &&
4787 tp->link_config.active_speed == SPEED_1000) {
4788 err = tg3_phy_reset(tp);
4790 err = tg3_init_5401phy_dsp(tp);
4795 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4796 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4797 /* 5701 {A0,B0} CRC bug workaround */
4798 tg3_writephy(tp, 0x15, 0x0a75);
4799 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4800 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4801 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4804 /* Clear pending interrupts... */
4805 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4806 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4808 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4809 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4810 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4811 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4813 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4814 tg3_asic_rev(tp) == ASIC_REV_5701) {
4815 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4816 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4817 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4819 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4822 current_link_up = false;
4823 current_speed = SPEED_UNKNOWN;
4824 current_duplex = DUPLEX_UNKNOWN;
4825 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4826 tp->link_config.rmt_adv = 0;
4828 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4829 err = tg3_phy_auxctl_read(tp,
4830 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4832 if (!err && !(val & (1 << 10))) {
4833 tg3_phy_auxctl_write(tp,
4834 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4841 for (i = 0; i < 100; i++) {
4842 tg3_readphy(tp, MII_BMSR, &bmsr);
4843 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4844 (bmsr & BMSR_LSTATUS))
4849 if (bmsr & BMSR_LSTATUS) {
4852 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4853 for (i = 0; i < 2000; i++) {
4855 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4860 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4865 for (i = 0; i < 200; i++) {
4866 tg3_readphy(tp, MII_BMCR, &bmcr);
4867 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4869 if (bmcr && bmcr != 0x7fff)
4877 tp->link_config.active_speed = current_speed;
4878 tp->link_config.active_duplex = current_duplex;
4880 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4881 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4883 if ((bmcr & BMCR_ANENABLE) &&
4885 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4886 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4887 current_link_up = true;
4889 /* EEE settings changes take effect only after a phy
4890 * reset. If we have skipped a reset due to Link Flap
4891 * Avoidance being enabled, do it now.
4893 if (!eee_config_ok &&
4894 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4900 if (!(bmcr & BMCR_ANENABLE) &&
4901 tp->link_config.speed == current_speed &&
4902 tp->link_config.duplex == current_duplex) {
4903 current_link_up = true;
4907 if (current_link_up &&
4908 tp->link_config.active_duplex == DUPLEX_FULL) {
4911 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4912 reg = MII_TG3_FET_GEN_STAT;
4913 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4915 reg = MII_TG3_EXT_STAT;
4916 bit = MII_TG3_EXT_STAT_MDIX;
4919 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4920 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4922 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4927 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4928 tg3_phy_copper_begin(tp);
4930 if (tg3_flag(tp, ROBOSWITCH)) {
4931 current_link_up = true;
4932 /* FIXME: when a BCM5325 switch is used, use 100 Mbit/s */
4933 current_speed = SPEED_1000;
4934 current_duplex = DUPLEX_FULL;
4935 tp->link_config.active_speed = current_speed;
4936 tp->link_config.active_duplex = current_duplex;
4939 tg3_readphy(tp, MII_BMSR, &bmsr);
4940 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4941 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4942 current_link_up = true;
4945 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4946 if (current_link_up) {
4947 if (tp->link_config.active_speed == SPEED_100 ||
4948 tp->link_config.active_speed == SPEED_10)
4949 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4951 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4952 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4953 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4955 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4957 /* In order for the 5750 core in BCM4785 chip to work properly
4958 * in RGMII mode, the Led Control Register must be set up.
4960 if (tg3_flag(tp, RGMII_MODE)) {
4961 u32 led_ctrl = tr32(MAC_LED_CTRL);
4962 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4964 if (tp->link_config.active_speed == SPEED_10)
4965 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4966 else if (tp->link_config.active_speed == SPEED_100)
4967 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4968 LED_CTRL_100MBPS_ON);
4969 else if (tp->link_config.active_speed == SPEED_1000)
4970 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4971 LED_CTRL_1000MBPS_ON);
4973 tw32(MAC_LED_CTRL, led_ctrl);
4977 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4978 if (tp->link_config.active_duplex == DUPLEX_HALF)
4979 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4981 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4982 if (current_link_up &&
4983 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4984 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4986 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4989 /* ??? Without this setting Netgear GA302T PHY does not
4990 * ??? send/receive packets...
4992 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4993 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
4994 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4995 tw32_f(MAC_MI_MODE, tp->mi_mode);
4999 tw32_f(MAC_MODE, tp->mac_mode);
5002 tg3_phy_eee_adjust(tp, current_link_up);
5004 if (tg3_flag(tp, USE_LINKCHG_REG)) {
5005 /* Polled via timer. */
5006 tw32_f(MAC_EVENT, 0);
5008 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5012 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5014 tp->link_config.active_speed == SPEED_1000 &&
5015 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5018 (MAC_STATUS_SYNC_CHANGED |
5019 MAC_STATUS_CFG_CHANGED));
5022 NIC_SRAM_FIRMWARE_MBOX,
5023 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5026 /* Prevent send BD corruption. */
5027 if (tg3_flag(tp, CLKREQ_BUG)) {
5028 if (tp->link_config.active_speed == SPEED_100 ||
5029 tp->link_config.active_speed == SPEED_10)
5030 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5031 PCI_EXP_LNKCTL_CLKREQ_EN);
5033 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5034 PCI_EXP_LNKCTL_CLKREQ_EN);
5037 tg3_test_and_report_link_chg(tp, current_link_up);
5042 struct tg3_fiber_aneginfo {
5044 #define ANEG_STATE_UNKNOWN 0
5045 #define ANEG_STATE_AN_ENABLE 1
5046 #define ANEG_STATE_RESTART_INIT 2
5047 #define ANEG_STATE_RESTART 3
5048 #define ANEG_STATE_DISABLE_LINK_OK 4
5049 #define ANEG_STATE_ABILITY_DETECT_INIT 5
5050 #define ANEG_STATE_ABILITY_DETECT 6
5051 #define ANEG_STATE_ACK_DETECT_INIT 7
5052 #define ANEG_STATE_ACK_DETECT 8
5053 #define ANEG_STATE_COMPLETE_ACK_INIT 9
5054 #define ANEG_STATE_COMPLETE_ACK 10
5055 #define ANEG_STATE_IDLE_DETECT_INIT 11
5056 #define ANEG_STATE_IDLE_DETECT 12
5057 #define ANEG_STATE_LINK_OK 13
5058 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5059 #define ANEG_STATE_NEXT_PAGE_WAIT 15
5062 #define MR_AN_ENABLE 0x00000001
5063 #define MR_RESTART_AN 0x00000002
5064 #define MR_AN_COMPLETE 0x00000004
5065 #define MR_PAGE_RX 0x00000008
5066 #define MR_NP_LOADED 0x00000010
5067 #define MR_TOGGLE_TX 0x00000020
5068 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5069 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5070 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5071 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5072 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5073 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5074 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5075 #define MR_TOGGLE_RX 0x00002000
5076 #define MR_NP_RX 0x00004000
5078 #define MR_LINK_OK 0x80000000
5080 unsigned long link_time, cur_time;
5082 u32 ability_match_cfg;
5083 int ability_match_count;
5085 char ability_match, idle_match, ack_match;
5087 u32 txconfig, rxconfig;
5088 #define ANEG_CFG_NP 0x00000080
5089 #define ANEG_CFG_ACK 0x00000040
5090 #define ANEG_CFG_RF2 0x00000020
5091 #define ANEG_CFG_RF1 0x00000010
5092 #define ANEG_CFG_PS2 0x00000001
5093 #define ANEG_CFG_PS1 0x00008000
5094 #define ANEG_CFG_HD 0x00004000
5095 #define ANEG_CFG_FD 0x00002000
5096 #define ANEG_CFG_INVAL 0x00001f06
5101 #define ANEG_TIMER_ENAB 2
5102 #define ANEG_FAILED -1
5104 #define ANEG_STATE_SETTLE_TIME 10000
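/* A sketch of the happy-path state flow, reconstructed only from the
 * switch cases below (not from an external spec):
 *
 *   AN_ENABLE -> RESTART_INIT -> RESTART
 *             -> ABILITY_DETECT_INIT -> ABILITY_DETECT
 *             -> ACK_DETECT_INIT -> ACK_DETECT
 *             -> COMPLETE_ACK_INIT -> COMPLETE_ACK
 *             -> IDLE_DETECT_INIT -> IDLE_DETECT -> LINK_OK
 *
 * Several states fall back to AN_ENABLE when ability_match is set with
 * an all-zero rxconfig, i.e. the partner restarted negotiation.
 */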
5106 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5107 struct tg3_fiber_aneginfo *ap)
5110 unsigned long delta;
5114 if (ap->state == ANEG_STATE_UNKNOWN) {
5118 ap->ability_match_cfg = 0;
5119 ap->ability_match_count = 0;
5120 ap->ability_match = 0;
5126 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5127 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5129 if (rx_cfg_reg != ap->ability_match_cfg) {
5130 ap->ability_match_cfg = rx_cfg_reg;
5131 ap->ability_match = 0;
5132 ap->ability_match_count = 0;
5134 if (++ap->ability_match_count > 1) {
5135 ap->ability_match = 1;
5136 ap->ability_match_cfg = rx_cfg_reg;
5139 if (rx_cfg_reg & ANEG_CFG_ACK)
5147 ap->ability_match_cfg = 0;
5148 ap->ability_match_count = 0;
5149 ap->ability_match = 0;
5155 ap->rxconfig = rx_cfg_reg;
5158 switch (ap->state) {
5159 case ANEG_STATE_UNKNOWN:
5160 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5161 ap->state = ANEG_STATE_AN_ENABLE;
5164 case ANEG_STATE_AN_ENABLE:
5165 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5166 if (ap->flags & MR_AN_ENABLE) {
5169 ap->ability_match_cfg = 0;
5170 ap->ability_match_count = 0;
5171 ap->ability_match = 0;
5175 ap->state = ANEG_STATE_RESTART_INIT;
5177 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5181 case ANEG_STATE_RESTART_INIT:
5182 ap->link_time = ap->cur_time;
5183 ap->flags &= ~(MR_NP_LOADED);
5185 tw32(MAC_TX_AUTO_NEG, 0);
5186 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5187 tw32_f(MAC_MODE, tp->mac_mode);
5190 ret = ANEG_TIMER_ENAB;
5191 ap->state = ANEG_STATE_RESTART;
5194 case ANEG_STATE_RESTART:
5195 delta = ap->cur_time - ap->link_time;
5196 if (delta > ANEG_STATE_SETTLE_TIME)
5197 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5199 ret = ANEG_TIMER_ENAB;
5202 case ANEG_STATE_DISABLE_LINK_OK:
5206 case ANEG_STATE_ABILITY_DETECT_INIT:
5207 ap->flags &= ~(MR_TOGGLE_TX);
5208 ap->txconfig = ANEG_CFG_FD;
5209 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5210 if (flowctrl & ADVERTISE_1000XPAUSE)
5211 ap->txconfig |= ANEG_CFG_PS1;
5212 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5213 ap->txconfig |= ANEG_CFG_PS2;
5214 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5215 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5216 tw32_f(MAC_MODE, tp->mac_mode);
5219 ap->state = ANEG_STATE_ABILITY_DETECT;
5222 case ANEG_STATE_ABILITY_DETECT:
5223 if (ap->ability_match != 0 && ap->rxconfig != 0)
5224 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5227 case ANEG_STATE_ACK_DETECT_INIT:
5228 ap->txconfig |= ANEG_CFG_ACK;
5229 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5230 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5231 tw32_f(MAC_MODE, tp->mac_mode);
5234 ap->state = ANEG_STATE_ACK_DETECT;
5237 case ANEG_STATE_ACK_DETECT:
5238 if (ap->ack_match != 0) {
5239 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5240 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5241 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5243 ap->state = ANEG_STATE_AN_ENABLE;
5245 } else if (ap->ability_match != 0 &&
5246 ap->rxconfig == 0) {
5247 ap->state = ANEG_STATE_AN_ENABLE;
5251 case ANEG_STATE_COMPLETE_ACK_INIT:
5252 if (ap->rxconfig & ANEG_CFG_INVAL) {
5256 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5257 MR_LP_ADV_HALF_DUPLEX |
5258 MR_LP_ADV_SYM_PAUSE |
5259 MR_LP_ADV_ASYM_PAUSE |
5260 MR_LP_ADV_REMOTE_FAULT1 |
5261 MR_LP_ADV_REMOTE_FAULT2 |
5262 MR_LP_ADV_NEXT_PAGE |
5265 if (ap->rxconfig & ANEG_CFG_FD)
5266 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5267 if (ap->rxconfig & ANEG_CFG_HD)
5268 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5269 if (ap->rxconfig & ANEG_CFG_PS1)
5270 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5271 if (ap->rxconfig & ANEG_CFG_PS2)
5272 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5273 if (ap->rxconfig & ANEG_CFG_RF1)
5274 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5275 if (ap->rxconfig & ANEG_CFG_RF2)
5276 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5277 if (ap->rxconfig & ANEG_CFG_NP)
5278 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5280 ap->link_time = ap->cur_time;
5282 ap->flags ^= (MR_TOGGLE_TX);
5283 if (ap->rxconfig & 0x0008)
5284 ap->flags |= MR_TOGGLE_RX;
5285 if (ap->rxconfig & ANEG_CFG_NP)
5286 ap->flags |= MR_NP_RX;
5287 ap->flags |= MR_PAGE_RX;
5289 ap->state = ANEG_STATE_COMPLETE_ACK;
5290 ret = ANEG_TIMER_ENAB;
5293 case ANEG_STATE_COMPLETE_ACK:
5294 if (ap->ability_match != 0 &&
5295 ap->rxconfig == 0) {
5296 ap->state = ANEG_STATE_AN_ENABLE;
5299 delta = ap->cur_time - ap->link_time;
5300 if (delta > ANEG_STATE_SETTLE_TIME) {
5301 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5302 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5304 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5305 !(ap->flags & MR_NP_RX)) {
5306 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5314 case ANEG_STATE_IDLE_DETECT_INIT:
5315 ap->link_time = ap->cur_time;
5316 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5317 tw32_f(MAC_MODE, tp->mac_mode);
5320 ap->state = ANEG_STATE_IDLE_DETECT;
5321 ret = ANEG_TIMER_ENAB;
5324 case ANEG_STATE_IDLE_DETECT:
5325 if (ap->ability_match != 0 &&
5326 ap->rxconfig == 0) {
5327 ap->state = ANEG_STATE_AN_ENABLE;
5330 delta = ap->cur_time - ap->link_time;
5331 if (delta > ANEG_STATE_SETTLE_TIME) {
5332 /* XXX another gem from the Broadcom driver :( */
5333 ap->state = ANEG_STATE_LINK_OK;
5337 case ANEG_STATE_LINK_OK:
5338 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5342 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5343 /* ??? unimplemented */
5346 case ANEG_STATE_NEXT_PAGE_WAIT:
5347 /* ??? unimplemented */
5358 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5361 struct tg3_fiber_aneginfo aninfo;
5362 int status = ANEG_FAILED;
5366 tw32_f(MAC_TX_AUTO_NEG, 0);
5368 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5369 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5372 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5375 memset(&aninfo, 0, sizeof(aninfo));
5376 aninfo.flags |= MR_AN_ENABLE;
5377 aninfo.state = ANEG_STATE_UNKNOWN;
5378 aninfo.cur_time = 0;
5380 while (++tick < 195000) {
5381 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5382 if (status == ANEG_DONE || status == ANEG_FAILED)
5388 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5389 tw32_f(MAC_MODE, tp->mac_mode);
5392 *txflags = aninfo.txconfig;
5393 *rxflags = aninfo.flags;
5395 if (status == ANEG_DONE &&
5396 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5397 MR_LP_ADV_FULL_DUPLEX)))
5403 static void tg3_init_bcm8002(struct tg3 *tp)
5405 u32 mac_status = tr32(MAC_STATUS);
5408 /* Reset when initializing for the first time, or when we have a link. */
5409 if (tg3_flag(tp, INIT_COMPLETE) &&
5410 !(mac_status & MAC_STATUS_PCS_SYNCED))
5413 /* Set PLL lock range. */
5414 tg3_writephy(tp, 0x16, 0x8007);
5417 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5419 /* Wait for reset to complete. */
5420 /* XXX schedule_timeout() ... */
5421 for (i = 0; i < 500; i++)
5424 /* Config mode; select PMA/Ch 1 regs. */
5425 tg3_writephy(tp, 0x10, 0x8411);
5427 /* Enable auto-lock and comdet, select txclk for tx. */
5428 tg3_writephy(tp, 0x11, 0x0a10);
5430 tg3_writephy(tp, 0x18, 0x00a0);
5431 tg3_writephy(tp, 0x16, 0x41ff);
5433 /* Assert and deassert POR. */
5434 tg3_writephy(tp, 0x13, 0x0400);
5436 tg3_writephy(tp, 0x13, 0x0000);
5438 tg3_writephy(tp, 0x11, 0x0a50);
5440 tg3_writephy(tp, 0x11, 0x0a10);
5442 /* Wait for signal to stabilize */
5443 /* XXX schedule_timeout() ... */
5444 for (i = 0; i < 15000; i++)
5447 /* Deselect the channel register so we can read the PHYID
5450 tg3_writephy(tp, 0x10, 0x8011);
5453 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5456 bool current_link_up;
5457 u32 sg_dig_ctrl, sg_dig_status;
5458 u32 serdes_cfg, expected_sg_dig_ctrl;
5459 int workaround, port_a;
5462 expected_sg_dig_ctrl = 0;
5465 current_link_up = false;
5467 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5468 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5470 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5473 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5474 /* preserve bits 20-23 for voltage regulator */
5475 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5478 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5480 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5481 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5483 u32 val = serdes_cfg;
5489 tw32_f(MAC_SERDES_CFG, val);
5492 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5494 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5495 tg3_setup_flow_control(tp, 0, 0);
5496 current_link_up = true;
5501 /* Want auto-negotiation. */
5502 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5504 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5505 if (flowctrl & ADVERTISE_1000XPAUSE)
5506 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5507 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5508 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5510 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5511 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5512 tp->serdes_counter &&
5513 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5514 MAC_STATUS_RCVD_CFG)) ==
5515 MAC_STATUS_PCS_SYNCED)) {
5516 tp->serdes_counter--;
5517 current_link_up = true;
5522 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5523 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5525 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5527 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5528 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5529 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5530 MAC_STATUS_SIGNAL_DET)) {
5531 sg_dig_status = tr32(SG_DIG_STATUS);
5532 mac_status = tr32(MAC_STATUS);
5534 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5535 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5536 u32 local_adv = 0, remote_adv = 0;
5538 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5539 local_adv |= ADVERTISE_1000XPAUSE;
5540 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5541 local_adv |= ADVERTISE_1000XPSE_ASYM;
5543 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5544 remote_adv |= LPA_1000XPAUSE;
5545 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5546 remote_adv |= LPA_1000XPAUSE_ASYM;
5548 tp->link_config.rmt_adv =
5549 mii_adv_to_ethtool_adv_x(remote_adv);
5551 tg3_setup_flow_control(tp, local_adv, remote_adv);
5552 current_link_up = true;
5553 tp->serdes_counter = 0;
5554 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5555 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5556 if (tp->serdes_counter)
5557 tp->serdes_counter--;
5560 u32 val = serdes_cfg;
5567 tw32_f(MAC_SERDES_CFG, val);
5570 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5573 /* Link parallel detection - link is up
5574 * only if we have PCS_SYNC and not
5575 * receiving config code words */
5576 mac_status = tr32(MAC_STATUS);
5577 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5578 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5579 tg3_setup_flow_control(tp, 0, 0);
5580 current_link_up = true;
5582 TG3_PHYFLG_PARALLEL_DETECT;
5583 tp->serdes_counter =
5584 SERDES_PARALLEL_DET_TIMEOUT;
5586 goto restart_autoneg;
5590 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5591 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5595 return current_link_up;
5598 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5600 bool current_link_up = false;
5602 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5605 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5606 u32 txflags, rxflags;
5609 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5610 u32 local_adv = 0, remote_adv = 0;
5612 if (txflags & ANEG_CFG_PS1)
5613 local_adv |= ADVERTISE_1000XPAUSE;
5614 if (txflags & ANEG_CFG_PS2)
5615 local_adv |= ADVERTISE_1000XPSE_ASYM;
5617 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5618 remote_adv |= LPA_1000XPAUSE;
5619 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5620 remote_adv |= LPA_1000XPAUSE_ASYM;
5622 tp->link_config.rmt_adv =
5623 mii_adv_to_ethtool_adv_x(remote_adv);
5625 tg3_setup_flow_control(tp, local_adv, remote_adv);
5627 current_link_up = true;
5629 for (i = 0; i < 30; i++) {
5632 (MAC_STATUS_SYNC_CHANGED |
5633 MAC_STATUS_CFG_CHANGED));
5635 if ((tr32(MAC_STATUS) &
5636 (MAC_STATUS_SYNC_CHANGED |
5637 MAC_STATUS_CFG_CHANGED)) == 0)
5641 mac_status = tr32(MAC_STATUS);
5642 if (!current_link_up &&
5643 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5644 !(mac_status & MAC_STATUS_RCVD_CFG))
5645 current_link_up = true;
5647 tg3_setup_flow_control(tp, 0, 0);
5649 /* Forcing 1000FD link up. */
5650 current_link_up = true;
5652 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5655 tw32_f(MAC_MODE, tp->mac_mode);
5660 return current_link_up;
5663 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5666 u16 orig_active_speed;
5667 u8 orig_active_duplex;
5669 bool current_link_up;
5672 orig_pause_cfg = tp->link_config.active_flowctrl;
5673 orig_active_speed = tp->link_config.active_speed;
5674 orig_active_duplex = tp->link_config.active_duplex;
5676 if (!tg3_flag(tp, HW_AUTONEG) &&
5678 tg3_flag(tp, INIT_COMPLETE)) {
5679 mac_status = tr32(MAC_STATUS);
5680 mac_status &= (MAC_STATUS_PCS_SYNCED |
5681 MAC_STATUS_SIGNAL_DET |
5682 MAC_STATUS_CFG_CHANGED |
5683 MAC_STATUS_RCVD_CFG);
5684 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5685 MAC_STATUS_SIGNAL_DET)) {
5686 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5687 MAC_STATUS_CFG_CHANGED));
5692 tw32_f(MAC_TX_AUTO_NEG, 0);
5694 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5695 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5696 tw32_f(MAC_MODE, tp->mac_mode);
5699 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5700 tg3_init_bcm8002(tp);
5702 /* Enable link change event even when serdes polling. */
5703 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5706 current_link_up = false;
5707 tp->link_config.rmt_adv = 0;
5708 mac_status = tr32(MAC_STATUS);
5710 if (tg3_flag(tp, HW_AUTONEG))
5711 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5713 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5715 tp->napi[0].hw_status->status =
5716 (SD_STATUS_UPDATED |
5717 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5719 for (i = 0; i < 100; i++) {
5720 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5721 MAC_STATUS_CFG_CHANGED));
5723 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5724 MAC_STATUS_CFG_CHANGED |
5725 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5729 mac_status = tr32(MAC_STATUS);
5730 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5731 current_link_up = false;
5732 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5733 tp->serdes_counter == 0) {
5734 tw32_f(MAC_MODE, (tp->mac_mode |
5735 MAC_MODE_SEND_CONFIGS));
5737 tw32_f(MAC_MODE, tp->mac_mode);
5741 if (current_link_up) {
5742 tp->link_config.active_speed = SPEED_1000;
5743 tp->link_config.active_duplex = DUPLEX_FULL;
5744 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5745 LED_CTRL_LNKLED_OVERRIDE |
5746 LED_CTRL_1000MBPS_ON));
5748 tp->link_config.active_speed = SPEED_UNKNOWN;
5749 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5750 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5751 LED_CTRL_LNKLED_OVERRIDE |
5752 LED_CTRL_TRAFFIC_OVERRIDE));
5755 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5756 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5757 if (orig_pause_cfg != now_pause_cfg ||
5758 orig_active_speed != tp->link_config.active_speed ||
5759 orig_active_duplex != tp->link_config.active_duplex)
5760 tg3_link_report(tp);
5766 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5770 u16 current_speed = SPEED_UNKNOWN;
5771 u8 current_duplex = DUPLEX_UNKNOWN;
5772 bool current_link_up = false;
5773 u32 local_adv, remote_adv, sgsr;
5775 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5776 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5777 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5778 (sgsr & SERDES_TG3_SGMII_MODE)) {
5783 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5785 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5786 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5788 current_link_up = true;
5789 if (sgsr & SERDES_TG3_SPEED_1000) {
5790 current_speed = SPEED_1000;
5791 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5792 } else if (sgsr & SERDES_TG3_SPEED_100) {
5793 current_speed = SPEED_100;
5794 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5796 current_speed = SPEED_10;
5797 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5800 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5801 current_duplex = DUPLEX_FULL;
5803 current_duplex = DUPLEX_HALF;
5806 tw32_f(MAC_MODE, tp->mac_mode);
5809 tg3_clear_mac_status(tp);
5811 goto fiber_setup_done;
5814 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5815 tw32_f(MAC_MODE, tp->mac_mode);
5818 tg3_clear_mac_status(tp);
5823 tp->link_config.rmt_adv = 0;
5825 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5826 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5827 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5828 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5829 bmsr |= BMSR_LSTATUS;
5831 bmsr &= ~BMSR_LSTATUS;
5834 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5836 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5837 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5838 /* do nothing, just check for link up at the end */
5839 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5842 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5843 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5844 ADVERTISE_1000XPAUSE |
5845 ADVERTISE_1000XPSE_ASYM |
5848 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5849 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5851 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5852 tg3_writephy(tp, MII_ADVERTISE, newadv);
5853 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5854 tg3_writephy(tp, MII_BMCR, bmcr);
5856 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5857 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5858 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5865 bmcr &= ~BMCR_SPEED1000;
5866 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5868 if (tp->link_config.duplex == DUPLEX_FULL)
5869 new_bmcr |= BMCR_FULLDPLX;
5871 if (new_bmcr != bmcr) {
5872 /* BMCR_SPEED1000 is a reserved bit that needs
5873 * to be set on write.
5875 new_bmcr |= BMCR_SPEED1000;
5877 /* Force a linkdown */
5881 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5882 adv &= ~(ADVERTISE_1000XFULL |
5883 ADVERTISE_1000XHALF |
5885 tg3_writephy(tp, MII_ADVERTISE, adv);
5886 tg3_writephy(tp, MII_BMCR, bmcr |
5890 tg3_carrier_off(tp);
5892 tg3_writephy(tp, MII_BMCR, new_bmcr);
5894 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5895 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5896 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5897 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5898 bmsr |= BMSR_LSTATUS;
5900 bmsr &= ~BMSR_LSTATUS;
5902 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5906 if (bmsr & BMSR_LSTATUS) {
5907 current_speed = SPEED_1000;
5908 current_link_up = true;
5909 if (bmcr & BMCR_FULLDPLX)
5910 current_duplex = DUPLEX_FULL;
5912 current_duplex = DUPLEX_HALF;
5917 if (bmcr & BMCR_ANENABLE) {
5920 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5921 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5922 common = local_adv & remote_adv;
5923 if (common & (ADVERTISE_1000XHALF |
5924 ADVERTISE_1000XFULL)) {
5925 if (common & ADVERTISE_1000XFULL)
5926 current_duplex = DUPLEX_FULL;
5928 current_duplex = DUPLEX_HALF;
5930 tp->link_config.rmt_adv =
5931 mii_adv_to_ethtool_adv_x(remote_adv);
5932 } else if (!tg3_flag(tp, 5780_CLASS)) {
5933 /* Link is up via parallel detect */
5935 current_link_up = false;
5941 if (current_link_up && current_duplex == DUPLEX_FULL)
5942 tg3_setup_flow_control(tp, local_adv, remote_adv);
5944 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5945 if (tp->link_config.active_duplex == DUPLEX_HALF)
5946 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5948 tw32_f(MAC_MODE, tp->mac_mode);
5951 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5953 tp->link_config.active_speed = current_speed;
5954 tp->link_config.active_duplex = current_duplex;
5956 tg3_test_and_report_link_chg(tp, current_link_up);
5960 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5962 if (tp->serdes_counter) {
5963 /* Give autoneg time to complete. */
5964 tp->serdes_counter--;
5969 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5972 tg3_readphy(tp, MII_BMCR, &bmcr);
5973 if (bmcr & BMCR_ANENABLE) {
5976 /* Select shadow register 0x1f */
5977 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5978 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5980 /* Select expansion interrupt status register */
5981 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5982 MII_TG3_DSP_EXP1_INT_STAT);
5983 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5984 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5986 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5987 /* We have signal detect and not receiving
5988 * config code words, link is up by parallel
5992 bmcr &= ~BMCR_ANENABLE;
5993 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5994 tg3_writephy(tp, MII_BMCR, bmcr);
5995 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5998 } else if (tp->link_up &&
5999 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6000 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6003 /* Select expansion interrupt status register */
6004 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6005 MII_TG3_DSP_EXP1_INT_STAT);
6006 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6010 /* Config code words received, turn on autoneg. */
6011 tg3_readphy(tp, MII_BMCR, &bmcr);
6012 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6014 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6020 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6025 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6026 err = tg3_setup_fiber_phy(tp, force_reset);
6027 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6028 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6030 err = tg3_setup_copper_phy(tp, force_reset);
6032 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6035 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6036 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6038 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6043 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6044 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6045 tw32(GRC_MISC_CFG, val);
6048 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6049 (6 << TX_LENGTHS_IPG_SHIFT);
6050 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6051 tg3_asic_rev(tp) == ASIC_REV_5762)
6052 val |= tr32(MAC_TX_LENGTHS) &
6053 (TX_LENGTHS_JMB_FRM_LEN_MSK |
6054 TX_LENGTHS_CNT_DWN_VAL_MSK);
6056 if (tp->link_config.active_speed == SPEED_1000 &&
6057 tp->link_config.active_duplex == DUPLEX_HALF)
6058 tw32(MAC_TX_LENGTHS, val |
6059 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6061 tw32(MAC_TX_LENGTHS, val |
6062 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6064 if (!tg3_flag(tp, 5705_PLUS)) {
6066 tw32(HOSTCC_STAT_COAL_TICKS,
6067 tp->coal.stats_block_coalesce_usecs);
6069 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6073 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6074 val = tr32(PCIE_PWR_MGMT_THRESH);
6076 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6079 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6080 tw32(PCIE_PWR_MGMT_THRESH, val);
6086 /* tp->lock must be held */
6087 static u64 tg3_refclk_read(struct tg3 *tp)
6089 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6090 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
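/* Note: a torn read is possible in principle, since the 64-bit value
 * comes from two 32-bit reads; this code implicitly assumes the EAV
 * block keeps the pair coherent (e.g. by latching the MSB when the
 * LSB is read). tp->lock serializes software users but cannot stop
 * the hardware counter from advancing between the two tr32() calls.
 */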
6093 /* tp->lock must be held */
6094 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6096 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6098 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6099 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6100 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6101 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6104 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6105 static inline void tg3_full_unlock(struct tg3 *tp);
6106 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6108 struct tg3 *tp = netdev_priv(dev);
6110 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6111 SOF_TIMESTAMPING_RX_SOFTWARE |
6112 SOF_TIMESTAMPING_SOFTWARE;
6114 if (tg3_flag(tp, PTP_CAPABLE)) {
6115 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6116 SOF_TIMESTAMPING_RX_HARDWARE |
6117 SOF_TIMESTAMPING_RAW_HARDWARE;
6121 info->phc_index = ptp_clock_index(tp->ptp_clock);
6123 info->phc_index = -1;
6125 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6127 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6128 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6129 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6130 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
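/* Userspace retrieves exactly this structure through the
 * ETHTOOL_GET_TS_INFO ioctl; e.g. `ethtool -T ethX` (illustrative
 * device name) prints the SOF_TIMESTAMPING_* capabilities, PHC index,
 * and supported tx_types/rx_filters reported above.
 */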
6134 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6136 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6137 bool neg_adj = false;
6145 /* Frequency adjustment is performed using hardware with a 24 bit
6146 * accumulator and a programmable correction value. On each clk, the
6147 * correction value gets added to the accumulator and when it
6148 * overflows, the time counter is incremented/decremented.
6150 * So conversion from ppb to correction value is
6151 * ppb * (1 << 24) / 1000000000
6153 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6154 TG3_EAV_REF_CLK_CORRECT_MASK;
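/* Worked example with illustrative numbers: a request of ppb = 1000
 * (+1 ppm) gives correction = 1000 * 16777216 / 1000000000 = 16 after
 * the integer division. Added on every clock, 16 overflows the 24-bit
 * accumulator once per 2^24 / 16 = 2^20 clocks, i.e. the counter gains
 * roughly one extra tick per million clocks, about the requested 1 ppm.
 */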
6156 tg3_full_lock(tp, 0);
6159 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6160 TG3_EAV_REF_CLK_CORRECT_EN |
6161 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6163 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6165 tg3_full_unlock(tp);
6170 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6172 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6174 tg3_full_lock(tp, 0);
6175 tp->ptp_adjust += delta;
6176 tg3_full_unlock(tp);
6181 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
6185 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6187 tg3_full_lock(tp, 0);
6188 ns = tg3_refclk_read(tp);
6189 ns += tp->ptp_adjust;
6190 tg3_full_unlock(tp);
6192 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
6193 ts->tv_nsec = remainder;
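/* e.g. ns = 1500000123 yields tv_sec = 1 and tv_nsec = 500000123. */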
6198 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6199 const struct timespec *ts)
6202 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6204 ns = timespec_to_ns(ts);
6206 tg3_full_lock(tp, 0);
6207 tg3_refclk_write(tp, ns);
6209 tg3_full_unlock(tp);
6214 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6215 struct ptp_clock_request *rq, int on)
6217 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6222 case PTP_CLK_REQ_PEROUT:
6223 if (rq->perout.index != 0)
6226 tg3_full_lock(tp, 0);
6227 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6228 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6233 nsec = rq->perout.start.sec * 1000000000ULL +
6234 rq->perout.start.nsec;
6236 if (rq->perout.period.sec || rq->perout.period.nsec) {
6237 netdev_warn(tp->dev,
6238 "Device supports only a one-shot timesync output, period must be 0\n");
6243 if (nsec & (1ULL << 63)) {
6244 netdev_warn(tp->dev,
6245 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6250 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6251 tw32(TG3_EAV_WATCHDOG0_MSB,
6252 TG3_EAV_WATCHDOG0_EN |
6253 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6255 tw32(TG3_EAV_REF_CLCK_CTL,
6256 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6258 tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6259 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6263 tg3_full_unlock(tp);
6273 static const struct ptp_clock_info tg3_ptp_caps = {
6274 .owner = THIS_MODULE,
6275 .name = "tg3 clock",
6276 .max_adj = 250000000,
6281 .adjfreq = tg3_ptp_adjfreq,
6282 .adjtime = tg3_ptp_adjtime,
6283 .gettime = tg3_ptp_gettime,
6284 .settime = tg3_ptp_settime,
6285 .enable = tg3_ptp_enable,
6288 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6289 struct skb_shared_hwtstamps *timestamp)
6291 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6292 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6296 /* tp->lock must be held */
6297 static void tg3_ptp_init(struct tg3 *tp)
6299 if (!tg3_flag(tp, PTP_CAPABLE))
6302 /* Initialize the hardware clock to the system time. */
6303 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6305 tp->ptp_info = tg3_ptp_caps;
6308 /* tp->lock must be held */
6309 static void tg3_ptp_resume(struct tg3 *tp)
6311 if (!tg3_flag(tp, PTP_CAPABLE))
6314 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6318 static void tg3_ptp_fini(struct tg3 *tp)
6320 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6323 ptp_clock_unregister(tp->ptp_clock);
6324 tp->ptp_clock = NULL;
6328 static inline int tg3_irq_sync(struct tg3 *tp)
6330 return tp->irq_sync;
6333 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6337 dst = (u32 *)((u8 *)dst + off);
6338 for (i = 0; i < len; i += sizeof(u32))
6339 *dst++ = tr32(off + i);
6342 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6344 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6345 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6346 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6347 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6348 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6349 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6350 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6351 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6352 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6353 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6354 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6355 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6356 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6357 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6358 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6359 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6360 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6361 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6362 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6364 if (tg3_flag(tp, SUPPORT_MSIX))
6365 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6367 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6368 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6369 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6370 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6371 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6372 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6373 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6374 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6376 if (!tg3_flag(tp, 5705_PLUS)) {
6377 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6378 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6379 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6382 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6383 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6384 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6385 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6386 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6388 if (tg3_flag(tp, NVRAM))
6389 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6392 static void tg3_dump_state(struct tg3 *tp)
6397 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6401 if (tg3_flag(tp, PCI_EXPRESS)) {
6402 /* Read up to but not including private PCI registers */
6403 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6404 regs[i / sizeof(u32)] = tr32(i);
6406 tg3_dump_legacy_regs(tp, regs);
6408 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6409 if (!regs[i + 0] && !regs[i + 1] &&
6410 !regs[i + 2] && !regs[i + 3])
6413 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6415 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6420 for (i = 0; i < tp->irq_cnt; i++) {
6421 struct tg3_napi *tnapi = &tp->napi[i];
6423 /* SW status block */
6425 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6427 tnapi->hw_status->status,
6428 tnapi->hw_status->status_tag,
6429 tnapi->hw_status->rx_jumbo_consumer,
6430 tnapi->hw_status->rx_consumer,
6431 tnapi->hw_status->rx_mini_consumer,
6432 tnapi->hw_status->idx[0].rx_producer,
6433 tnapi->hw_status->idx[0].tx_consumer);
6436 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6438 tnapi->last_tag, tnapi->last_irq_tag,
6439 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6441 tnapi->prodring.rx_std_prod_idx,
6442 tnapi->prodring.rx_std_cons_idx,
6443 tnapi->prodring.rx_jmb_prod_idx,
6444 tnapi->prodring.rx_jmb_cons_idx);
6448 /* This is called whenever we suspect that the system chipset is re-
6449 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6450 * is bogus tx completions. We try to recover by setting the
6451 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6454 static void tg3_tx_recover(struct tg3 *tp)
6456 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6457 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6459 netdev_warn(tp->dev,
6460 "The system may be re-ordering memory-mapped I/O "
6461 "cycles to the network device, attempting to recover. "
6462 "Please report the problem to the driver maintainer "
6463 "and include system chipset information.\n");
6465 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6468 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6470 /* Tell compiler to fetch tx indices from memory. */
6472 return tnapi->tx_pending -
6473 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
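/* Wraparound example (TG3_TX_RING_SIZE is a power of two, 512 at the
 * time of writing): with tx_prod = 5 and tx_cons = 510, the unsigned
 * difference (5 - 510) & 511 = 7 counts the descriptors in flight, so
 * this returns tx_pending - 7. The mask keeps the distance correct
 * across index wrap without any branch.
 */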
6476 /* Tigon3 never reports partial packet sends. So we do not
6477 * need special logic to handle SKBs that have not had all
6478 * of their frags sent yet, like SunGEM does.
6480 static void tg3_tx(struct tg3_napi *tnapi)
6482 struct tg3 *tp = tnapi->tp;
6483 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6484 u32 sw_idx = tnapi->tx_cons;
6485 struct netdev_queue *txq;
6486 int index = tnapi - tp->napi;
6487 unsigned int pkts_compl = 0, bytes_compl = 0;
6489 if (tg3_flag(tp, ENABLE_TSS))
6492 txq = netdev_get_tx_queue(tp->dev, index);
6494 while (sw_idx != hw_idx) {
6495 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6496 struct sk_buff *skb = ri->skb;
6499 if (unlikely(skb == NULL)) {
6504 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6505 struct skb_shared_hwtstamps timestamp;
6506 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6507 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6509 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6511 skb_tstamp_tx(skb, &timestamp);
6514 pci_unmap_single(tp->pdev,
6515 dma_unmap_addr(ri, mapping),
6521 while (ri->fragmented) {
6522 ri->fragmented = false;
6523 sw_idx = NEXT_TX(sw_idx);
6524 ri = &tnapi->tx_buffers[sw_idx];
6527 sw_idx = NEXT_TX(sw_idx);
6529 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6530 ri = &tnapi->tx_buffers[sw_idx];
6531 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6534 pci_unmap_page(tp->pdev,
6535 dma_unmap_addr(ri, mapping),
6536 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6539 while (ri->fragmented) {
6540 ri->fragmented = false;
6541 sw_idx = NEXT_TX(sw_idx);
6542 ri = &tnapi->tx_buffers[sw_idx];
6545 sw_idx = NEXT_TX(sw_idx);
6549 bytes_compl += skb->len;
6553 if (unlikely(tx_bug)) {
6559 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6561 tnapi->tx_cons = sw_idx;
6563 /* Need to make the tx_cons update visible to tg3_start_xmit()
6564 * before checking for netif_queue_stopped(). Without the
6565 * memory barrier, there is a small possibility that tg3_start_xmit()
6566 * will miss it and cause the queue to be stopped forever.
6567 */
6568 smp_mb();
6570 if (unlikely(netif_tx_queue_stopped(txq) &&
6571 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6572 __netif_tx_lock(txq, smp_processor_id());
6573 if (netif_tx_queue_stopped(txq) &&
6574 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6575 netif_tx_wake_queue(txq);
6576 __netif_tx_unlock(txq);
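/* A minimal sketch of the barrier pairing relied on above (assuming
 * the conventional stop/wake protocol on the producer side in
 * tg3_start_xmit(); illustrative, not driver code):
 *
 *   consumer (this function)        producer (xmit path)
 *   tnapi->tx_cons = sw_idx;        netif_tx_stop_queue(txq);
 *   smp_mb();                       smp_mb();
 *   if (queue stopped &&            if (tg3_tx_avail(tnapi) >
 *       avail > thresh)                 TG3_TX_WAKEUP_THRESH(tnapi))
 *           wake queue;                     wake queue;
 *
 * Whichever side executes second is guaranteed to observe the other
 * side's update, so the queue cannot remain stopped forever.
 */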
6580 static void tg3_frag_free(bool is_frag, void *data)
6581 {
6582 if (is_frag)
6583 put_page(virt_to_head_page(data));
6584 else
6585 kfree(data);
6586 }
6588 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6590 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6591 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6596 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6597 map_sz, PCI_DMA_FROMDEVICE);
6598 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6603 /* Returns size of skb allocated or < 0 on error.
6605 * We only need to fill in the address because the other members
6606 * of the RX descriptor are invariant, see tg3_init_rings.
6608 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6609 * posting buffers we only dirty the first cache line of the RX
6610 * descriptor (containing the address). Whereas for the RX status
6611 * buffers the cpu only reads the last cacheline of the RX descriptor
6612 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6613 */
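/* Concretely (field names as used elsewhere in this file): the posting
 * path below writes only desc->addr_hi and desc->addr_lo, while the
 * completion path in tg3_rx() reads desc->idx_len, desc->type_flags
 * and desc->err_vlan, so producer and consumer dirty opposite ends of
 * the descriptor.
 */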
6614 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6615 u32 opaque_key, u32 dest_idx_unmasked,
6616 unsigned int *frag_size)
6618 struct tg3_rx_buffer_desc *desc;
6619 struct ring_info *map;
6622 int skb_size, data_size, dest_idx;
6624 switch (opaque_key) {
6625 case RXD_OPAQUE_RING_STD:
6626 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6627 desc = &tpr->rx_std[dest_idx];
6628 map = &tpr->rx_std_buffers[dest_idx];
6629 data_size = tp->rx_pkt_map_sz;
6632 case RXD_OPAQUE_RING_JUMBO:
6633 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6634 desc = &tpr->rx_jmb[dest_idx].std;
6635 map = &tpr->rx_jmb_buffers[dest_idx];
6636 data_size = TG3_RX_JMB_MAP_SZ;
6643 /* Do not overwrite any of the map or rp information
6644 * until we are sure we can commit to a new buffer.
6646 * Callers depend upon this behavior and assume that
6647 * we leave everything unchanged if we fail.
6649 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6650 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
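/* The second SKB_DATA_ALIGN() term reserves tail room for
 * skb_shared_info: build_skb(), which tg3_rx() later wraps around this
 * buffer, places the shared info structure at the end of the data area.
 */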
6651 if (skb_size <= PAGE_SIZE) {
6652 data = netdev_alloc_frag(skb_size);
6653 *frag_size = skb_size;
6655 data = kmalloc(skb_size, GFP_ATOMIC);
6661 mapping = pci_map_single(tp->pdev,
6662 data + TG3_RX_OFFSET(tp),
6664 PCI_DMA_FROMDEVICE);
6665 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6666 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6671 dma_unmap_addr_set(map, mapping, mapping);
6673 desc->addr_hi = ((u64)mapping >> 32);
6674 desc->addr_lo = ((u64)mapping & 0xffffffff);
6679 /* We only need to move over in the address because the other
6680 * members of the RX descriptor are invariant. See notes above
6681 * tg3_alloc_rx_data for full details.
6683 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6684 struct tg3_rx_prodring_set *dpr,
6685 u32 opaque_key, int src_idx,
6686 u32 dest_idx_unmasked)
6688 struct tg3 *tp = tnapi->tp;
6689 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6690 struct ring_info *src_map, *dest_map;
6691 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6694 switch (opaque_key) {
6695 case RXD_OPAQUE_RING_STD:
6696 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6697 dest_desc = &dpr->rx_std[dest_idx];
6698 dest_map = &dpr->rx_std_buffers[dest_idx];
6699 src_desc = &spr->rx_std[src_idx];
6700 src_map = &spr->rx_std_buffers[src_idx];
6703 case RXD_OPAQUE_RING_JUMBO:
6704 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6705 dest_desc = &dpr->rx_jmb[dest_idx].std;
6706 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6707 src_desc = &spr->rx_jmb[src_idx].std;
6708 src_map = &spr->rx_jmb_buffers[src_idx];
6715 dest_map->data = src_map->data;
6716 dma_unmap_addr_set(dest_map, mapping,
6717 dma_unmap_addr(src_map, mapping));
6718 dest_desc->addr_hi = src_desc->addr_hi;
6719 dest_desc->addr_lo = src_desc->addr_lo;
6721 /* Ensure that the update to the skb happens after the physical
6722 * addresses have been transferred to the new BD location.
6726 src_map->data = NULL;
6729 /* The RX ring scheme is composed of multiple rings which post fresh
6730 * buffers to the chip, and one special ring the chip uses to report
6731 * status back to the host.
6733 * The special ring reports the status of received packets to the
6734 * host. The chip does not write into the original descriptor the
6735 * RX buffer was obtained from. The chip simply takes the original
6736 * descriptor as provided by the host, updates the status and length
6737 * field, then writes this into the next status ring entry.
6739 * Each ring the host uses to post buffers to the chip is described
6740 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
6741 * it is first placed into the on-chip ram. When the packet's length
6742 * is known, it walks down the TG3_BDINFO entries to select the ring.
6743 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6744 * which is within the range of the new packet's length is chosen.
6746 * The "separate ring for rx status" scheme may sound queer, but it makes
6747 * sense from a cache coherency perspective. If only the host writes
6748 * to the buffer post rings, and only the chip writes to the rx status
6749 * rings, then cache lines never move beyond shared-modified state.
6750 * If both the host and chip were to write into the same ring, cache line
6751 * eviction could occur since both entities want it in an exclusive state.
6752 */
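/* A short illustrative walk-through of the scheme described above
 * (indices invented for the example): the host posts a fresh buffer at
 * rx_std[7] and advances the standard producer mailbox. When a
 * 1000-byte frame arrives, the chip selects the standard ring via the
 * TG3_BDINFO MAXLEN walk, DMAs the packet, and writes a status entry
 * whose opaque field still encodes "standard ring, index 7". tg3_rx()
 * then recovers the original buffer with
 * rx_std_buffers[desc->opaque & RXD_OPAQUE_INDEX_MASK].
 */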
6753 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6755 struct tg3 *tp = tnapi->tp;
6756 u32 work_mask, rx_std_posted = 0;
6757 u32 std_prod_idx, jmb_prod_idx;
6758 u32 sw_idx = tnapi->rx_rcb_ptr;
6761 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6763 hw_idx = *(tnapi->rx_rcb_prod_idx);
6765 * We need to order the read of hw_idx and the read of
6766 * the opaque cookie.
6771 std_prod_idx = tpr->rx_std_prod_idx;
6772 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6773 while (sw_idx != hw_idx && budget > 0) {
6774 struct ring_info *ri;
6775 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6777 struct sk_buff *skb;
6778 dma_addr_t dma_addr;
6779 u32 opaque_key, desc_idx, *post_ptr;
6783 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6784 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6785 if (opaque_key == RXD_OPAQUE_RING_STD) {
6786 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6787 dma_addr = dma_unmap_addr(ri, mapping);
6789 post_ptr = &std_prod_idx;
6791 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6792 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6793 dma_addr = dma_unmap_addr(ri, mapping);
6795 post_ptr = &jmb_prod_idx;
6797 goto next_pkt_nopost;
6799 work_mask |= opaque_key;
6801 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6802 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6804 tg3_recycle_rx(tnapi, tpr, opaque_key,
6805 desc_idx, *post_ptr);
6807 /* Other statistics kept track of by card. */
6812 prefetch(data + TG3_RX_OFFSET(tp));
6813 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6816 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6817 RXD_FLAG_PTPSTAT_PTPV1 ||
6818 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6819 RXD_FLAG_PTPSTAT_PTPV2) {
6820 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6821 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6824 if (len > TG3_RX_COPY_THRESH(tp)) {
6826 unsigned int frag_size;
6828 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6829 *post_ptr, &frag_size);
6833 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6834 PCI_DMA_FROMDEVICE);
6836 skb = build_skb(data, frag_size);
6838 tg3_frag_free(frag_size != 0, data);
6839 goto drop_it_no_recycle;
6841 skb_reserve(skb, TG3_RX_OFFSET(tp));
6842 /* Ensure that the update to the data happens
6843 * after the usage of the old DMA mapping.
6850 tg3_recycle_rx(tnapi, tpr, opaque_key,
6851 desc_idx, *post_ptr);
6853 skb = netdev_alloc_skb(tp->dev,
6854 len + TG3_RAW_IP_ALIGN);
6856 goto drop_it_no_recycle;
6858 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6859 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6861 data + TG3_RX_OFFSET(tp),
6863 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6868 tg3_hwclock_to_timestamp(tp, tstamp,
6869 skb_hwtstamps(skb));
6871 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6872 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6873 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6874 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6875 skb->ip_summed = CHECKSUM_UNNECESSARY;
6877 skb_checksum_none_assert(skb);
6879 skb->protocol = eth_type_trans(skb, tp->dev);
6881 if (len > (tp->dev->mtu + ETH_HLEN) &&
6882 skb->protocol != htons(ETH_P_8021Q)) {
6884 goto drop_it_no_recycle;
6887 if (desc->type_flags & RXD_FLAG_VLAN &&
6888 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6889 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6890 desc->err_vlan & RXD_VLAN_MASK);
6892 napi_gro_receive(&tnapi->napi, skb);
6900 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6901 tpr->rx_std_prod_idx = std_prod_idx &
6902 tp->rx_std_ring_mask;
6903 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6904 tpr->rx_std_prod_idx);
6905 work_mask &= ~RXD_OPAQUE_RING_STD;
6910 sw_idx &= tp->rx_ret_ring_mask;
6912 /* Refresh hw_idx to see if there is new work */
6913 if (sw_idx == hw_idx) {
6914 hw_idx = *(tnapi->rx_rcb_prod_idx);
6919 /* ACK the status ring. */
6920 tnapi->rx_rcb_ptr = sw_idx;
6921 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6923 /* Refill RX ring(s). */
6924 if (!tg3_flag(tp, ENABLE_RSS)) {
6925 /* Sync BD data before updating mailbox */
6928 if (work_mask & RXD_OPAQUE_RING_STD) {
6929 tpr->rx_std_prod_idx = std_prod_idx &
6930 tp->rx_std_ring_mask;
6931 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6932 tpr->rx_std_prod_idx);
6934 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6935 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6936 tp->rx_jmb_ring_mask;
6937 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6938 tpr->rx_jmb_prod_idx);
6941 } else if (work_mask) {
6942 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6943 * updated before the producer indices can be updated.
6947 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6948 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6950 if (tnapi != &tp->napi[1]) {
6951 tp->rx_refill = true;
6952 napi_schedule(&tp->napi[1].napi);
6959 static void tg3_poll_link(struct tg3 *tp)
6961 /* handle link change and other phy events */
6962 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6963 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6965 if (sblk->status & SD_STATUS_LINK_CHG) {
6966 sblk->status = SD_STATUS_UPDATED |
6967 (sblk->status & ~SD_STATUS_LINK_CHG);
6968 spin_lock(&tp->lock);
6969 if (tg3_flag(tp, USE_PHYLIB)) {
6971 (MAC_STATUS_SYNC_CHANGED |
6972 MAC_STATUS_CFG_CHANGED |
6973 MAC_STATUS_MI_COMPLETION |
6974 MAC_STATUS_LNKSTATE_CHANGED));
6977 tg3_setup_phy(tp, false);
6978 spin_unlock(&tp->lock);
6983 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6984 struct tg3_rx_prodring_set *dpr,
6985 struct tg3_rx_prodring_set *spr)
6987 u32 si, di, cpycnt, src_prod_idx;
6991 src_prod_idx = spr->rx_std_prod_idx;
6993 /* Make sure updates to the rx_std_buffers[] entries and the
6994 * standard producer index are seen in the correct order.
6998 if (spr->rx_std_cons_idx == src_prod_idx)
7001 if (spr->rx_std_cons_idx < src_prod_idx)
7002 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7004 cpycnt = tp->rx_std_ring_mask + 1 -
7005 spr->rx_std_cons_idx;
7007 cpycnt = min(cpycnt,
7008 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
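/* Example (illustrative, ring size 512 so rx_std_ring_mask == 511):
 * with spr cons = 500 and spr prod = 10 the producer has wrapped, so
 * the else branch yields cpycnt = 512 - 500 = 12 entries up to the end
 * of the source ring; a later pass picks up the remaining 10. The
 * min() then clamps the chunk so the copy does not run past the end of
 * the destination ring either.
 */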
7010 si = spr->rx_std_cons_idx;
7011 di = dpr->rx_std_prod_idx;
7013 for (i = di; i < di + cpycnt; i++) {
7014 if (dpr->rx_std_buffers[i].data) {
7024 /* Ensure that updates to the rx_std_buffers ring and the
7025 * shadowed hardware producer ring from tg3_recycle_skb() are
7026 * ordered correctly WRT the skb check above.
7030 memcpy(&dpr->rx_std_buffers[di],
7031 &spr->rx_std_buffers[si],
7032 cpycnt * sizeof(struct ring_info));
7034 for (i = 0; i < cpycnt; i++, di++, si++) {
7035 struct tg3_rx_buffer_desc *sbd, *dbd;
7036 sbd = &spr->rx_std[si];
7037 dbd = &dpr->rx_std[di];
7038 dbd->addr_hi = sbd->addr_hi;
7039 dbd->addr_lo = sbd->addr_lo;
7042 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7043 tp->rx_std_ring_mask;
7044 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7045 tp->rx_std_ring_mask;
7049 src_prod_idx = spr->rx_jmb_prod_idx;
7051 /* Make sure updates to the rx_jmb_buffers[] entries and
7052 * the jumbo producer index are seen in the correct order.
7056 if (spr->rx_jmb_cons_idx == src_prod_idx)
7059 if (spr->rx_jmb_cons_idx < src_prod_idx)
7060 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7062 cpycnt = tp->rx_jmb_ring_mask + 1 -
7063 spr->rx_jmb_cons_idx;
7065 cpycnt = min(cpycnt,
7066 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7068 si = spr->rx_jmb_cons_idx;
7069 di = dpr->rx_jmb_prod_idx;
7071 for (i = di; i < di + cpycnt; i++) {
7072 if (dpr->rx_jmb_buffers[i].data) {
7082 /* Ensure that updates to the rx_jmb_buffers ring and the
7083 * shadowed hardware producer ring from tg3_recycle_skb() are
7084 * ordered correctly WRT the skb check above.
7088 memcpy(&dpr->rx_jmb_buffers[di],
7089 &spr->rx_jmb_buffers[si],
7090 cpycnt * sizeof(struct ring_info));
7092 for (i = 0; i < cpycnt; i++, di++, si++) {
7093 struct tg3_rx_buffer_desc *sbd, *dbd;
7094 sbd = &spr->rx_jmb[si].std;
7095 dbd = &dpr->rx_jmb[di].std;
7096 dbd->addr_hi = sbd->addr_hi;
7097 dbd->addr_lo = sbd->addr_lo;
7100 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7101 tp->rx_jmb_ring_mask;
7102 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7103 tp->rx_jmb_ring_mask;
7109 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7111 struct tg3 *tp = tnapi->tp;
7113 /* run TX completion thread */
7114 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7116 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7120 if (!tnapi->rx_rcb_prod_idx)
7123 /* run RX thread, within the bounds set by NAPI.
7124 * All RX "locking" is done by ensuring outside
7125 * code synchronizes with tg3->napi.poll()
7127 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7128 work_done += tg3_rx(tnapi, budget - work_done);
7130 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7131 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7133 u32 std_prod_idx = dpr->rx_std_prod_idx;
7134 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7136 tp->rx_refill = false;
7137 for (i = 1; i <= tp->rxq_cnt; i++)
7138 err |= tg3_rx_prodring_xfer(tp, dpr,
7139 &tp->napi[i].prodring);
7143 if (std_prod_idx != dpr->rx_std_prod_idx)
7144 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7145 dpr->rx_std_prod_idx);
7147 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7148 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7149 dpr->rx_jmb_prod_idx);
7151 mmiowb();
7153 if (err)
7154 tw32_f(HOSTCC_MODE, tp->coal_now);
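/* Sketch of the flow above: with RSS, vector 0 owns the rings the
 * hardware actually consumes, so each poll drains the per-vector
 * producer rings back into vector 0's set, and the mailboxes are only
 * written when an index actually moved.  On a transfer error the
 * HOSTCC_MODE write with tp->coal_now forces another interrupt,
 * presumably so the refill can be retried promptly.
 */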
7160 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7162 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7163 schedule_work(&tp->reset_task);
7166 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7168 cancel_work_sync(&tp->reset_task);
7169 tg3_flag_clear(tp, RESET_TASK_PENDING);
7170 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7173 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7175 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7176 struct tg3 *tp = tnapi->tp;
7178 struct tg3_hw_status *sblk = tnapi->hw_status;
7181 work_done = tg3_poll_work(tnapi, work_done, budget);
7183 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7184 goto tx_recovery;
7186 if (unlikely(work_done >= budget))
7187 break;
7189 /* tnapi->last_tag is used in tg3_int_reenable() below
7190 * to tell the hw how much work has been processed,
7191 * so we must read it before checking for more work.
7193 tnapi->last_tag = sblk->status_tag;
7194 tnapi->last_irq_tag = tnapi->last_tag;
7195 rmb();
7197 /* check for RX/TX work to do */
7198 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7199 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7201 /* This test here is not race free, but will reduce
7202 * the number of interrupts by looping again.
7204 if (tnapi == &tp->napi[1] && tp->rx_refill)
7205 continue;
7207 napi_complete(napi);
7208 /* Reenable interrupts. */
7209 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7211 /* This test here is synchronized by napi_schedule()
7212 * and napi_complete() to close the race condition.
7214 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7215 tw32(HOSTCC_MODE, tp->coalesce_mode |
7216 HOSTCC_MODE_ENABLE |
7217 tnapi->coal_now);
7218 }
7226 tx_recovery:
7227 /* work_done is guaranteed to be less than budget. */
7228 napi_complete(napi);
7229 tg3_reset_task_schedule(tp);
7230 return work_done;
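/* Tag handshake used above (rough sketch): writing last_tag << 24 to
 * the interrupt mailbox acknowledges every status block update up to
 * that tag; the chip raises a new interrupt only once it has posted a
 * newer tag, e.g. an ack of tag 5 stays silent until tag 6 appears.
 */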
7233 static void tg3_process_error(struct tg3 *tp)
7236 bool real_error = false;
7238 if (tg3_flag(tp, ERROR_PROCESSED))
7241 /* Check Flow Attention register */
7242 val = tr32(HOSTCC_FLOW_ATTN);
7243 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7244 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7248 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7249 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7253 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7254 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7263 tg3_flag_set(tp, ERROR_PROCESSED);
7264 tg3_reset_task_schedule(tp);
7267 static int tg3_poll(struct napi_struct *napi, int budget)
7269 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7270 struct tg3 *tp = tnapi->tp;
7272 struct tg3_hw_status *sblk = tnapi->hw_status;
7275 if (sblk->status & SD_STATUS_ERROR)
7276 tg3_process_error(tp);
7280 work_done = tg3_poll_work(tnapi, work_done, budget);
7282 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7283 goto tx_recovery;
7285 if (unlikely(work_done >= budget))
7286 break;
7288 if (tg3_flag(tp, TAGGED_STATUS)) {
7289 /* tnapi->last_tag is used in tg3_int_reenable() below
7290 * to tell the hw how much work has been processed,
7291 * so we must read it before checking for more work.
7293 tnapi->last_tag = sblk->status_tag;
7294 tnapi->last_irq_tag = tnapi->last_tag;
7295 rmb();
7297 sblk->status &= ~SD_STATUS_UPDATED;
7299 if (likely(!tg3_has_work(tnapi))) {
7300 napi_complete(napi);
7301 tg3_int_reenable(tnapi);
7302 break;
7308 tx_recovery:
7309 /* work_done is guaranteed to be less than budget. */
7310 napi_complete(napi);
7311 tg3_reset_task_schedule(tp);
7312 return work_done;
7315 static void tg3_napi_disable(struct tg3 *tp)
7319 for (i = tp->irq_cnt - 1; i >= 0; i--)
7320 napi_disable(&tp->napi[i].napi);
7323 static void tg3_napi_enable(struct tg3 *tp)
7327 for (i = 0; i < tp->irq_cnt; i++)
7328 napi_enable(&tp->napi[i].napi);
7331 static void tg3_napi_init(struct tg3 *tp)
7335 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7336 for (i = 1; i < tp->irq_cnt; i++)
7337 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7340 static void tg3_napi_fini(struct tg3 *tp)
7344 for (i = 0; i < tp->irq_cnt; i++)
7345 netif_napi_del(&tp->napi[i].napi);
7348 static inline void tg3_netif_stop(struct tg3 *tp)
7350 tp->dev->trans_start = jiffies; /* prevent tx timeout */
7351 tg3_napi_disable(tp);
7352 netif_carrier_off(tp->dev);
7353 netif_tx_disable(tp->dev);
7356 /* tp->lock must be held */
7357 static inline void tg3_netif_start(struct tg3 *tp)
7361 /* NOTE: unconditional netif_tx_wake_all_queues is only
7362 * appropriate so long as all callers are assured to
7363 * have free tx slots (such as after tg3_init_hw)
7365 netif_tx_wake_all_queues(tp->dev);
7368 netif_carrier_on(tp->dev);
7370 tg3_napi_enable(tp);
7371 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7372 tg3_enable_ints(tp);
7375 static void tg3_irq_quiesce(struct tg3 *tp)
7379 BUG_ON(tp->irq_sync);
7380 tp->irq_sync = 1;
7381 smp_mb();
7384 for (i = 0; i < tp->irq_cnt; i++)
7385 synchronize_irq(tp->napi[i].irq_vec);
7388 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7389 * If irq_sync is non-zero, then the IRQ handler must be synchronized
7390 * with as well. Most of the time, this is not necessary except when
7391 * shutting down the device.
7393 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7395 spin_lock_bh(&tp->lock);
7396 if (irq_sync)
7397 tg3_irq_quiesce(tp);
7400 static inline void tg3_full_unlock(struct tg3 *tp)
7402 spin_unlock_bh(&tp->lock);
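/* Typical usage of the helpers above (a minimal sketch):
 *
 *	tg3_full_lock(tp, 1);	// irq_sync != 0 also quiesces the IRQs
 *	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 *	tg3_full_unlock(tp);
 *
 * Callers that do not race with the interrupt path pass irq_sync == 0
 * and take only tp->lock.
 */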
7405 /* One-shot MSI handler - Chip automatically disables interrupt
7406 * after sending MSI so driver doesn't have to do it.
7408 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7410 struct tg3_napi *tnapi = dev_id;
7411 struct tg3 *tp = tnapi->tp;
7413 prefetch(tnapi->hw_status);
7415 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7417 if (likely(!tg3_irq_sync(tp)))
7418 napi_schedule(&tnapi->napi);
7423 /* MSI ISR - No need to check for interrupt sharing and no need to
7424 * flush status block and interrupt mailbox. PCI ordering rules
7425 * guarantee that MSI will arrive after the status block.
7427 static irqreturn_t tg3_msi(int irq, void *dev_id)
7429 struct tg3_napi *tnapi = dev_id;
7430 struct tg3 *tp = tnapi->tp;
7432 prefetch(tnapi->hw_status);
7434 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7435 /*
7436 * Writing any value to intr-mbox-0 clears PCI INTA# and
7437 * chip-internal interrupt pending events.
7438 * Writing non-zero to intr-mbox-0 additionally tells the
7439 * NIC to stop sending us irqs, engaging "in-intr-handler"
7440 * event coalescing.
7441 */
7442 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7443 if (likely(!tg3_irq_sync(tp)))
7444 napi_schedule(&tnapi->napi);
7446 return IRQ_RETVAL(1);
7449 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7451 struct tg3_napi *tnapi = dev_id;
7452 struct tg3 *tp = tnapi->tp;
7453 struct tg3_hw_status *sblk = tnapi->hw_status;
7454 unsigned int handled = 1;
7456 /* In INTx mode, it is possible for the interrupt to arrive at
7457 * the CPU before the status block posted prior to the interrupt.
7458 * Reading the PCI State register will confirm whether the
7459 * interrupt is ours and will flush the status block.
7461 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7462 if (tg3_flag(tp, CHIP_RESETTING) ||
7463 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7464 handled = 0;
7465 goto out;
7466 }
7467 }
7469 /*
7470 * Writing any value to intr-mbox-0 clears PCI INTA# and
7471 * chip-internal interrupt pending events.
7472 * Writing non-zero to intr-mbox-0 additionally tells the
7473 * NIC to stop sending us irqs, engaging "in-intr-handler"
7474 * event coalescing.
7475 *
7476 * Flush the mailbox to de-assert the IRQ immediately to prevent
7477 * spurious interrupts. The flush impacts performance but
7478 * excessive spurious interrupts can be worse in some cases.
7480 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7481 if (tg3_irq_sync(tp))
7482 goto out;
7483 sblk->status &= ~SD_STATUS_UPDATED;
7484 if (likely(tg3_has_work(tnapi))) {
7485 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7486 napi_schedule(&tnapi->napi);
7488 /* No work, shared interrupt perhaps? re-enable
7489 * interrupts, and flush that PCI write
7491 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7494 out:
7495 return IRQ_RETVAL(handled);
7498 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7500 struct tg3_napi *tnapi = dev_id;
7501 struct tg3 *tp = tnapi->tp;
7502 struct tg3_hw_status *sblk = tnapi->hw_status;
7503 unsigned int handled = 1;
7505 /* In INTx mode, it is possible for the interrupt to arrive at
7506 * the CPU before the status block posted prior to the interrupt.
7507 * Reading the PCI State register will confirm whether the
7508 * interrupt is ours and will flush the status block.
7510 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7511 if (tg3_flag(tp, CHIP_RESETTING) ||
7512 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7513 handled = 0;
7514 goto out;
7515 }
7516 }
7518 /*
7519 * Writing any value to intr-mbox-0 clears PCI INTA# and
7520 * chip-internal interrupt pending events.
7521 * Writing non-zero to intr-mbox-0 additionally tells the
7522 * NIC to stop sending us irqs, engaging "in-intr-handler"
7523 * event coalescing.
7524 *
7525 * Flush the mailbox to de-assert the IRQ immediately to prevent
7526 * spurious interrupts. The flush impacts performance but
7527 * excessive spurious interrupts can be worse in some cases.
7529 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7532 * In a shared interrupt configuration, sometimes other devices'
7533 * interrupts will scream. We record the current status tag here
7534 * so that the above check can report that the screaming interrupts
7535 * are unhandled. Eventually they will be silenced.
7537 tnapi->last_irq_tag = sblk->status_tag;
7539 if (tg3_irq_sync(tp))
7540 goto out;
7542 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7544 napi_schedule(&tnapi->napi);
7546 out:
7547 return IRQ_RETVAL(handled);
7550 /* ISR for interrupt test */
7551 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7553 struct tg3_napi *tnapi = dev_id;
7554 struct tg3 *tp = tnapi->tp;
7555 struct tg3_hw_status *sblk = tnapi->hw_status;
7557 if ((sblk->status & SD_STATUS_UPDATED) ||
7558 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7559 tg3_disable_ints(tp);
7560 return IRQ_RETVAL(1);
7562 return IRQ_RETVAL(0);
7565 #ifdef CONFIG_NET_POLL_CONTROLLER
7566 static void tg3_poll_controller(struct net_device *dev)
7569 struct tg3 *tp = netdev_priv(dev);
7571 if (tg3_irq_sync(tp))
7574 for (i = 0; i < tp->irq_cnt; i++)
7575 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7579 static void tg3_tx_timeout(struct net_device *dev)
7581 struct tg3 *tp = netdev_priv(dev);
7583 if (netif_msg_tx_err(tp)) {
7584 netdev_err(dev, "transmit timed out, resetting\n");
7588 tg3_reset_task_schedule(tp);
7591 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7592 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7594 u32 base = (u32) mapping & 0xffffffff;
7596 return (base > 0xffffdcc0) && (base + len + 8 < base);
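/* Worked example for the test above: the NIC splits DMA addresses into
 * 32-bit halves and cannot cross a 4GB boundary within one buffer.
 * "base + len + 8 < base" detects 32-bit wrap, e.g. base = 0xffffff00
 * and len = 0x200 sum to 0x108 after truncation, which is below base,
 * so the buffer straddles a boundary and must be bounced.  The
 * "base > 0xffffdcc0" pre-check skips buffers that end more than
 * 0x2340 (9024) bytes below the boundary, presumably sized to cover a
 * jumbo frame plus the 8-byte slop.
 */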
7599 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7600 * of any 4GB boundaries: 4G, 8G, etc
7602 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7605 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7606 u32 base = (u32) mapping & 0xffffffff;
7608 return ((base + len + (mss & 0x3fff)) < base);
7613 /* Test for DMA addresses > 40-bit */
7614 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7617 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7618 if (tg3_flag(tp, 40BIT_DMA_BUG))
7619 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7626 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7627 dma_addr_t mapping, u32 len, u32 flags,
7630 txbd->addr_hi = ((u64) mapping >> 32);
7631 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7632 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7633 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
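/* Packing example (illustrative): a 1514-byte frame with TXD_FLAG_END
 * and no mss/VLAN yields
 *	len_flags = (1514 << TXD_LEN_SHIFT) | TXD_FLAG_END
 * i.e. the length in the upper half-word and the flags in the low
 * 16 bits, while the TSO mss and 802.1Q tag share the vlan_tag word
 * at their respective shifts.
 */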
7636 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7637 dma_addr_t map, u32 len, u32 flags,
7640 struct tg3 *tp = tnapi->tp;
7643 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7646 if (tg3_4g_overflow_test(map, len))
7649 if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7652 if (tg3_40bit_overflow_test(tp, map, len))
7655 if (tp->dma_limit) {
7656 u32 prvidx = *entry;
7657 u32 tmp_flag = flags & ~TXD_FLAG_END;
7658 while (len > tp->dma_limit && *budget) {
7659 u32 frag_len = tp->dma_limit;
7660 len -= tp->dma_limit;
7662 /* Avoid the 8-byte DMA problem */
7663 if (len <= 8) {
7664 len += tp->dma_limit / 2;
7665 frag_len = tp->dma_limit / 2;
7666 }
7668 tnapi->tx_buffers[*entry].fragmented = true;
7670 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7671 frag_len, tmp_flag, mss, vlan);
7674 *entry = NEXT_TX(*entry);
7681 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7682 len, flags, mss, vlan);
7684 *entry = NEXT_TX(*entry);
7687 tnapi->tx_buffers[prvidx].fragmented = false;
7691 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7692 len, flags, mss, vlan);
7693 *entry = NEXT_TX(*entry);
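/* Example of the dma_limit splitting above (illustrative numbers):
 * with tp->dma_limit == 4096, a 9000-byte mapping is emitted as BDs
 * of 4096 + 4096 + 808 bytes sharing one DMA mapping, and only the
 * final BD keeps TXD_FLAG_END.  When the remainder would be <= 8
 * bytes, the last full chunk is halved (e.g. 4100 -> 2048 + 2052) so
 * no descriptor ever carries a tiny tail, sidestepping the short-DMA
 * erratum.
 */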
7699 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7702 struct sk_buff *skb;
7703 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7708 pci_unmap_single(tnapi->tp->pdev,
7709 dma_unmap_addr(txb, mapping),
7713 while (txb->fragmented) {
7714 txb->fragmented = false;
7715 entry = NEXT_TX(entry);
7716 txb = &tnapi->tx_buffers[entry];
7719 for (i = 0; i <= last; i++) {
7720 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7722 entry = NEXT_TX(entry);
7723 txb = &tnapi->tx_buffers[entry];
7725 pci_unmap_page(tnapi->tp->pdev,
7726 dma_unmap_addr(txb, mapping),
7727 skb_frag_size(frag), PCI_DMA_TODEVICE);
7729 while (txb->fragmented) {
7730 txb->fragmented = false;
7731 entry = NEXT_TX(entry);
7732 txb = &tnapi->tx_buffers[entry];
7737 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7738 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7739 struct sk_buff **pskb,
7740 u32 *entry, u32 *budget,
7741 u32 base_flags, u32 mss, u32 vlan)
7743 struct tg3 *tp = tnapi->tp;
7744 struct sk_buff *new_skb, *skb = *pskb;
7745 dma_addr_t new_addr = 0;
7748 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7749 new_skb = skb_copy(skb, GFP_ATOMIC);
7751 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7753 new_skb = skb_copy_expand(skb,
7754 skb_headroom(skb) + more_headroom,
7755 skb_tailroom(skb), GFP_ATOMIC);
7761 /* New SKB is guaranteed to be linear. */
7762 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7764 /* Make sure the mapping succeeded */
7765 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7766 dev_kfree_skb(new_skb);
7769 u32 save_entry = *entry;
7771 base_flags |= TXD_FLAG_END;
7773 tnapi->tx_buffers[*entry].skb = new_skb;
7774 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7777 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7778 new_skb->len, base_flags,
7780 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7781 dev_kfree_skb(new_skb);
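/* Recovery sketch for the workaround above: the packet is linearized
 * into a fresh skb (with 4-byte alignment forced on 5701) and
 * re-mapped as a single BD; if even that mapping or BD setup fails,
 * the BDs written so far are unwound via tg3_tx_skb_unmap() and the
 * packet is dropped rather than leaving the ring inconsistent.
 */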
7792 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7794 /* Use GSO to workaround a rare TSO bug that may be triggered when the
7795 * TSO header is greater than 80 bytes.
7797 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7799 struct sk_buff *segs, *nskb;
7800 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7802 /* Estimate the number of fragments in the worst case */
7803 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7804 netif_stop_queue(tp->dev);
7806 /* netif_tx_stop_queue() must be done before checking
7807 * tx index in tg3_tx_avail() below, because in
7808 * tg3_tx(), we update tx index before checking for
7809 * netif_tx_queue_stopped().
7812 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7813 return NETDEV_TX_BUSY;
7815 netif_wake_queue(tp->dev);
7818 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7820 goto tg3_tso_bug_end;
7826 tg3_start_xmit(nskb, tp->dev);
7832 return NETDEV_TX_OK;
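/* In short: rather than special-casing long headers in every TSO
 * variant, tg3_tso_bug() has the stack resegment the skb in software
 * (GSO, features & ~NETIF_F_TSO) and feeds the resulting MTU-sized
 * skbs back through tg3_start_xmit() one at a time.
 */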
7835 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7836 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7838 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7840 struct tg3 *tp = netdev_priv(dev);
7841 u32 len, entry, base_flags, mss, vlan = 0;
7843 int i = -1, would_hit_hwbug;
7845 struct tg3_napi *tnapi;
7846 struct netdev_queue *txq;
7849 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7850 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7851 if (tg3_flag(tp, ENABLE_TSS))
7852 tnapi++;
7854 budget = tg3_tx_avail(tnapi);
7856 /* We are running in BH disabled context with netif_tx_lock
7857 * and TX reclaim runs via tp->napi.poll inside of a software
7858 * interrupt. Furthermore, IRQ processing runs lockless so we have
7859 * no IRQ context deadlocks to worry about either. Rejoice!
7861 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7862 if (!netif_tx_queue_stopped(txq)) {
7863 netif_tx_stop_queue(txq);
7865 /* This is a hard error, log it. */
7867 "BUG! Tx Ring full when queue awake!\n");
7869 return NETDEV_TX_BUSY;
7872 entry = tnapi->tx_prod;
7874 if (skb->ip_summed == CHECKSUM_PARTIAL)
7875 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7877 mss = skb_shinfo(skb)->gso_size;
7878 if (mss) {
7879 struct iphdr *iph;
7880 u32 tcp_opt_len, hdr_len;
7882 if (skb_header_cloned(skb) &&
7883 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7884 goto drop;
7886 iph = ip_hdr(skb);
7887 tcp_opt_len = tcp_optlen(skb);
7889 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7891 if (!skb_is_gso_v6(skb)) {
7892 iph->check = 0;
7893 iph->tot_len = htons(mss + hdr_len);
7896 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7897 tg3_flag(tp, TSO_BUG))
7898 return tg3_tso_bug(tp, skb);
7900 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7901 TXD_FLAG_CPU_POST_DMA);
7903 if (tg3_flag(tp, HW_TSO_1) ||
7904 tg3_flag(tp, HW_TSO_2) ||
7905 tg3_flag(tp, HW_TSO_3)) {
7906 tcp_hdr(skb)->check = 0;
7907 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7908 } else {
7909 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7910 iph->daddr, 0,
7911 IPPROTO_TCP,
7912 0);
7913 }
7914 if (tg3_flag(tp, HW_TSO_3)) {
7915 mss |= (hdr_len & 0xc) << 12;
7916 if (hdr_len & 0x10)
7917 base_flags |= 0x00000010;
7918 base_flags |= (hdr_len & 0x3e0) << 5;
7919 } else if (tg3_flag(tp, HW_TSO_2))
7920 mss |= hdr_len << 9;
7921 else if (tg3_flag(tp, HW_TSO_1) ||
7922 tg3_asic_rev(tp) == ASIC_REV_5705) {
7923 if (tcp_opt_len || iph->ihl > 5) {
7926 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7927 mss |= (tsflags << 11);
7930 if (tcp_opt_len || iph->ihl > 5) {
7933 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7934 base_flags |= tsflags << 12;
7939 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7940 !mss && skb->len > VLAN_ETH_FRAME_LEN)
7941 base_flags |= TXD_FLAG_JMB_PKT;
7943 if (vlan_tx_tag_present(skb)) {
7944 base_flags |= TXD_FLAG_VLAN;
7945 vlan = vlan_tx_tag_get(skb);
7948 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7949 tg3_flag(tp, TX_TSTAMP_EN)) {
7950 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7951 base_flags |= TXD_FLAG_HWTSTAMP;
7954 len = skb_headlen(skb);
7956 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7957 if (pci_dma_mapping_error(tp->pdev, mapping))
7961 tnapi->tx_buffers[entry].skb = skb;
7962 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7964 would_hit_hwbug = 0;
7966 if (tg3_flag(tp, 5701_DMA_BUG))
7967 would_hit_hwbug = 1;
7969 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7970 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7972 would_hit_hwbug = 1;
7973 } else if (skb_shinfo(skb)->nr_frags > 0) {
7974 u32 tmp_mss = mss;
7976 if (!tg3_flag(tp, HW_TSO_1) &&
7977 !tg3_flag(tp, HW_TSO_2) &&
7978 !tg3_flag(tp, HW_TSO_3))
7979 tmp_mss = 0;
7981 /* Now loop through additional data
7982 * fragments, and queue them.
7984 last = skb_shinfo(skb)->nr_frags - 1;
7985 for (i = 0; i <= last; i++) {
7986 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7988 len = skb_frag_size(frag);
7989 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7990 len, DMA_TO_DEVICE);
7992 tnapi->tx_buffers[entry].skb = NULL;
7993 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7995 if (dma_mapping_error(&tp->pdev->dev, mapping))
7996 goto dma_error;
7998 if (!budget ||
7999 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8000 len, base_flags |
8001 ((i == last) ? TXD_FLAG_END : 0),
8002 tmp_mss, vlan)) {
8003 would_hit_hwbug = 1;
8004 break;
8009 if (would_hit_hwbug) {
8010 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8012 /* If the workaround fails due to memory/mapping
8013 * failure, silently drop this packet.
8015 entry = tnapi->tx_prod;
8016 budget = tg3_tx_avail(tnapi);
8017 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8018 base_flags, mss, vlan))
8022 skb_tx_timestamp(skb);
8023 netdev_tx_sent_queue(txq, skb->len);
8025 /* Sync BD data before updating mailbox */
8028 /* Packets are ready, update Tx producer idx local and on card. */
8029 tw32_tx_mbox(tnapi->prodmbox, entry);
8031 tnapi->tx_prod = entry;
8032 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8033 netif_tx_stop_queue(txq);
8035 /* netif_tx_stop_queue() must be done before checking
8036 * tx index in tg3_tx_avail() below, because in
8037 * tg3_tx(), we update tx index before checking for
8038 * netif_tx_queue_stopped().
8041 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8042 netif_tx_wake_queue(txq);
8046 return NETDEV_TX_OK;
8048 dma_error:
8049 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8050 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8051 drop:
8052 dev_kfree_skb(skb);
8053 drop_nomem:
8054 tp->tx_dropped++;
8055 return NETDEV_TX_OK;
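/* Queue flow control above: the queue stops once fewer than
 * MAX_SKB_FRAGS + 1 descriptors remain (the worst case one skb can
 * consume) and is only re-woken when tg3_tx_avail() rises above
 * TG3_TX_WAKEUP_THRESH(tnapi), giving hysteresis so the queue does
 * not bounce on every reclaimed descriptor.
 */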
8058 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8061 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8062 MAC_MODE_PORT_MODE_MASK);
8064 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8066 if (!tg3_flag(tp, 5705_PLUS))
8067 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8069 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8070 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8072 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8074 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8076 if (tg3_flag(tp, 5705_PLUS) ||
8077 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8078 tg3_asic_rev(tp) == ASIC_REV_5700)
8079 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8082 tw32(MAC_MODE, tp->mac_mode);
8086 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8088 u32 val, bmcr, mac_mode, ptest = 0;
8090 tg3_phy_toggle_apd(tp, false);
8091 tg3_phy_toggle_automdix(tp, false);
8093 if (extlpbk && tg3_phy_set_extloopbk(tp))
8096 bmcr = BMCR_FULLDPLX;
8101 bmcr |= BMCR_SPEED100;
8105 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8107 bmcr |= BMCR_SPEED100;
8110 bmcr |= BMCR_SPEED1000;
8115 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8116 tg3_readphy(tp, MII_CTRL1000, &val);
8117 val |= CTL1000_AS_MASTER |
8118 CTL1000_ENABLE_MASTER;
8119 tg3_writephy(tp, MII_CTRL1000, val);
8121 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8122 MII_TG3_FET_PTEST_TRIM_2;
8123 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8126 bmcr |= BMCR_LOOPBACK;
8128 tg3_writephy(tp, MII_BMCR, bmcr);
8130 /* The write needs to be flushed for the FETs */
8131 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8132 tg3_readphy(tp, MII_BMCR, &bmcr);
8136 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8137 tg3_asic_rev(tp) == ASIC_REV_5785) {
8138 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8139 MII_TG3_FET_PTEST_FRC_TX_LINK |
8140 MII_TG3_FET_PTEST_FRC_TX_LOCK);
8142 /* The write needs to be flushed for the AC131 */
8143 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8146 /* Reset to prevent losing 1st rx packet intermittently */
8147 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8148 tg3_flag(tp, 5780_CLASS)) {
8149 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8151 tw32_f(MAC_RX_MODE, tp->rx_mode);
8154 mac_mode = tp->mac_mode &
8155 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8156 if (speed == SPEED_1000)
8157 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8159 mac_mode |= MAC_MODE_PORT_MODE_MII;
8161 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8162 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8164 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8165 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8166 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8167 mac_mode |= MAC_MODE_LINK_POLARITY;
8169 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8170 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8173 tw32(MAC_MODE, mac_mode);
8179 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8181 struct tg3 *tp = netdev_priv(dev);
8183 if (features & NETIF_F_LOOPBACK) {
8184 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8187 spin_lock_bh(&tp->lock);
8188 tg3_mac_loopback(tp, true);
8189 netif_carrier_on(tp->dev);
8190 spin_unlock_bh(&tp->lock);
8191 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8193 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8196 spin_lock_bh(&tp->lock);
8197 tg3_mac_loopback(tp, false);
8198 /* Force link status check */
8199 tg3_setup_phy(tp, true);
8200 spin_unlock_bh(&tp->lock);
8201 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8205 static netdev_features_t tg3_fix_features(struct net_device *dev,
8206 netdev_features_t features)
8208 struct tg3 *tp = netdev_priv(dev);
8210 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8211 features &= ~NETIF_F_ALL_TSO;
8216 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8218 netdev_features_t changed = dev->features ^ features;
8220 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8221 tg3_set_loopback(dev, features);
8226 static void tg3_rx_prodring_free(struct tg3 *tp,
8227 struct tg3_rx_prodring_set *tpr)
8231 if (tpr != &tp->napi[0].prodring) {
8232 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8233 i = (i + 1) & tp->rx_std_ring_mask)
8234 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8237 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8238 for (i = tpr->rx_jmb_cons_idx;
8239 i != tpr->rx_jmb_prod_idx;
8240 i = (i + 1) & tp->rx_jmb_ring_mask) {
8241 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8249 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8250 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8253 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8254 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8255 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8260 /* Initialize rx rings for packet processing.
8262 * The chip has been shut down and the driver detached from
8263 * the networking, so no interrupts or new tx packets will
8264 * end up in the driver. tp->{tx,}lock are held and thus
8267 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8268 struct tg3_rx_prodring_set *tpr)
8270 u32 i, rx_pkt_dma_sz;
8272 tpr->rx_std_cons_idx = 0;
8273 tpr->rx_std_prod_idx = 0;
8274 tpr->rx_jmb_cons_idx = 0;
8275 tpr->rx_jmb_prod_idx = 0;
8277 if (tpr != &tp->napi[0].prodring) {
8278 memset(&tpr->rx_std_buffers[0], 0,
8279 TG3_RX_STD_BUFF_RING_SIZE(tp));
8280 if (tpr->rx_jmb_buffers)
8281 memset(&tpr->rx_jmb_buffers[0], 0,
8282 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8286 /* Zero out all descriptors. */
8287 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8289 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8290 if (tg3_flag(tp, 5780_CLASS) &&
8291 tp->dev->mtu > ETH_DATA_LEN)
8292 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8293 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8295 /* Initialize invariants of the rings, we only set this
8296 * stuff once. This works because the card does not
8297 * write into the rx buffer posting rings.
8299 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8300 struct tg3_rx_buffer_desc *rxd;
8302 rxd = &tpr->rx_std[i];
8303 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8304 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8305 rxd->opaque = (RXD_OPAQUE_RING_STD |
8306 (i << RXD_OPAQUE_INDEX_SHIFT));
8309 /* Now allocate fresh SKBs for each rx ring. */
8310 for (i = 0; i < tp->rx_pending; i++) {
8311 unsigned int frag_size;
8313 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8315 netdev_warn(tp->dev,
8316 "Using a smaller RX standard ring. Only "
8317 "%d out of %d buffers were allocated "
8318 "successfully\n", i, tp->rx_pending);
8326 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8329 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8331 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8334 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8335 struct tg3_rx_buffer_desc *rxd;
8337 rxd = &tpr->rx_jmb[i].std;
8338 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8339 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8341 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8342 (i << RXD_OPAQUE_INDEX_SHIFT));
8345 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8346 unsigned int frag_size;
8348 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8350 netdev_warn(tp->dev,
8351 "Using a smaller RX jumbo ring. Only %d "
8352 "out of %d buffers were allocated "
8353 "successfully\n", i, tp->rx_jumbo_pending);
8356 tp->rx_jumbo_pending = i;
8365 tg3_rx_prodring_free(tp, tpr);
8369 static void tg3_rx_prodring_fini(struct tg3 *tp,
8370 struct tg3_rx_prodring_set *tpr)
8372 kfree(tpr->rx_std_buffers);
8373 tpr->rx_std_buffers = NULL;
8374 kfree(tpr->rx_jmb_buffers);
8375 tpr->rx_jmb_buffers = NULL;
8377 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8378 tpr->rx_std, tpr->rx_std_mapping);
8382 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8383 tpr->rx_jmb, tpr->rx_jmb_mapping);
8388 static int tg3_rx_prodring_init(struct tg3 *tp,
8389 struct tg3_rx_prodring_set *tpr)
8391 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8393 if (!tpr->rx_std_buffers)
8396 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8397 TG3_RX_STD_RING_BYTES(tp),
8398 &tpr->rx_std_mapping,
8403 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8404 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8406 if (!tpr->rx_jmb_buffers)
8409 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8410 TG3_RX_JMB_RING_BYTES(tp),
8411 &tpr->rx_jmb_mapping,
8420 tg3_rx_prodring_fini(tp, tpr);
8424 /* Free up pending packets in all rx/tx rings.
8426 * The chip has been shut down and the driver detached from
8427 * the networking, so no interrupts or new tx packets will
8428 * end up in the driver. tp->{tx,}lock is not held and we are not
8429 * in an interrupt context and thus may sleep.
8431 static void tg3_free_rings(struct tg3 *tp)
8435 for (j = 0; j < tp->irq_cnt; j++) {
8436 struct tg3_napi *tnapi = &tp->napi[j];
8438 tg3_rx_prodring_free(tp, &tnapi->prodring);
8440 if (!tnapi->tx_buffers)
8443 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8444 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8449 tg3_tx_skb_unmap(tnapi, i,
8450 skb_shinfo(skb)->nr_frags - 1);
8452 dev_kfree_skb_any(skb);
8454 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8458 /* Initialize tx/rx rings for packet processing.
8460 * The chip has been shut down and the driver detached from
8461 * the networking, so no interrupts or new tx packets will
8462 * end up in the driver. tp->{tx,}lock are held and thus
8465 static int tg3_init_rings(struct tg3 *tp)
8469 /* Free up all the SKBs. */
8472 for (i = 0; i < tp->irq_cnt; i++) {
8473 struct tg3_napi *tnapi = &tp->napi[i];
8475 tnapi->last_tag = 0;
8476 tnapi->last_irq_tag = 0;
8477 tnapi->hw_status->status = 0;
8478 tnapi->hw_status->status_tag = 0;
8479 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8484 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8486 tnapi->rx_rcb_ptr = 0;
8488 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8490 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8499 static void tg3_mem_tx_release(struct tg3 *tp)
8503 for (i = 0; i < tp->irq_max; i++) {
8504 struct tg3_napi *tnapi = &tp->napi[i];
8506 if (tnapi->tx_ring) {
8507 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8508 tnapi->tx_ring, tnapi->tx_desc_mapping);
8509 tnapi->tx_ring = NULL;
8512 kfree(tnapi->tx_buffers);
8513 tnapi->tx_buffers = NULL;
8517 static int tg3_mem_tx_acquire(struct tg3 *tp)
8520 struct tg3_napi *tnapi = &tp->napi[0];
8522 /* If multivector TSS is enabled, vector 0 does not handle
8523 * tx interrupts. Don't allocate any resources for it.
8525 if (tg3_flag(tp, ENABLE_TSS))
8528 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8529 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8530 TG3_TX_RING_SIZE, GFP_KERNEL);
8531 if (!tnapi->tx_buffers)
8534 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8536 &tnapi->tx_desc_mapping,
8538 if (!tnapi->tx_ring)
8545 tg3_mem_tx_release(tp);
8549 static void tg3_mem_rx_release(struct tg3 *tp)
8553 for (i = 0; i < tp->irq_max; i++) {
8554 struct tg3_napi *tnapi = &tp->napi[i];
8556 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8561 dma_free_coherent(&tp->pdev->dev,
8562 TG3_RX_RCB_RING_BYTES(tp),
8564 tnapi->rx_rcb_mapping);
8565 tnapi->rx_rcb = NULL;
8569 static int tg3_mem_rx_acquire(struct tg3 *tp)
8571 unsigned int i, limit;
8573 limit = tp->rxq_cnt;
8575 /* If RSS is enabled, we need a (dummy) producer ring
8576 * set on vector zero. This is the true hw prodring.
8578 if (tg3_flag(tp, ENABLE_RSS))
8581 for (i = 0; i < limit; i++) {
8582 struct tg3_napi *tnapi = &tp->napi[i];
8584 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8587 /* If multivector RSS is enabled, vector 0
8588 * does not handle rx or tx interrupts.
8589 * Don't allocate any resources for it.
8591 if (!i && tg3_flag(tp, ENABLE_RSS))
8594 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8595 TG3_RX_RCB_RING_BYTES(tp),
8596 &tnapi->rx_rcb_mapping,
8597 GFP_KERNEL | __GFP_ZERO);
8605 tg3_mem_rx_release(tp);
8610 * Must only be invoked with interrupt sources disabled and
8611 * the hardware shut down.
8613 static void tg3_free_consistent(struct tg3 *tp)
8617 for (i = 0; i < tp->irq_cnt; i++) {
8618 struct tg3_napi *tnapi = &tp->napi[i];
8620 if (tnapi->hw_status) {
8621 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8623 tnapi->status_mapping);
8624 tnapi->hw_status = NULL;
8628 tg3_mem_rx_release(tp);
8629 tg3_mem_tx_release(tp);
8632 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8633 tp->hw_stats, tp->stats_mapping);
8634 tp->hw_stats = NULL;
8639 * Must only be invoked with interrupt sources disabled and
8640 * the hardware shut down. Can sleep.
8642 static int tg3_alloc_consistent(struct tg3 *tp)
8646 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8647 sizeof(struct tg3_hw_stats),
8649 GFP_KERNEL | __GFP_ZERO);
8653 for (i = 0; i < tp->irq_cnt; i++) {
8654 struct tg3_napi *tnapi = &tp->napi[i];
8655 struct tg3_hw_status *sblk;
8657 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8659 &tnapi->status_mapping,
8660 GFP_KERNEL | __GFP_ZERO);
8661 if (!tnapi->hw_status)
8664 sblk = tnapi->hw_status;
8666 if (tg3_flag(tp, ENABLE_RSS)) {
8667 u16 *prodptr = NULL;
8670 * When RSS is enabled, the status block format changes
8671 * slightly. The "rx_jumbo_consumer", "reserved",
8672 * and "rx_mini_consumer" members get mapped to the
8673 * other three rx return ring producer indexes.
8677 prodptr = &sblk->idx[0].rx_producer;
8680 prodptr = &sblk->rx_jumbo_consumer;
8683 prodptr = &sblk->reserved;
8686 prodptr = &sblk->rx_mini_consumer;
8689 tnapi->rx_rcb_prod_idx = prodptr;
8691 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8695 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8701 tg3_free_consistent(tp);
8705 #define MAX_WAIT_CNT 1000
8707 /* To stop a block, clear the enable bit and poll till it
8708 * clears. tp->lock is held.
8710 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8715 if (tg3_flag(tp, 5705_PLUS)) {
8722 /* We can't enable/disable these bits of the
8723 * 5705/5750, just say success.
8736 for (i = 0; i < MAX_WAIT_CNT; i++) {
8737 if (pci_channel_offline(tp->pdev)) {
8738 dev_err(&tp->pdev->dev,
8739 "tg3_stop_block device offline, "
8740 "ofs=%lx enable_bit=%x\n",
8747 if ((val & enable_bit) == 0)
8751 if (i == MAX_WAIT_CNT && !silent) {
8752 dev_err(&tp->pdev->dev,
8753 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8761 /* tp->lock is held. */
8762 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8766 tg3_disable_ints(tp);
8768 if (pci_channel_offline(tp->pdev)) {
8769 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8770 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8775 tp->rx_mode &= ~RX_MODE_ENABLE;
8776 tw32_f(MAC_RX_MODE, tp->rx_mode);
8779 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8780 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8781 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8782 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8783 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8784 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8786 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8787 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8788 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8789 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8790 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8791 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8792 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8794 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8795 tw32_f(MAC_MODE, tp->mac_mode);
8798 tp->tx_mode &= ~TX_MODE_ENABLE;
8799 tw32_f(MAC_TX_MODE, tp->tx_mode);
8801 for (i = 0; i < MAX_WAIT_CNT; i++) {
8803 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8806 if (i >= MAX_WAIT_CNT) {
8807 dev_err(&tp->pdev->dev,
8808 "%s timed out, TX_MODE_ENABLE will not clear "
8809 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8813 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8814 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8815 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8817 tw32(FTQ_RESET, 0xffffffff);
8818 tw32(FTQ_RESET, 0x00000000);
8820 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8821 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8824 for (i = 0; i < tp->irq_cnt; i++) {
8825 struct tg3_napi *tnapi = &tp->napi[i];
8826 if (tnapi->hw_status)
8827 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8833 /* Save PCI command register before chip reset */
8834 static void tg3_save_pci_state(struct tg3 *tp)
8836 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8839 /* Restore PCI state after chip reset */
8840 static void tg3_restore_pci_state(struct tg3 *tp)
8844 /* Re-enable indirect register accesses. */
8845 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8846 tp->misc_host_ctrl);
8848 /* Set MAX PCI retry to zero. */
8849 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8850 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8851 tg3_flag(tp, PCIX_MODE))
8852 val |= PCISTATE_RETRY_SAME_DMA;
8853 /* Allow reads and writes to the APE register and memory space. */
8854 if (tg3_flag(tp, ENABLE_APE))
8855 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8856 PCISTATE_ALLOW_APE_SHMEM_WR |
8857 PCISTATE_ALLOW_APE_PSPACE_WR;
8858 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8860 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8862 if (!tg3_flag(tp, PCI_EXPRESS)) {
8863 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8864 tp->pci_cacheline_sz);
8865 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8869 /* Make sure PCI-X relaxed ordering bit is clear. */
8870 if (tg3_flag(tp, PCIX_MODE)) {
8873 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8875 pcix_cmd &= ~PCI_X_CMD_ERO;
8876 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8880 if (tg3_flag(tp, 5780_CLASS)) {
8882 /* Chip reset on 5780 will reset MSI enable bit,
8883 * so need to restore it.
8885 if (tg3_flag(tp, USING_MSI)) {
8888 pci_read_config_word(tp->pdev,
8889 tp->msi_cap + PCI_MSI_FLAGS,
8891 pci_write_config_word(tp->pdev,
8892 tp->msi_cap + PCI_MSI_FLAGS,
8893 ctrl | PCI_MSI_FLAGS_ENABLE);
8894 val = tr32(MSGINT_MODE);
8895 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8900 /* tp->lock is held. */
8901 static int tg3_chip_reset(struct tg3 *tp)
8904 void (*write_op)(struct tg3 *, u32, u32);
8909 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8911 /* No matching tg3_nvram_unlock() after this because
8912 * chip reset below will undo the nvram lock.
8914 tp->nvram_lock_cnt = 0;
8916 /* GRC_MISC_CFG core clock reset will clear the memory
8917 * enable bit in PCI register 4 and the MSI enable bit
8918 * on some chips, so we save relevant registers here.
8920 tg3_save_pci_state(tp);
8922 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8923 tg3_flag(tp, 5755_PLUS))
8924 tw32(GRC_FASTBOOT_PC, 0);
8927 * We must avoid the readl() that normally takes place.
8928 * It locks machines, causes machine checks, and other
8929 * fun things. So, temporarily disable the 5701
8930 * hardware workaround, while we do the reset.
8932 write_op = tp->write32;
8933 if (write_op == tg3_write_flush_reg32)
8934 tp->write32 = tg3_write32;
8936 /* Prevent the irq handler from reading or writing PCI registers
8937 * during chip reset when the memory enable bit in the PCI command
8938 * register may be cleared. The chip does not generate interrupt
8939 * at this time, but the irq handler may still be called due to irq
8940 * sharing or irqpoll.
8942 tg3_flag_set(tp, CHIP_RESETTING);
8943 for (i = 0; i < tp->irq_cnt; i++) {
8944 struct tg3_napi *tnapi = &tp->napi[i];
8945 if (tnapi->hw_status) {
8946 tnapi->hw_status->status = 0;
8947 tnapi->hw_status->status_tag = 0;
8949 tnapi->last_tag = 0;
8950 tnapi->last_irq_tag = 0;
8954 for (i = 0; i < tp->irq_cnt; i++)
8955 synchronize_irq(tp->napi[i].irq_vec);
8957 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8958 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8959 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8963 val = GRC_MISC_CFG_CORECLK_RESET;
8965 if (tg3_flag(tp, PCI_EXPRESS)) {
8966 /* Force PCIe 1.0a mode */
8967 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8968 !tg3_flag(tp, 57765_PLUS) &&
8969 tr32(TG3_PCIE_PHY_TSTCTL) ==
8970 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8971 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8973 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
8974 tw32(GRC_MISC_CFG, (1 << 29));
8979 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
8980 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8981 tw32(GRC_VCPU_EXT_CTRL,
8982 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8985 /* Manage gphy power for all CPMU absent PCIe devices. */
8986 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8987 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8989 tw32(GRC_MISC_CFG, val);
8991 /* restore 5701 hardware bug workaround write method */
8992 tp->write32 = write_op;
8994 /* Unfortunately, we have to delay before the PCI read back.
8995 * Some 575X chips will not even respond to a PCI cfg access
8996 * when the reset command is given to the chip.
8998 * How do these hardware designers expect things to work
8999 * properly if the PCI write is posted for a long period
9000 * of time? It is always necessary to have some method by
9001 * which a register read back can occur to push the write
9002 * out which does the reset.
9004 * For most tg3 variants the trick below was working.
9009 /* Flush PCI posted writes. The normal MMIO registers
9010 * are inaccessible at this time so this is the only
9011 * way to do this reliably (actually, this is no longer
9012 * the case, see above). I tried to use indirect
9013 * register read/write but this upset some 5701 variants.
9015 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9019 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9022 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9026 /* Wait for link training to complete. */
9027 for (j = 0; j < 5000; j++)
9030 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9031 pci_write_config_dword(tp->pdev, 0xc4,
9032 cfg_val | (1 << 15));
9035 /* Clear the "no snoop" and "relaxed ordering" bits. */
9036 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9038 * Older PCIe devices only support the 128 byte
9039 * MPS setting. Enforce the restriction.
9041 if (!tg3_flag(tp, CPMU_PRESENT))
9042 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9043 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9045 /* Clear error status */
9046 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9047 PCI_EXP_DEVSTA_CED |
9048 PCI_EXP_DEVSTA_NFED |
9049 PCI_EXP_DEVSTA_FED |
9050 PCI_EXP_DEVSTA_URD);
9053 tg3_restore_pci_state(tp);
9055 tg3_flag_clear(tp, CHIP_RESETTING);
9056 tg3_flag_clear(tp, ERROR_PROCESSED);
9059 if (tg3_flag(tp, 5780_CLASS))
9060 val = tr32(MEMARB_MODE);
9061 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9063 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9065 tw32(0x5000, 0x400);
9068 if (tg3_flag(tp, IS_SSB_CORE)) {
9070 * BCM4785: In order to avoid repercussions from using
9071 * potentially defective internal ROM, stop the Rx RISC CPU,
9072 * which is not needed for normal operation.
9075 tg3_halt_cpu(tp, RX_CPU_BASE);
9078 err = tg3_poll_fw(tp);
9082 tw32(GRC_MODE, tp->grc_mode);
9084 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9087 tw32(0xc4, val | (1 << 15));
9090 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9091 tg3_asic_rev(tp) == ASIC_REV_5705) {
9092 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9093 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9094 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9095 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9098 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9099 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9101 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9102 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9107 tw32_f(MAC_MODE, val);
9110 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9114 if (tg3_flag(tp, PCI_EXPRESS) &&
9115 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9116 tg3_asic_rev(tp) != ASIC_REV_5785 &&
9117 !tg3_flag(tp, 57765_PLUS)) {
9120 tw32(0x7c00, val | (1 << 25));
9123 if (tg3_asic_rev(tp) == ASIC_REV_5720) {
9124 val = tr32(TG3_CPMU_CLCK_ORIDE);
9125 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9128 /* Reprobe ASF enable state. */
9129 tg3_flag_clear(tp, ENABLE_ASF);
9130 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9131 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9133 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9134 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9135 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9138 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9139 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9140 tg3_flag_set(tp, ENABLE_ASF);
9141 tp->last_event_jiffies = jiffies;
9142 if (tg3_flag(tp, 5750_PLUS))
9143 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9145 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9146 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9147 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9148 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9149 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9156 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9157 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9159 /* tp->lock is held. */
9160 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9166 tg3_write_sig_pre_reset(tp, kind);
9168 tg3_abort_hw(tp, silent);
9169 err = tg3_chip_reset(tp);
9171 __tg3_set_mac_addr(tp, false);
9173 tg3_write_sig_legacy(tp, kind);
9174 tg3_write_sig_post_reset(tp, kind);
9177 /* Save the stats across chip resets... */
9178 tg3_get_nstats(tp, &tp->net_stats_prev);
9179 tg3_get_estats(tp, &tp->estats_prev);
9181 /* And make sure the next sample is new data */
9182 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9191 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9193 struct tg3 *tp = netdev_priv(dev);
9194 struct sockaddr *addr = p;
9196 bool skip_mac_1 = false;
9198 if (!is_valid_ether_addr(addr->sa_data))
9199 return -EADDRNOTAVAIL;
9201 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9203 if (!netif_running(dev))
9206 if (tg3_flag(tp, ENABLE_ASF)) {
9207 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9209 addr0_high = tr32(MAC_ADDR_0_HIGH);
9210 addr0_low = tr32(MAC_ADDR_0_LOW);
9211 addr1_high = tr32(MAC_ADDR_1_HIGH);
9212 addr1_low = tr32(MAC_ADDR_1_LOW);
9214 /* Skip MAC addr 1 if ASF is using it. */
9215 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9216 !(addr1_high == 0 && addr1_low == 0))
9219 spin_lock_bh(&tp->lock);
9220 __tg3_set_mac_addr(tp, skip_mac_1);
9221 spin_unlock_bh(&tp->lock);
9226 /* tp->lock is held. */
9227 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9228 dma_addr_t mapping, u32 maxlen_flags,
9232 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9233 ((u64) mapping >> 32));
9235 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9236 ((u64) mapping & 0xffffffff));
9238 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9241 if (!tg3_flag(tp, 5705_PLUS))
9243 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9244 nic_addr);
9245 }
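/* A BDINFO block in NIC SRAM holds, in order, the 64-bit host ring
 * address (hi/lo), a maxlen/flags word, and a NIC-local ring address;
 * the last is only written on pre-5705 parts, as the check above
 * shows.  The send and receive-return ring setup below funnels
 * through this helper.
 */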
9248 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9252 if (!tg3_flag(tp, ENABLE_TSS)) {
9253 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9254 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9255 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9257 tw32(HOSTCC_TXCOL_TICKS, 0);
9258 tw32(HOSTCC_TXMAX_FRAMES, 0);
9259 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9261 for (; i < tp->txq_cnt; i++) {
9264 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9265 tw32(reg, ec->tx_coalesce_usecs);
9266 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9267 tw32(reg, ec->tx_max_coalesced_frames);
9268 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9269 tw32(reg, ec->tx_max_coalesced_frames_irq);
9273 for (; i < tp->irq_max - 1; i++) {
9274 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9275 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9276 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9280 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9283 u32 limit = tp->rxq_cnt;
9285 if (!tg3_flag(tp, ENABLE_RSS)) {
9286 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9287 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9288 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9291 tw32(HOSTCC_RXCOL_TICKS, 0);
9292 tw32(HOSTCC_RXMAX_FRAMES, 0);
9293 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9296 for (; i < limit; i++) {
9299 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9300 tw32(reg, ec->rx_coalesce_usecs);
9301 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9302 tw32(reg, ec->rx_max_coalesced_frames);
9303 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9304 tw32(reg, ec->rx_max_coalesced_frames_irq);
9307 for (; i < tp->irq_max - 1; i++) {
9308 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9309 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9310 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9314 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9316 tg3_coal_tx_init(tp, ec);
9317 tg3_coal_rx_init(tp, ec);
9319 if (!tg3_flag(tp, 5705_PLUS)) {
9320 u32 val = ec->stats_block_coalesce_usecs;
9322 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9323 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9328 tw32(HOSTCC_STAT_COAL_TICKS, val);
9332 /* tp->lock is held. */
9333 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9337 /* Disable all transmit rings but the first. */
9338 if (!tg3_flag(tp, 5705_PLUS))
9339 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9340 else if (tg3_flag(tp, 5717_PLUS))
9341 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9342 else if (tg3_flag(tp, 57765_CLASS) ||
9343 tg3_asic_rev(tp) == ASIC_REV_5762)
9344 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9346 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9348 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9349 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9350 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9351 BDINFO_FLAGS_DISABLED);
9354 /* tp->lock is held. */
9355 static void tg3_tx_rcbs_init(struct tg3 *tp)
9358 u32 txrcb = NIC_SRAM_SEND_RCB;
9360 if (tg3_flag(tp, ENABLE_TSS))
9363 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9364 struct tg3_napi *tnapi = &tp->napi[i];
9366 if (!tnapi->tx_ring)
9369 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9370 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9371 NIC_SRAM_TX_BUFFER_DESC);
9375 /* tp->lock is held. */
9376 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9380 /* Disable all receive return rings but the first. */
9381 if (tg3_flag(tp, 5717_PLUS))
9382 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9383 else if (!tg3_flag(tp, 5705_PLUS))
9384 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9385 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9386 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9387 tg3_flag(tp, 57765_CLASS))
9388 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9390 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9392 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9393 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9394 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9395 BDINFO_FLAGS_DISABLED);
9398 /* tp->lock is held. */
9399 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9402 u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9404 if (tg3_flag(tp, ENABLE_RSS))
9407 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9408 struct tg3_napi *tnapi = &tp->napi[i];
9413 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9414 (tp->rx_ret_ring_mask + 1) <<
9415 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9419 /* tp->lock is held. */
9420 static void tg3_rings_reset(struct tg3 *tp)
9424 struct tg3_napi *tnapi = &tp->napi[0];
9426 tg3_tx_rcbs_disable(tp);
9428 tg3_rx_ret_rcbs_disable(tp);
9430 /* Disable interrupts */
9431 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9432 tp->napi[0].chk_msi_cnt = 0;
9433 tp->napi[0].last_rx_cons = 0;
9434 tp->napi[0].last_tx_cons = 0;
9436 /* Zero mailbox registers. */
9437 if (tg3_flag(tp, SUPPORT_MSIX)) {
9438 for (i = 1; i < tp->irq_max; i++) {
9439 tp->napi[i].tx_prod = 0;
9440 tp->napi[i].tx_cons = 0;
9441 if (tg3_flag(tp, ENABLE_TSS))
9442 tw32_mailbox(tp->napi[i].prodmbox, 0);
9443 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9444 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9445 tp->napi[i].chk_msi_cnt = 0;
9446 tp->napi[i].last_rx_cons = 0;
9447 tp->napi[i].last_tx_cons = 0;
9449 if (!tg3_flag(tp, ENABLE_TSS))
9450 tw32_mailbox(tp->napi[0].prodmbox, 0);
9452 tp->napi[0].tx_prod = 0;
9453 tp->napi[0].tx_cons = 0;
9454 tw32_mailbox(tp->napi[0].prodmbox, 0);
9455 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9458 /* Make sure the NIC-based send BD rings are disabled. */
9459 if (!tg3_flag(tp, 5705_PLUS)) {
9460 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9461 for (i = 0; i < 16; i++)
9462 tw32_tx_mbox(mbox + i * 8, 0);
9465 /* Clear status block in ram. */
9466 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9468 /* Set status block DMA address */
9469 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9470 ((u64) tnapi->status_mapping >> 32));
9471 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9472 ((u64) tnapi->status_mapping & 0xffffffff));
9474 stblk = HOSTCC_STATBLCK_RING1;
9476 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9477 u64 mapping = (u64)tnapi->status_mapping;
9478 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9479 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9482 /* Clear status block in ram. */
9483 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9486 tg3_tx_rcbs_init(tp);
9487 tg3_rx_ret_rcbs_init(tp);
9490 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9492 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9494 if (!tg3_flag(tp, 5750_PLUS) ||
9495 tg3_flag(tp, 5780_CLASS) ||
9496 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9497 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9498 tg3_flag(tp, 57765_PLUS))
9499 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9500 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9501 tg3_asic_rev(tp) == ASIC_REV_5787)
9502 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9504 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9506 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9507 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9509 val = min(nic_rep_thresh, host_rep_thresh);
9510 tw32(RCVBDI_STD_THRESH, val);
9512 if (tg3_flag(tp, 57765_PLUS))
9513 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9515 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9518 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9520 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9522 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9523 tw32(RCVBDI_JUMBO_THRESH, val);
9525 if (tg3_flag(tp, 57765_PLUS))
9526 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9529 static inline u32 calc_crc(unsigned char *buf, int len)
9530 {
9531 u32 reg;
9532 u32 tmp;
9533 int j, k;
9535 reg = 0xffffffff;
9537 for (j = 0; j < len; j++) {
9538 reg ^= buf[j];
9540 for (k = 0; k < 8; k++) {
9541 tmp = reg & 0x01;
9543 reg >>= 1;
9545 if (tmp)
9546 reg ^= 0xedb88320;
9547 }
9548 }
9550 return ~reg;
9551 }
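/* This is the standard little-endian CRC-32 (polynomial 0xedb88320)
 * open-coded, presumably so the driver need not pull in the crc32
 * library.  __tg3_set_rx_mode() below feeds multicast addresses
 * through it to pick bits in the 128-bit MAC_HASH_REG_* filter.
 */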
9553 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9555 /* accept or reject all multicast frames */
9556 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9557 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9558 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9559 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9562 static void __tg3_set_rx_mode(struct net_device *dev)
9564 struct tg3 *tp = netdev_priv(dev);
9567 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9568 RX_MODE_KEEP_VLAN_TAG);
#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif
9578 if (dev->flags & IFF_PROMISC) {
9579 /* Promiscuous mode. */
9580 rx_mode |= RX_MODE_PROMISC;
9581 } else if (dev->flags & IFF_ALLMULTI) {
9582 /* Accept all multicast. */
9583 tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;		/* 1 of 128 hash bits */
			regidx = (bit & 0x60) >> 5;	/* 1 of 4 hash registers */
			bit &= 0x1f;			/* bit within that register */
			mc_filter[regidx] |= (1 << bit);
		}
9603 tw32(MAC_HASH_REG_0, mc_filter[0]);
9604 tw32(MAC_HASH_REG_1, mc_filter[1]);
9605 tw32(MAC_HASH_REG_2, mc_filter[2]);
9606 tw32(MAC_HASH_REG_3, mc_filter[3]);
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
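/* Worked example (hypothetical address): for the multicast MAC
 * 01:00:5e:00:00:01, calc_crc() runs over the six address bytes; taking
 * ~crc & 0x7f of the result gives a hash bit in 0..127, bits 6:5 of that
 * value select one of MAC_HASH_REG_0..3, and bits 4:0 select the bit
 * position within the chosen 32-bit register. The MAC accepts a multicast
 * frame only when the bit it computes the same way is set in the filter.
 */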
static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
{
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
}
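/* ethtool_rxfh_indir_default(i, qcnt) is simply i % qcnt, so the default
 * table spreads flows round-robin across the rx queues: with qcnt = 4 the
 * entries read 0, 1, 2, 3, 0, 1, 2, 3, ...
 */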
static void tg3_rss_check_indir_tbl(struct tg3 *tp)
{
	int i;

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return;

	if (tp->rxq_cnt == 1) {
		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
		return;
	}

	/* Validate table against current IRQ count */
	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
			break;
	}

	if (i != TG3_RSS_INDIR_TBL_SIZE)
		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
}
static void tg3_rss_write_indir_tbl(struct tg3 *tp)
{
	int i = 0;
	u32 reg = MAC_RSS_INDIR_TBL_0;

	while (i < TG3_RSS_INDIR_TBL_SIZE) {
		u32 val = tp->rss_ind_tbl[i];
		i++;
		/* Pack eight 4-bit table entries into each 32-bit register. */
		for (; i % 8; i++) {
			val <<= 4;
			val |= tp->rss_ind_tbl[i];
		}
		tw32(reg, val);
		reg += 4;
	}
}
static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
{
	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
	else
		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
}
9671 /* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
{
	u32 val, rdmac_mode;
	int i, err, limit;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tg3_disable_ints(tp);
9682 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9684 if (tg3_flag(tp, INIT_COMPLETE))
9685 tg3_abort_hw(tp, 1);
9687 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9688 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9689 tg3_phy_pull_config(tp);
9690 tg3_eee_pull_config(tp, NULL);
9691 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9694 /* Enable MAC control of LPI */
	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
		tg3_setup_eee(tp);

	if (reset_phy)
		tg3_phy_reset(tp);

	err = tg3_chip_reset(tp);
	if (err)
		return err;
9705 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9707 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9708 val = tr32(TG3_CPMU_CTRL);
9709 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9710 tw32(TG3_CPMU_CTRL, val);
9712 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9713 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9714 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9715 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9717 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9718 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9719 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9720 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9722 val = tr32(TG3_CPMU_HST_ACC);
9723 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9724 val |= CPMU_HST_ACC_MACCLK_6_25;
9725 tw32(TG3_CPMU_HST_ACC, val);
9728 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9729 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9730 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9731 PCIE_PWR_MGMT_L1_THRESH_4MS;
9732 tw32(PCIE_PWR_MGMT_THRESH, val);
9734 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9735 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9737 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9739 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9740 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9743 if (tg3_flag(tp, L1PLLPD_EN)) {
9744 u32 grc_mode = tr32(GRC_MODE);
9746 /* Access the lower 1K of PL PCIE block registers. */
9747 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9748 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9750 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9751 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9752 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9754 tw32(GRC_MODE, grc_mode);
9757 if (tg3_flag(tp, 57765_CLASS)) {
9758 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9759 u32 grc_mode = tr32(GRC_MODE);
9761 /* Access the lower 1K of PL PCIE block registers. */
9762 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9763 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9765 val = tr32(TG3_PCIE_TLDLPL_PORT +
9766 TG3_PCIE_PL_LO_PHYCTL5);
9767 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9768 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
			tw32(GRC_MODE, grc_mode);
		}

		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
			u32 grc_mode;

			/* Fix transmit hangs */
9777 val = tr32(TG3_CPMU_PADRNG_CTL);
9778 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9779 tw32(TG3_CPMU_PADRNG_CTL, val);
9781 grc_mode = tr32(GRC_MODE);
9783 /* Access the lower 1K of DL PCIE block registers. */
9784 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9785 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9787 val = tr32(TG3_PCIE_TLDLPL_PORT +
9788 TG3_PCIE_DL_LO_FTSMAX);
9789 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9790 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9791 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
			tw32(GRC_MODE, grc_mode);
		}

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
	}
9802 /* This works around an issue with Athlon chipsets on
9803 * B3 tigon3 silicon. This bit has no effect on any
9804 * other revision. But do not set this on PCI Express
9805 * chips and don't even touch the clocks if the CPMU is present.
9807 if (!tg3_flag(tp, CPMU_PRESENT)) {
9808 if (!tg3_flag(tp, PCI_EXPRESS))
9809 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9810 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9813 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9814 tg3_flag(tp, PCIX_MODE)) {
9815 val = tr32(TG3PCI_PCISTATE);
9816 val |= PCISTATE_RETRY_SAME_DMA;
9817 tw32(TG3PCI_PCISTATE, val);
9820 if (tg3_flag(tp, ENABLE_APE)) {
9821 /* Allow reads and writes to the
9822 * APE register and memory space.
9824 val = tr32(TG3PCI_PCISTATE);
9825 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9826 PCISTATE_ALLOW_APE_SHMEM_WR |
9827 PCISTATE_ALLOW_APE_PSPACE_WR;
9828 tw32(TG3PCI_PCISTATE, val);
9831 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9832 /* Enable some hw fixes. */
9833 val = tr32(TG3PCI_MSI_DATA);
9834 val |= (1 << 26) | (1 << 28) | (1 << 29);
9835 tw32(TG3PCI_MSI_DATA, val);
9838 /* Descriptor ring init may make accesses to the
9839 * NIC SRAM area to setup the TX descriptors, so we
9840 * can only do this after the hardware has been
9841 * successfully reset.
9843 err = tg3_init_rings(tp);
9847 if (tg3_flag(tp, 57765_PLUS)) {
9848 val = tr32(TG3PCI_DMA_RW_CTRL) &
9849 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9850 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9851 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9852 if (!tg3_flag(tp, 57765_CLASS) &&
9853 tg3_asic_rev(tp) != ASIC_REV_5717 &&
9854 tg3_asic_rev(tp) != ASIC_REV_5762)
9855 val |= DMA_RWCTRL_TAGGED_STAT_WA;
9856 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9857 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9858 tg3_asic_rev(tp) != ASIC_REV_5761) {
9859 /* This value is determined during the probe time DMA
9860 * engine test, tg3_test_dma.
9862 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9865 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9866 GRC_MODE_4X_NIC_SEND_RINGS |
9867 GRC_MODE_NO_TX_PHDR_CSUM |
9868 GRC_MODE_NO_RX_PHDR_CSUM);
9869 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processors, so make the chip do the pseudo-
	 * header checksums on receive. For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
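	/* For reference (an illustrative sketch, not part of this driver's
	 * TX path): on transmit the Linux stack seeds the TCP checksum field
	 * with the pseudo-header sum before handing the skb down, roughly:
	 *
	 *	tcp_hdr(skb)->check =
	 *		~csum_tcpudp_magic(iph->saddr, iph->daddr,
	 *				   skb->len - ip_hdrlen(skb),
	 *				   IPPROTO_TCP, 0);
	 *
	 * which is why GRC_MODE_NO_TX_PHDR_CSUM is set here, while on
	 * receive the chip is left to fold in the pseudo-header itself.
	 */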
	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
	if (tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9884 if (tg3_flag(tp, PTP_CAPABLE))
9885 val |= GRC_MODE_TIME_SYNC_ENABLE;
9887 tw32(GRC_MODE, tp->grc_mode | val);
	/* Set up the timer prescaler register. The clock is always 66 MHz. */
	val = tr32(GRC_MISC_CFG);
	val &= ~GRC_MISC_CFG_PRESCALAR_MASK;
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);	/* 66 MHz / (65 + 1) = 1 MHz tick */
	tw32(GRC_MISC_CFG, val);
	/* Initialize MBUF/DESC pool. */
	if (tg3_flag(tp, 5750_PLUS)) {
		/* Do nothing. */
	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9899 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (tg3_asic_rev(tp) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9904 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9905 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	} else if (tg3_flag(tp, TSO_CAPABLE)) {
		int fw_len;

		/* Round the firmware length up to a 128-byte boundary. */
		fw_len = tp->fw_len;
		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
		tw32(BUFMGR_MB_POOL_ADDR,
		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
		tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
	}
9917 if (tp->dev->mtu <= ETH_DATA_LEN) {
9918 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9919 tp->bufmgr_config.mbuf_read_dma_low_water);
9920 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9921 tp->bufmgr_config.mbuf_mac_rx_low_water);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water);
	} else {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water_jumbo);
	}
9932 tw32(BUFMGR_DMA_LOW_WATER,
9933 tp->bufmgr_config.dma_low_water);
9934 tw32(BUFMGR_DMA_HIGH_WATER,
9935 tp->bufmgr_config.dma_high_water);
9937 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9938 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9939 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9940 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9941 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9942 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9943 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9944 tw32(BUFMGR_MODE, val);
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
		return -ENODEV;
	}
9955 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9956 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9958 tg3_setup_rxbd_thresholds(tp);
9960 /* Initialize TG3_BDINFO's at:
9961 * RCVDBDI_STD_BD: standard eth size rx ring
9962 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
9963 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
9966 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
9967 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
9968 * ring attribute flags
9969 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
9971 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9972 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 * The size of each ring is fixed in the firmware, but the location is
	 * configurable. (A sketch of this BDINFO write pattern appears after
	 * this function.)
	 */
9977 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9978 ((u64) tpr->rx_std_mapping >> 32));
9979 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9980 ((u64) tpr->rx_std_mapping & 0xffffffff));
9981 if (!tg3_flag(tp, 5717_PLUS))
9982 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9983 NIC_SRAM_RX_BUFFER_DESC);
9985 /* Disable the mini ring */
9986 if (!tg3_flag(tp, 5705_PLUS))
9987 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9988 BDINFO_FLAGS_DISABLED);
9990 /* Program the jumbo buffer descriptor ring control
9991 * blocks on those devices that have them.
9993 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9994 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9996 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9997 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9998 ((u64) tpr->rx_jmb_mapping >> 32));
9999 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10000 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10001 val = TG3_RX_JMB_RING_SIZE(tp) <<
10002 BDINFO_FLAGS_MAXLEN_SHIFT;
10003 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10004 val | BDINFO_FLAGS_USE_EXT_RECV);
10005 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10006 tg3_flag(tp, 57765_CLASS) ||
10007 tg3_asic_rev(tp) == ASIC_REV_5762)
				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
		} else {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     BDINFO_FLAGS_DISABLED);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			val = TG3_RX_STD_RING_SIZE(tp);
			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
			val |= (TG3_RX_STD_DMA_SZ << 2);
		} else
			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
	} else
		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;

	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10026 tpr->rx_std_prod_idx = tp->rx_pending;
10027 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10029 tpr->rx_jmb_prod_idx =
10030 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10031 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10033 tg3_rings_reset(tp);
10035 /* Initialize MAC address and backoff seed. */
10036 __tg3_set_mac_addr(tp, false);
10038 /* MTU + ethernet header + FCS + optional VLAN tag */
10039 tw32(MAC_RX_MTU_SIZE,
10040 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10042 /* The slot time is changed by tg3_setup_phy if we
10043 * run at gigabit with half duplex.
10045 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10046 (6 << TX_LENGTHS_IPG_SHIFT) |
10047 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10049 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10050 tg3_asic_rev(tp) == ASIC_REV_5762)
10051 val |= tr32(MAC_TX_LENGTHS) &
10052 (TX_LENGTHS_JMB_FRM_LEN_MSK |
10053 TX_LENGTHS_CNT_DWN_VAL_MSK);
10055 tw32(MAC_TX_LENGTHS, val);
10057 /* Receive rules. */
10058 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10059 tw32(RCVLPC_CONFIG, 0x0181);
	/* Calculate the RDMAC_MODE setting early; it is needed to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10064 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10065 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10066 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10067 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10068 RDMAC_MODE_LNGREAD_ENAB);
10070 if (tg3_asic_rev(tp) == ASIC_REV_5717)
10071 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10073 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10074 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10075 tg3_asic_rev(tp) == ASIC_REV_57780)
10076 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10077 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10078 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10080 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10081 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10082 if (tg3_flag(tp, TSO_CAPABLE) &&
10083 tg3_asic_rev(tp) == ASIC_REV_5705) {
10084 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10085 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10086 !tg3_flag(tp, IS_5788)) {
10087 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10091 if (tg3_flag(tp, PCI_EXPRESS))
10092 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10094 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10096 if (tp->dev->mtu <= ETH_DATA_LEN) {
10097 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10098 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10102 if (tg3_flag(tp, HW_TSO_1) ||
10103 tg3_flag(tp, HW_TSO_2) ||
10104 tg3_flag(tp, HW_TSO_3))
10105 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10107 if (tg3_flag(tp, 57765_PLUS) ||
10108 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10109 tg3_asic_rev(tp) == ASIC_REV_57780)
10110 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10112 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10113 tg3_asic_rev(tp) == ASIC_REV_5762)
10114 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10116 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10117 tg3_asic_rev(tp) == ASIC_REV_5784 ||
10118 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10119 tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS)) {
		u32 tgtreg;

		if (tg3_asic_rev(tp) == ASIC_REV_5762)
			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
		else
			tgtreg = TG3_RDMA_RSRVCTRL_REG;
10128 val = tr32(tgtreg);
10129 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10130 tg3_asic_rev(tp) == ASIC_REV_5762) {
10131 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10132 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10133 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10134 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10135 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10136 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10138 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10141 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10142 tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 tgtreg;

		if (tg3_asic_rev(tp) == ASIC_REV_5762)
			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
		else
			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;

		val = tr32(tgtreg);
		tw32(tgtreg, val |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
	}
10157 /* Receive/send statistics. */
10158 if (tg3_flag(tp, 5750_PLUS)) {
10159 val = tr32(RCVLPC_STATS_ENABLE);
10160 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10161 tw32(RCVLPC_STATS_ENABLE, val);
10162 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10163 tg3_flag(tp, TSO_CAPABLE)) {
10164 val = tr32(RCVLPC_STATS_ENABLE);
10165 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10166 tw32(RCVLPC_STATS_ENABLE, val);
10168 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10170 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10171 tw32(SNDDATAI_STATSENAB, 0xffffff);
10172 tw32(SNDDATAI_STATSCTRL,
10173 (SNDDATAI_SCTRL_ENABLE |
10174 SNDDATAI_SCTRL_FASTUPD));
	/* Set up the host coalescing engine. */
	tw32(HOSTCC_MODE, 0);
	for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
			break;
		udelay(10);
	}
10184 __tg3_set_coalesce(tp, &tp->coal);
10186 if (!tg3_flag(tp, 5705_PLUS)) {
10187 /* Status/statistics block address. See tg3_timer,
10188 * the tg3_periodic_fetch_stats call there, and
10189 * tg3_get_stats to see how this works for 5705/5750 chips.
10191 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10192 ((u64) tp->stats_mapping >> 32));
10193 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10194 ((u64) tp->stats_mapping & 0xffffffff));
10195 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10197 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10199 /* Clear statistics and status block memory areas */
10200 for (i = NIC_SRAM_STATS_BLK;
10201 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10202 i += sizeof(u32)) {
			tg3_write_mem(tp, i, 0);
			udelay(40);
		}
	}
10208 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10210 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10211 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10212 if (!tg3_flag(tp, 5705_PLUS))
10213 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10215 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10216 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10217 /* reset to prevent losing 1st rx packet intermittently */
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}
10222 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10223 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10224 MAC_MODE_FHDE_ENABLE;
10225 if (tg3_flag(tp, ENABLE_APE))
10226 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10227 if (!tg3_flag(tp, 5705_PLUS) &&
10228 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10229 tg3_asic_rev(tp) != ASIC_REV_5700)
10230 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode |
	       MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
	udelay(40);

	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
	 * If TG3_FLAG_IS_NIC is zero, we should read the
	 * register to preserve the GPIO settings for LOMs. The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * the hardware has been reset.
	 */
	if (!tg3_flag(tp, IS_NIC)) {
		u32 gpio_mask;
10243 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10244 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10245 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10247 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10248 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10249 GRC_LCLCTRL_GPIO_OUTPUT3;
10251 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10252 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10254 tp->grc_local_ctrl &= ~gpio_mask;
10255 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10257 /* GPIO1 must be driven high for eeprom write protect */
10258 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10259 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10260 GRC_LCLCTRL_GPIO_OUTPUT1);
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(100);
	}
10265 if (tg3_flag(tp, USING_MSIX)) {
10266 val = tr32(MSGINT_MODE);
10267 val |= MSGINT_MODE_ENABLE;
10268 if (tp->irq_cnt > 1)
10269 val |= MSGINT_MODE_MULTIVEC_EN;
10270 if (!tg3_flag(tp, 1SHOT_MSI))
10271 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10272 tw32(MSGINT_MODE, val);
10275 if (!tg3_flag(tp, 5705_PLUS)) {
10276 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10280 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10281 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10282 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10283 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10284 WDMAC_MODE_LNGREAD_ENAB);
10286 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10287 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10288 if (tg3_flag(tp, TSO_CAPABLE) &&
10289 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
			/* nothing */
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10293 !tg3_flag(tp, IS_5788)) {
10294 val |= WDMAC_MODE_RX_ACCEL;
10298 /* Enable host coalescing bug fix */
10299 if (tg3_flag(tp, 5755_PLUS))
10300 val |= WDMAC_MODE_STATUS_TAG_FIX;
10302 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10303 val |= WDMAC_MODE_BURST_ALL_DATA;
10305 tw32_f(WDMAC_MODE, val);
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
10313 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10314 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10315 pcix_cmd |= PCI_X_CMD_READ_2K;
10316 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10317 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10318 pcix_cmd |= PCI_X_CMD_READ_2K;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	tw32_f(RDMAC_MODE, rdmac_mode);
	udelay(40);
10327 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10328 tg3_asic_rev(tp) == ASIC_REV_5720) {
10329 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
				break;
		}
		if (i < TG3_NUM_RDMA_CHANNELS) {
10334 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10335 val |= tg3_lso_rd_dma_workaround_bit(tp);
10336 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
			tg3_flag_set(tp, 5719_5720_RDMA_BUG);
		}
	}
10341 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10342 if (!tg3_flag(tp, 5705_PLUS))
10343 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10345 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10346 tw32(SNDDATAC_MODE,
10347 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10349 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10351 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10352 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10353 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10354 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10355 val |= RCVDBDI_MODE_LRG_RING_SZ;
10356 tw32(RCVDBDI_MODE, val);
10357 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10358 if (tg3_flag(tp, HW_TSO_1) ||
10359 tg3_flag(tp, HW_TSO_2) ||
10360 tg3_flag(tp, HW_TSO_3))
10361 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10362 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10363 if (tg3_flag(tp, ENABLE_TSS))
10364 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10365 tw32(SNDBDI_MODE, val);
10366 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10368 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
		err = tg3_load_5701_a0_firmware_fix(tp);
		if (err)
			return err;
	}
10374 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10375 /* Ignore any errors for the firmware download. If download
10376 * fails, the device will operate with EEE disabled
10378 tg3_load_57766_firmware(tp);
10381 if (tg3_flag(tp, TSO_CAPABLE)) {
		err = tg3_load_tso_firmware(tp);
		if (err)
			return err;
	}
10387 tp->tx_mode = TX_MODE_ENABLE;
10389 if (tg3_flag(tp, 5755_PLUS) ||
10390 tg3_asic_rev(tp) == ASIC_REV_5906)
10391 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10393 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10394 tg3_asic_rev(tp) == ASIC_REV_5762) {
10395 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10396 tp->tx_mode &= ~val;
10397 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10400 tw32_f(MAC_TX_MODE, tp->tx_mode);
10403 if (tg3_flag(tp, ENABLE_RSS)) {
10404 tg3_rss_write_indir_tbl(tp);
10406 /* Setup the "secret" hash key. */
10407 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
10408 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
10409 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
10410 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
10411 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
10412 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
10413 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
10414 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
10415 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
10416 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
10419 tp->rx_mode = RX_MODE_ENABLE;
10420 if (tg3_flag(tp, 5755_PLUS))
10421 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10423 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10424 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10426 if (tg3_flag(tp, ENABLE_RSS))
10427 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10428 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10429 RX_MODE_RSS_IPV6_HASH_EN |
10430 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10431 RX_MODE_RSS_IPV4_HASH_EN |
10432 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10434 tw32_f(MAC_RX_MODE, tp->rx_mode);
10437 tw32(MAC_LED_CTRL, tp->led_ctrl);
10439 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10440 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);
10447 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10448 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10449 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10450 /* Set drive transmission level to 1.2V */
10451 /* only if the signal pre-emphasis bit is not set */
			val = tr32(MAC_SERDES_CFG);
			val &= 0xfffff000;
			val |= 0x880;
			tw32(MAC_SERDES_CFG, val);
		}
10457 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10458 tw32(MAC_SERDES_CFG, 0x616000);
	/* Prevent chip from dropping frames when flow control
	 * is enabled.
	 */
	if (tg3_flag(tp, 57765_CLASS))
		val = 1;
	else
		val = 2;
	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10470 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10471 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10472 /* Use hardware link auto-negotiation */
10473 tg3_flag_set(tp, HW_AUTONEG);
10476 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_asic_rev(tp) == ASIC_REV_5714) {
		u32 tmp;

		tmp = tr32(SERDES_RX_CTRL);
10481 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10482 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10483 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10484 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10487 if (!tg3_flag(tp, USE_PHYLIB)) {
10488 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10489 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		err = tg3_setup_phy(tp, false);
		if (err)
			return err;
		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			u32 tmp;

			/* Clear CRC stats. */
			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
				tg3_writephy(tp, MII_TG3_TEST1,
					     tmp | MII_TG3_TEST1_CRC_EN);
				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
			}
		}
	}
10508 __tg3_set_rx_mode(tp->dev);
10510 /* Initialize receive rules. */
10511 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10512 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10513 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10514 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
		limit = 8;
	else
		limit = 16;
	if (tg3_flag(tp, ENABLE_ASF))
		limit -= 4;

	/* Disable the unused rule slots; the cases deliberately fall through. */
	switch (limit) {
	case 16:
		tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
	case 15:
		tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
	case 14:
		tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
	case 13:
		tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
	case 12:
		tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
	case 11:
		tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
	case 10:
		tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
	case 9:
		tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
	case 8:
		tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
	case 7:
		tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
	case 6:
		tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
	case 5:
		tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
	case 4:
		/* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
	case 3:
		/* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
	case 2:
	case 1:
	default:
		break;
	}
10558 if (tg3_flag(tp, ENABLE_APE))
10559 /* Write our heartbeat update interval to APE. */
10560 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10561 APE_HOST_HEARTBEAT_INT_DISABLE);
	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);

	return 0;
}
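/* The ring control block writes in tg3_reset_hw() above all follow the
 * same BDINFO layout. A minimal sketch of that pattern (modeled on the
 * tg3_set_bdinfo() helper found elsewhere in this driver; the function
 * name here is illustrative only):
 */
static void tg3_set_bdinfo_sketch(struct tg3 *tp, u32 bdinfo_addr,
				  dma_addr_t mapping, u32 maxlen_flags,
				  u32 nic_addr)
{
	/* 64-bit host DMA address of the ring, written as two halves */
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	/* (max buffer size << 16) | ring attribute flags */
	tg3_write_mem(tp, (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		      maxlen_flags);
	/* Older chips also need the descriptor location in NIC SRAM. */
	if (!tg3_flag(tp, 5705_PLUS))
		tg3_write_mem(tp, (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}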
10568 /* Called at device open time to get the chip ready for
10569 * packet processing. Invoked with tp->lock held.
static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
{
	/* Chip may have been just powered on. If so, the boot code may still
	 * be running initialization. Wait for it to finish to avoid races in
	 * accessing the hardware.
	 */
	tg3_enable_register_access(tp);
	tg3_poll_fw(tp);

	tg3_switch_clocks(tp);

	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
{
	int i;

	for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
		u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;

		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);

		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
			memset(ocir, 0, TG3_OCIR_LEN);
	}
}
10603 /* sysfs attributes for hwmon */
static ssize_t tg3_show_temp(struct device *dev,
			     struct device_attribute *devattr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	u32 temperature;

	spin_lock_bh(&tp->lock);
	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
				sizeof(temperature));
	spin_unlock_bh(&tp->lock);
	return sprintf(buf, "%u\n", temperature);
}
10621 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10622 TG3_TEMP_SENSOR_OFFSET);
10623 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10624 TG3_TEMP_CAUTION_OFFSET);
10625 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10626 TG3_TEMP_MAX_OFFSET);
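/* Once registered, these show up as standard hwmon sysfs files under the
 * PCI device directory, e.g. (path illustrative; the PCI address varies):
 *
 *	$ cat /sys/bus/pci/devices/0000:02:00.0/temp1_input
 *
 * returning the sensor value read from the APE scratchpad.
 */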
static struct attribute *tg3_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	NULL
};

static const struct attribute_group tg3_group = {
	.attrs = tg3_attributes,
};
static void tg3_hwmon_close(struct tg3 *tp)
{
	if (tp->hwmon_dev) {
		hwmon_device_unregister(tp->hwmon_dev);
		tp->hwmon_dev = NULL;
		sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
	}
}
static void tg3_hwmon_open(struct tg3 *tp)
{
	int i, err;
	u32 size = 0;
	struct pci_dev *pdev = tp->pdev;
	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];

	tg3_sd_scan_scratchpad(tp, ocirs);

	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
		if (!ocirs[i].src_data_length)
			continue;

		size += ocirs[i].src_hdr_length;
		size += ocirs[i].src_data_length;
	}

	if (!size)
		return;

	/* Register hwmon sysfs hooks */
	err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
	if (err) {
		dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
		return;
	}

	tp->hwmon_dev = hwmon_device_register(&pdev->dev);
	if (IS_ERR(tp->hwmon_dev)) {
		tp->hwmon_dev = NULL;
		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
		sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
	}
}
/* Fold a 32-bit hardware counter into a 64-bit software accumulator,
 * carrying into the high word when the low-word addition wraps.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!tp->link_up)
		return;
10698 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10699 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10700 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10701 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10702 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10703 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10704 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10705 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10706 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10707 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10708 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10709 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10710 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10711 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10712 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
		u32 val;

		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
	}
10722 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10723 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10724 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10725 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10726 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10727 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10728 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10729 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10730 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10731 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10732 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10733 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10734 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10735 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10737 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10738 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10739 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10740 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
10753 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
static void tg3_chk_missed_msi(struct tg3 *tp)
{
	u32 i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				if (tnapi->chk_msi_cnt < 1) {
					tnapi->chk_msi_cnt++;
					continue;
				}
				tg3_msi(0, tnapi);
			}
		}
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;
10783 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10784 goto restart_timer;
10786 spin_lock(&tp->lock);
10788 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10789 tg3_flag(tp, 57765_CLASS))
10790 tg3_chk_missed_msi(tp);
10792 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10793 /* BCM4785: Flush posted writes from GbE to host memory. */
10797 if (!tg3_flag(tp, TAGGED_STATUS)) {
10798 /* All of this garbage is because when using non-tagged
10799 * IRQ status the mailbox/status_block protocol the chip
10800 * uses with the cpu is race prone.
10802 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10803 tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}
10810 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10811 spin_unlock(&tp->lock);
10812 tg3_reset_task_schedule(tp);
10813 goto restart_timer;
10817 /* This part only runs once per second. */
10818 if (!--tp->timer_counter) {
10819 if (tg3_flag(tp, 5705_PLUS))
10820 tg3_periodic_fetch_stats(tp);
10822 if (tp->setlpicnt && !--tp->setlpicnt)
10823 tg3_phy_eee_enable(tp);
		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, false);
10840 } else if (tg3_flag(tp, POLL_SERDES)) {
10841 u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (tp->link_up &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!tp->link_up &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					       (tp->mac_mode &
						~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, false);
			}
10864 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10865 tg3_flag(tp, 5780_CLASS)) {
10866 tg3_serdes_parallel_detect(tp);
10869 tp->timer_counter = tp->timer_multiplier;
10872 /* Heartbeat is only sent once every 2 seconds.
10874 * The heartbeat is to tell the ASF firmware that the host
10875 * driver is still alive. In the event that the OS crashes,
10876 * ASF needs to reset the hardware to free up the FIFO space
10877 * that may be filled with rx packets destined for the host.
10878 * If the FIFO is full, ASF will no longer function properly.
10880 * Unintended resets have been reported on real time kernels
		 * where the timer doesn't run on time. Netpoll will also have
		 * the same problem.
		 *
		 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
		 * to check the ring condition when the heartbeat is expiring
		 * before doing the reset. This will prevent most unintended
		 * resets.
		 */
10889 if (!--tp->asf_counter) {
10890 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10891 tg3_wait_for_event_ack(tp);
10893 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10894 FWCMD_NICDRV_ALIVE3);
10895 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10896 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10897 TG3_FW_UPDATE_TIMEOUT_SEC);
10899 tg3_generate_fw_event(tp);
10901 tp->asf_counter = tp->asf_multiplier;
	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
static void tg3_timer_init(struct tg3 *tp)
{
	if (tg3_flag(tp, TAGGED_STATUS) &&
	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    !tg3_flag(tp, 57765_CLASS))
		tp->timer_offset = HZ;
	else
		tp->timer_offset = HZ / 10;

	BUG_ON(tp->timer_offset > HZ);

	tp->timer_multiplier = (HZ / tp->timer_offset);
	tp->asf_multiplier = (HZ / tp->timer_offset) *
			     TG3_FW_UPDATE_FREQ_SEC;

	init_timer(&tp->timer);
	tp->timer.data = (unsigned long) tp;
	tp->timer.function = tg3_timer;
}
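/* Worked example: with HZ = 1000 and tagged status, timer_offset is 1000
 * jiffies (one second) and timer_multiplier is 1, so the once-per-second
 * work in tg3_timer() runs on every tick. Without tagged status the timer
 * fires every HZ/10 jiffies (100 ms) and timer_multiplier is 10, so that
 * work still runs once per second; asf_multiplier stretches the same
 * countdown to TG3_FW_UPDATE_FREQ_SEC seconds for the ASF heartbeat.
 */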
static void tg3_timer_start(struct tg3 *tp)
{
	tp->asf_counter = tp->asf_multiplier;
	tp->timer_counter = tp->timer_multiplier;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}
10945 /* Restart hardware after configuration changes, self-test, etc.
10946 * Invoked with tp->lock held.
static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	tg3_full_lock(tp, 0);
10976 if (!netif_running(tp->dev)) {
10977 tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);
10986 tg3_netif_stop(tp);
10988 tg3_full_lock(tp, 1);
10990 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10991 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10992 tp->write32_rx_mbox = tg3_write_flush_reg32;
10993 tg3_flag_set(tp, MBOX_WRITE_REORDER);
10994 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10997 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, true);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
	irq_handler_t fn;
	unsigned long flags;
	char *name;
	struct tg3_napi *tnapi = &tp->napi[irq_num];

	if (tp->irq_cnt == 1)
		name = tp->dev->name;
	else {
		name = &tnapi->irq_lbl[0];
		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
		name[IFNAMSIZ-1] = 0;
	}

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		fn = tg3_msi;
		if (tg3_flag(tp, 1SHOT_MSI))
			fn = tg3_msi_1shot;
		flags = 0;
	} else {
		fn = tg3_interrupt;
		if (tg3_flag(tp, TAGGED_STATUS))
			fn = tg3_interrupt_tagged;
		flags = IRQF_SHARED;
	}

	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
}
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);
11055 free_irq(tnapi->irq_vec, tnapi);
11058 * Turn off MSI one shot mode. Otherwise this test has no
11059 * observable way to know whether the interrupt was delivered.
11061 if (tg3_flag(tp, 57765_PLUS)) {
11062 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11063 tw32(MSGINT_MODE, val);
11066 err = request_irq(tnapi->irq_vec, tg3_test_isr,
11067 IRQF_SHARED, dev->name, tnapi);
11071 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11072 tg3_enable_ints(tp);
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);
11077 for (i = 0; i < 5; i++) {
11078 u32 int_mbox, misc_host_ctrl;
11080 int_mbox = tr32_mailbox(tnapi->int_mbox);
11081 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11083 if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}
11096 tg3_disable_ints(tp);
11098 free_irq(tnapi->irq_vec, tnapi);
	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;
11146 /* MSI test failed, go back to INTx mode */
11147 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11148 "to INTx mode. Please report this failure to the PCI "
11149 "maintainer and include system chipset information\n");
11151 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11153 pci_disable_msi(tp->pdev);
11155 tg3_flag_clear(tp, USING_MSI);
11156 tp->napi[0].irq_vec = tp->pdev->irq;
	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, true);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
static int tg3_request_firmware(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;

	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
			   tp->fw_needed);
		return -ENOENT;
	}

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course).
	 */
	tp->fw_len = be32_to_cpu(fw_hdr->len);	/* includes bss */
	if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
			   tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}

	/* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}
static u32 tg3_irq_count(struct tg3 *tp)
{
	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);

	if (irq_cnt > 1) {
		/* We want as many rx rings enabled as there are cpus.
		 * In multiqueue MSI-X mode, the first MSI-X vector
		 * only deals with link interrupts, etc, so we add
		 * one to the number of vectors we are requesting.
		 */
		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
	}

	return irq_cnt;
}
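/* Worked example: with rxq_cnt = 4 and txq_cnt = 1, irq_cnt starts at 4;
 * since that is greater than 1 (multiqueue MSI-X), one extra vector is
 * requested for the link/misc interrupt, giving min(5, tp->irq_max).
 */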
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];

	tp->txq_cnt = tp->txq_req;
	tp->rxq_cnt = tp->rxq_req;
	if (!tp->rxq_cnt)
		tp->rxq_cnt = netif_get_num_default_rss_queues();
	if (tp->rxq_cnt > tp->rxq_max)
		tp->rxq_cnt = tp->rxq_max;

	/* Disable multiple TX rings by default. Simple round-robin hardware
	 * scheduling of the TX rings can cause starvation of rings with
	 * small packets when other rings have TSO or jumbo packets.
	 */
	if (!tp->txq_req)
		tp->txq_cnt = 1;

	tp->irq_cnt = tg3_irq_count(tp);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
		tp->rxq_cnt = max(rc - 1, 1);
		if (tp->txq_cnt)
			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt == 1)
		return true;

	tg3_flag_set(tp, ENABLE_RSS);

	if (tp->txq_cnt > 1)
		tg3_flag_set(tp, ENABLE_TSS);

	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);

	return true;
}
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status. Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
	}

	if (tp->irq_cnt == 1) {
		tp->txq_cnt = 1;
		tp->rxq_cnt = 1;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
static void tg3_ints_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, USING_MSIX))
		pci_disable_msix(tp->pdev);
	else if (tg3_flag(tp, USING_MSI))
		pci_disable_msi(tp->pdev);
	tg3_flag_clear(tp, USING_MSI);
	tg3_flag_clear(tp, USING_MSIX);
	tg3_flag_clear(tp, ENABLE_RSS);
	tg3_flag_clear(tp, ENABLE_TSS);
}
static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
		     bool init)
{
	struct net_device *dev = tp->dev;
	int i, err;

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);
11351 /* The placement of this call is tied
11352 * to the setup and use of Host TX descriptors.
11354 err = tg3_alloc_consistent(tp);
11356 goto out_ints_fini;
11360 tg3_napi_enable(tp);
11362 for (i = 0; i < tp->irq_cnt; i++) {
11363 struct tg3_napi *tnapi = &tp->napi[i];
11364 err = tg3_request_irq(tp, i);
11366 for (i--; i >= 0; i--) {
11367 tnapi = &tp->napi[i];
11368 free_irq(tnapi->irq_vec, tnapi);
11370 goto out_napi_fini;
	tg3_full_lock(tp, 0);

	if (init)
		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	}

	tg3_full_unlock(tp);

	if (err)
		goto out_free_irq;
11390 if (test_irq && tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto out_napi_fini;
		}
11402 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11403 u32 val = tr32(PCIE_TRANSACTION_CFG);
11405 tw32(PCIE_TRANSACTION_CFG,
11406 val | PCIE_TRANS_CFG_1SHOT_MSI);
11412 tg3_hwmon_open(tp);
11414 tg3_full_lock(tp, 0);
11416 tg3_timer_start(tp);
11417 tg3_flag_set(tp, INIT_COMPLETE);
11418 tg3_enable_ints(tp);
11423 tg3_ptp_resume(tp);
11426 tg3_full_unlock(tp);
11428 netif_tx_start_all_queues(dev);
11431 * Reset loopback feature if it was turned on while the device was down
11432 * make sure that it's installed properly now.
11434 if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

out_free_irq:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

out_napi_fini:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

out_ints_fini:
	tg3_ints_fini(tp);

	return err;
}
static void tg3_stop(struct tg3 *tp)
{
	int i;

	tg3_reset_task_cancel(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_hwmon_close(tp);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);
}
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
			if (err) {
				netdev_warn(tp->dev, "EEE capability disabled\n");
				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
				netdev_warn(tp->dev, "EEE capability restored\n");
				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
			}
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
			if (err)
				return err;
		} else if (err) {
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}
11518 tg3_carrier_off(tp);
11520 err = tg3_power_up(tp);
11524 tg3_full_lock(tp, 0);
11526 tg3_disable_ints(tp);
11527 tg3_flag_clear(tp, INIT_COMPLETE);
11529 tg3_full_unlock(tp);
	err = tg3_start(tp,
			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
			true, true);
	if (err) {
		tg3_frob_aux_power(tp, false);
		pci_set_power_state(tp->pdev, PCI_D3hot);
	}

	if (tg3_flag(tp, PTP_CAPABLE)) {
		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
						   &tp->pdev->dev);
		if (IS_ERR(tp->ptp_clock))
			tp->ptp_clock = NULL;
	}

	return err;
}
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_ptp_fini(tp);

	tg3_stop(tp);

	/* Clear stats across close / open calls */
	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));

	tg3_power_down_prepare(tp);

	tg3_carrier_off(tp);

	return 0;
}
static inline u64 get_stat64(tg3_stat64_t *val)
{
	return ((u64)val->high << 32) | ((u64)val->low);
}
static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
		u32 val;

		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
11597 #define ESTAT_ADD(member) \
11598 estats->member = old_estats->member + \
11599 get_stat64(&hw_stats->member)
11601 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11603 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11604 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11606 ESTAT_ADD(rx_octets);
11607 ESTAT_ADD(rx_fragments);
11608 ESTAT_ADD(rx_ucast_packets);
11609 ESTAT_ADD(rx_mcast_packets);
11610 ESTAT_ADD(rx_bcast_packets);
11611 ESTAT_ADD(rx_fcs_errors);
11612 ESTAT_ADD(rx_align_errors);
11613 ESTAT_ADD(rx_xon_pause_rcvd);
11614 ESTAT_ADD(rx_xoff_pause_rcvd);
11615 ESTAT_ADD(rx_mac_ctrl_rcvd);
11616 ESTAT_ADD(rx_xoff_entered);
11617 ESTAT_ADD(rx_frame_too_long_errors);
11618 ESTAT_ADD(rx_jabbers);
11619 ESTAT_ADD(rx_undersize_packets);
11620 ESTAT_ADD(rx_in_length_errors);
11621 ESTAT_ADD(rx_out_length_errors);
11622 ESTAT_ADD(rx_64_or_less_octet_packets);
11623 ESTAT_ADD(rx_65_to_127_octet_packets);
11624 ESTAT_ADD(rx_128_to_255_octet_packets);
11625 ESTAT_ADD(rx_256_to_511_octet_packets);
11626 ESTAT_ADD(rx_512_to_1023_octet_packets);
11627 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11628 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11629 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11630 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11631 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11633 ESTAT_ADD(tx_octets);
11634 ESTAT_ADD(tx_collisions);
11635 ESTAT_ADD(tx_xon_sent);
11636 ESTAT_ADD(tx_xoff_sent);
11637 ESTAT_ADD(tx_flow_control);
11638 ESTAT_ADD(tx_mac_errors);
11639 ESTAT_ADD(tx_single_collisions);
11640 ESTAT_ADD(tx_mult_collisions);
11641 ESTAT_ADD(tx_deferred);
11642 ESTAT_ADD(tx_excessive_collisions);
11643 ESTAT_ADD(tx_late_collisions);
11644 ESTAT_ADD(tx_collide_2times);
11645 ESTAT_ADD(tx_collide_3times);
11646 ESTAT_ADD(tx_collide_4times);
11647 ESTAT_ADD(tx_collide_5times);
11648 ESTAT_ADD(tx_collide_6times);
11649 ESTAT_ADD(tx_collide_7times);
11650 ESTAT_ADD(tx_collide_8times);
11651 ESTAT_ADD(tx_collide_9times);
11652 ESTAT_ADD(tx_collide_10times);
11653 ESTAT_ADD(tx_collide_11times);
11654 ESTAT_ADD(tx_collide_12times);
11655 ESTAT_ADD(tx_collide_13times);
11656 ESTAT_ADD(tx_collide_14times);
11657 ESTAT_ADD(tx_collide_15times);
11658 ESTAT_ADD(tx_ucast_packets);
11659 ESTAT_ADD(tx_mcast_packets);
11660 ESTAT_ADD(tx_bcast_packets);
11661 ESTAT_ADD(tx_carrier_sense_errors);
11662 ESTAT_ADD(tx_discards);
11663 ESTAT_ADD(tx_errors);
11665 ESTAT_ADD(dma_writeq_full);
11666 ESTAT_ADD(dma_write_prioq_full);
11667 ESTAT_ADD(rxbds_empty);
11668 ESTAT_ADD(rx_discards);
11669 ESTAT_ADD(rx_errors);
11670 ESTAT_ADD(rx_threshold_hit);
11672 ESTAT_ADD(dma_readq_full);
11673 ESTAT_ADD(dma_read_prioq_full);
11674 ESTAT_ADD(tx_comp_queue_full);
11676 ESTAT_ADD(ring_set_send_prod_index);
11677 ESTAT_ADD(ring_status_update);
11678 ESTAT_ADD(nic_irqs);
11679 ESTAT_ADD(nic_avoided_irqs);
11680 ESTAT_ADD(nic_tx_threshold_hit);
11682 ESTAT_ADD(mbuf_lwm_thresh_hit);
11685 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11687 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11688 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11690 stats->rx_packets = old_stats->rx_packets +
11691 get_stat64(&hw_stats->rx_ucast_packets) +
11692 get_stat64(&hw_stats->rx_mcast_packets) +
11693 get_stat64(&hw_stats->rx_bcast_packets);
11695 stats->tx_packets = old_stats->tx_packets +
11696 get_stat64(&hw_stats->tx_ucast_packets) +
11697 get_stat64(&hw_stats->tx_mcast_packets) +
11698 get_stat64(&hw_stats->tx_bcast_packets);
11700 stats->rx_bytes = old_stats->rx_bytes +
11701 get_stat64(&hw_stats->rx_octets);
11702 stats->tx_bytes = old_stats->tx_bytes +
11703 get_stat64(&hw_stats->tx_octets);
11705 stats->rx_errors = old_stats->rx_errors +
11706 get_stat64(&hw_stats->rx_errors);
11707 stats->tx_errors = old_stats->tx_errors +
11708 get_stat64(&hw_stats->tx_errors) +
11709 get_stat64(&hw_stats->tx_mac_errors) +
11710 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11711 get_stat64(&hw_stats->tx_discards);
11713 stats->multicast = old_stats->multicast +
11714 get_stat64(&hw_stats->rx_mcast_packets);
11715 stats->collisions = old_stats->collisions +
11716 get_stat64(&hw_stats->tx_collisions);
11718 stats->rx_length_errors = old_stats->rx_length_errors +
11719 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11720 get_stat64(&hw_stats->rx_undersize_packets);
11722 stats->rx_over_errors = old_stats->rx_over_errors +
11723 get_stat64(&hw_stats->rxbds_empty);
11724 stats->rx_frame_errors = old_stats->rx_frame_errors +
11725 get_stat64(&hw_stats->rx_align_errors);
11726 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11727 get_stat64(&hw_stats->tx_discards);
11728 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11729 get_stat64(&hw_stats->tx_carrier_sense_errors);
11731 stats->rx_crc_errors = old_stats->rx_crc_errors +
11732 tg3_calc_crc_errors(tp);
11734 stats->rx_missed_errors = old_stats->rx_missed_errors +
11735 get_stat64(&hw_stats->rx_discards);
11737 stats->rx_dropped = tp->rx_dropped;
11738 stats->tx_dropped = tp->tx_dropped;
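/*
 * Standalone sketch (illustrative names, not driver API) of the
 * derivation pattern used above: each netdev counter is the sum of the
 * MAC's per-class counters on top of the pre-reset snapshot.
 */
#include <stdint.h>

struct demo_rx_classes {
	uint64_t ucast, mcast, bcast;	/* live hardware counters */
};

static uint64_t demo_rx_packets(uint64_t prev_snapshot,
				const struct demo_rx_classes *hw)
{
	/* snapshot taken before the last chip reset + live counters */
	return prev_snapshot + hw->ucast + hw->mcast + hw->bcast;
}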
11741 static int tg3_get_regs_len(struct net_device *dev)
11743 return TG3_REG_BLK_SIZE;
11746 static void tg3_get_regs(struct net_device *dev,
11747 struct ethtool_regs *regs, void *_p)
11749 struct tg3 *tp = netdev_priv(dev);
11753 memset(_p, 0, TG3_REG_BLK_SIZE);
11755 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11758 tg3_full_lock(tp, 0);
11760 tg3_dump_legacy_regs(tp, (u32 *)_p);
11762 tg3_full_unlock(tp);
11765 static int tg3_get_eeprom_len(struct net_device *dev)
11767 struct tg3 *tp = netdev_priv(dev);
11769 return tp->nvram_size;
11772 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11774 struct tg3 *tp = netdev_priv(dev);
11777 u32 i, offset, len, b_offset, b_count;
11780 if (tg3_flag(tp, NO_NVRAM))
11783 offset = eeprom->offset;
11787 eeprom->magic = TG3_EEPROM_MAGIC;
11790 /* adjustments to start on required 4 byte boundary */
11791 b_offset = offset & 3;
11792 b_count = 4 - b_offset;
11793 if (b_count > len) {
11794 /* i.e. offset=1 len=2 */
11795 b_count = len;
11797 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11800 memcpy(data, ((char *)&val) + b_offset, b_count);
11803 eeprom->len += b_count;
11806 /* read bytes up to the last 4 byte boundary */
11807 pd = &data[eeprom->len];
11808 for (i = 0; i < (len - (len & 3)); i += 4) {
11809 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11814 memcpy(pd + i, &val, 4);
11819 /* read last bytes not ending on 4 byte boundary */
11820 pd = &data[eeprom->len];
11822 b_offset = offset + len - b_count;
11823 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11826 memcpy(pd, &val, b_count);
11827 eeprom->len += b_count;
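/*
 * Standalone userspace sketch of the head/body/tail split performed by
 * tg3_get_eeprom() above: an arbitrary (offset, len) byte request is
 * served with 4-byte-aligned word reads.  read32() is a hypothetical
 * callback that, like tg3_nvram_read_be32(), returns the word with its
 * bytes in NVRAM order.
 */
#include <stdint.h>
#include <string.h>

typedef int (*read32_fn)(uint32_t word_off, uint32_t *val);

static int demo_read_bytes(read32_fn read32, uint32_t off, uint32_t len,
			   uint8_t *out)
{
	uint32_t val, head = off & 3;

	if (head) {				/* unaligned head */
		uint32_t n = 4 - head;

		if (n > len)
			n = len;
		if (read32(off - head, &val))
			return -1;
		memcpy(out, (uint8_t *)&val + head, n);
		out += n;
		off += n;
		len -= n;
	}
	while (len >= 4) {			/* aligned body */
		if (read32(off, &val))
			return -1;
		memcpy(out, &val, 4);
		out += 4;
		off += 4;
		len -= 4;
	}
	if (len) {				/* short tail */
		if (read32(off, &val))
			return -1;
		memcpy(out, &val, len);
	}
	return 0;
}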
11832 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11834 struct tg3 *tp = netdev_priv(dev);
11836 u32 offset, len, b_offset, odd_len;
11840 if (tg3_flag(tp, NO_NVRAM) ||
11841 eeprom->magic != TG3_EEPROM_MAGIC)
11844 offset = eeprom->offset;
11847 if ((b_offset = (offset & 3))) {
11848 /* adjustments to start on required 4 byte boundary */
11849 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11859 if (len & 3) {
11860 /* adjustments to end on required 4 byte boundary */
11861 odd_len = 1;
11862 len = (len + 3) & ~3;
11863 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11869 if (b_offset || odd_len) {
11870 buf = kmalloc(len, GFP_KERNEL);
11874 memcpy(buf, &start, 4);
11876 memcpy(buf+len-4, &end, 4);
11877 memcpy(buf + b_offset, data, eeprom->len);
11880 ret = tg3_nvram_write_block(tp, offset, len, buf);
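/*
 * Note on the sequence above: NVRAM is word-addressable, so an
 * unaligned or odd-length write is widened to whole 32-bit words.  The
 * bordering words ("start"/"end") are read first, and their bytes that
 * fall outside the caller's range are preserved in the bounce buffer
 * around the new data before the single block write.
 */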
11888 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11890 struct tg3 *tp = netdev_priv(dev);
11892 if (tg3_flag(tp, USE_PHYLIB)) {
11893 struct phy_device *phydev;
11894 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11896 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11897 return phy_ethtool_gset(phydev, cmd);
11900 cmd->supported = (SUPPORTED_Autoneg);
11902 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11903 cmd->supported |= (SUPPORTED_1000baseT_Half |
11904 SUPPORTED_1000baseT_Full);
11906 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11907 cmd->supported |= (SUPPORTED_100baseT_Half |
11908 SUPPORTED_100baseT_Full |
11909 SUPPORTED_10baseT_Half |
11910 SUPPORTED_10baseT_Full |
11911 SUPPORTED_TP);
11912 cmd->port = PORT_TP;
11913 } else {
11914 cmd->supported |= SUPPORTED_FIBRE;
11915 cmd->port = PORT_FIBRE;
11918 cmd->advertising = tp->link_config.advertising;
11919 if (tg3_flag(tp, PAUSE_AUTONEG)) {
11920 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11921 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11922 cmd->advertising |= ADVERTISED_Pause;
11924 cmd->advertising |= ADVERTISED_Pause |
11925 ADVERTISED_Asym_Pause;
11927 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11928 cmd->advertising |= ADVERTISED_Asym_Pause;
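/*
 * The chain above implements the standard 802.3 pause advertisement
 * encoding:
 *
 *	RX and TX	->  Pause
 *	RX only		->  Pause | Asym_Pause
 *	TX only		->  Asym_Pause
 */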
11931 if (netif_running(dev) && tp->link_up) {
11932 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11933 cmd->duplex = tp->link_config.active_duplex;
11934 cmd->lp_advertising = tp->link_config.rmt_adv;
11935 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11936 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11937 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11939 cmd->eth_tp_mdix = ETH_TP_MDI;
11942 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11943 cmd->duplex = DUPLEX_UNKNOWN;
11944 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11946 cmd->phy_address = tp->phy_addr;
11947 cmd->transceiver = XCVR_INTERNAL;
11948 cmd->autoneg = tp->link_config.autoneg;
11954 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11956 struct tg3 *tp = netdev_priv(dev);
11957 u32 speed = ethtool_cmd_speed(cmd);
11959 if (tg3_flag(tp, USE_PHYLIB)) {
11960 struct phy_device *phydev;
11961 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11963 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11964 return phy_ethtool_sset(phydev, cmd);
11967 if (cmd->autoneg != AUTONEG_ENABLE &&
11968 cmd->autoneg != AUTONEG_DISABLE)
11971 if (cmd->autoneg == AUTONEG_DISABLE &&
11972 cmd->duplex != DUPLEX_FULL &&
11973 cmd->duplex != DUPLEX_HALF)
11976 if (cmd->autoneg == AUTONEG_ENABLE) {
11977 u32 mask = ADVERTISED_Autoneg |
11978 ADVERTISED_Pause |
11979 ADVERTISED_Asym_Pause;
11981 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11982 mask |= ADVERTISED_1000baseT_Half |
11983 ADVERTISED_1000baseT_Full;
11985 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11986 mask |= ADVERTISED_100baseT_Half |
11987 ADVERTISED_100baseT_Full |
11988 ADVERTISED_10baseT_Half |
11989 ADVERTISED_10baseT_Full |
11990 ADVERTISED_TP;
11991 else
11992 mask |= ADVERTISED_FIBRE;
11994 if (cmd->advertising & ~mask)
11997 mask &= (ADVERTISED_1000baseT_Half |
11998 ADVERTISED_1000baseT_Full |
11999 ADVERTISED_100baseT_Half |
12000 ADVERTISED_100baseT_Full |
12001 ADVERTISED_10baseT_Half |
12002 ADVERTISED_10baseT_Full);
12004 cmd->advertising &= mask;
12006 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12007 if (speed != SPEED_1000)
12010 if (cmd->duplex != DUPLEX_FULL)
12013 if (speed != SPEED_100 &&
12019 tg3_full_lock(tp, 0);
12021 tp->link_config.autoneg = cmd->autoneg;
12022 if (cmd->autoneg == AUTONEG_ENABLE) {
12023 tp->link_config.advertising = (cmd->advertising |
12024 ADVERTISED_Autoneg);
12025 tp->link_config.speed = SPEED_UNKNOWN;
12026 tp->link_config.duplex = DUPLEX_UNKNOWN;
12028 tp->link_config.advertising = 0;
12029 tp->link_config.speed = speed;
12030 tp->link_config.duplex = cmd->duplex;
12033 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12035 tg3_warn_mgmt_link_flap(tp);
12037 if (netif_running(dev))
12038 tg3_setup_phy(tp, true);
12040 tg3_full_unlock(tp);
12045 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12047 struct tg3 *tp = netdev_priv(dev);
12049 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12050 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12051 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12052 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12055 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12057 struct tg3 *tp = netdev_priv(dev);
12059 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12060 wol->supported = WAKE_MAGIC;
12062 wol->supported = 0;
12064 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12065 wol->wolopts = WAKE_MAGIC;
12066 memset(&wol->sopass, 0, sizeof(wol->sopass));
12069 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12071 struct tg3 *tp = netdev_priv(dev);
12072 struct device *dp = &tp->pdev->dev;
12074 if (wol->wolopts & ~WAKE_MAGIC)
12076 if ((wol->wolopts & WAKE_MAGIC) &&
12077 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12080 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12082 spin_lock_bh(&tp->lock);
12083 if (device_may_wakeup(dp))
12084 tg3_flag_set(tp, WOL_ENABLE);
12086 tg3_flag_clear(tp, WOL_ENABLE);
12087 spin_unlock_bh(&tp->lock);
12092 static u32 tg3_get_msglevel(struct net_device *dev)
12094 struct tg3 *tp = netdev_priv(dev);
12095 return tp->msg_enable;
12098 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12100 struct tg3 *tp = netdev_priv(dev);
12101 tp->msg_enable = value;
12104 static int tg3_nway_reset(struct net_device *dev)
12106 struct tg3 *tp = netdev_priv(dev);
12109 if (!netif_running(dev))
12112 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12115 tg3_warn_mgmt_link_flap(tp);
12117 if (tg3_flag(tp, USE_PHYLIB)) {
12118 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12120 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
12124 spin_lock_bh(&tp->lock);
12126 tg3_readphy(tp, MII_BMCR, &bmcr);
12127 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12128 ((bmcr & BMCR_ANENABLE) ||
12129 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12130 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12134 spin_unlock_bh(&tp->lock);
12140 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12142 struct tg3 *tp = netdev_priv(dev);
12144 ering->rx_max_pending = tp->rx_std_ring_mask;
12145 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12146 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12148 ering->rx_jumbo_max_pending = 0;
12150 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12152 ering->rx_pending = tp->rx_pending;
12153 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12154 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12156 ering->rx_jumbo_pending = 0;
12158 ering->tx_pending = tp->napi[0].tx_pending;
12161 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12163 struct tg3 *tp = netdev_priv(dev);
12164 int i, irq_sync = 0, err = 0;
12166 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12167 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12168 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12169 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12170 (tg3_flag(tp, TSO_BUG) &&
12171 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12174 if (netif_running(dev)) {
12176 tg3_netif_stop(tp);
12180 tg3_full_lock(tp, irq_sync);
12182 tp->rx_pending = ering->rx_pending;
12184 if (tg3_flag(tp, MAX_RXPEND_64) &&
12185 tp->rx_pending > 63)
12186 tp->rx_pending = 63;
12187 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12189 for (i = 0; i < tp->irq_max; i++)
12190 tp->napi[i].tx_pending = ering->tx_pending;
12192 if (netif_running(dev)) {
12193 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12194 err = tg3_restart_hw(tp, false);
12196 tg3_netif_start(tp);
12199 tg3_full_unlock(tp);
12201 if (irq_sync && !err)
12207 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12209 struct tg3 *tp = netdev_priv(dev);
12211 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12213 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12214 epause->rx_pause = 1;
12216 epause->rx_pause = 0;
12218 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12219 epause->tx_pause = 1;
12221 epause->tx_pause = 0;
12224 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12226 struct tg3 *tp = netdev_priv(dev);
12229 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12230 tg3_warn_mgmt_link_flap(tp);
12232 if (tg3_flag(tp, USE_PHYLIB)) {
12234 struct phy_device *phydev;
12236 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12238 if (!(phydev->supported & SUPPORTED_Pause) ||
12239 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12240 (epause->rx_pause != epause->tx_pause)))
12243 tp->link_config.flowctrl = 0;
12244 if (epause->rx_pause) {
12245 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12247 if (epause->tx_pause) {
12248 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12249 newadv = ADVERTISED_Pause;
12251 newadv = ADVERTISED_Pause |
12252 ADVERTISED_Asym_Pause;
12253 } else if (epause->tx_pause) {
12254 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12255 newadv = ADVERTISED_Asym_Pause;
12259 if (epause->autoneg)
12260 tg3_flag_set(tp, PAUSE_AUTONEG);
12262 tg3_flag_clear(tp, PAUSE_AUTONEG);
12264 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12265 u32 oldadv = phydev->advertising &
12266 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12267 if (oldadv != newadv) {
12268 phydev->advertising &=
12269 ~(ADVERTISED_Pause |
12270 ADVERTISED_Asym_Pause);
12271 phydev->advertising |= newadv;
12272 if (phydev->autoneg) {
12273 /*
12274 * Always renegotiate the link to
12275 * inform our link partner of our
12276 * flow control settings, even if the
12277 * flow control is forced. Let
12278 * tg3_adjust_link() do the final
12279 * flow control setup.
12280 */
12281 return phy_start_aneg(phydev);
12285 if (!epause->autoneg)
12286 tg3_setup_flow_control(tp, 0, 0);
12288 tp->link_config.advertising &=
12289 ~(ADVERTISED_Pause |
12290 ADVERTISED_Asym_Pause);
12291 tp->link_config.advertising |= newadv;
12296 if (netif_running(dev)) {
12297 tg3_netif_stop(tp);
12301 tg3_full_lock(tp, irq_sync);
12303 if (epause->autoneg)
12304 tg3_flag_set(tp, PAUSE_AUTONEG);
12306 tg3_flag_clear(tp, PAUSE_AUTONEG);
12307 if (epause->rx_pause)
12308 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12310 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12311 if (epause->tx_pause)
12312 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12314 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12316 if (netif_running(dev)) {
12317 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12318 err = tg3_restart_hw(tp, false);
12320 tg3_netif_start(tp);
12323 tg3_full_unlock(tp);
12326 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12331 static int tg3_get_sset_count(struct net_device *dev, int sset)
12333 switch (sset) {
12334 case ETH_SS_TEST:
12335 return TG3_NUM_TEST;
12336 case ETH_SS_STATS:
12337 return TG3_NUM_STATS;
12338 default:
12339 return -EOPNOTSUPP;
12343 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12344 u32 *rules __always_unused)
12346 struct tg3 *tp = netdev_priv(dev);
12348 if (!tg3_flag(tp, SUPPORT_MSIX))
12349 return -EOPNOTSUPP;
12351 switch (info->cmd) {
12352 case ETHTOOL_GRXRINGS:
12353 if (netif_running(tp->dev))
12354 info->data = tp->rxq_cnt;
12356 info->data = num_online_cpus();
12357 if (info->data > TG3_RSS_MAX_NUM_QS)
12358 info->data = TG3_RSS_MAX_NUM_QS;
12361 /* The first interrupt vector only
12362 * handles link interrupts.
12368 return -EOPNOTSUPP;
12372 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12375 struct tg3 *tp = netdev_priv(dev);
12377 if (tg3_flag(tp, SUPPORT_MSIX))
12378 size = TG3_RSS_INDIR_TBL_SIZE;
12383 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
12385 struct tg3 *tp = netdev_priv(dev);
12388 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12389 indir[i] = tp->rss_ind_tbl[i];
12394 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
12396 struct tg3 *tp = netdev_priv(dev);
12399 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12400 tp->rss_ind_tbl[i] = indir[i];
12402 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12405 /* It is legal to write the indirection
12406 * table while the device is running.
12408 tg3_full_lock(tp, 0);
12409 tg3_rss_write_indir_tbl(tp);
12410 tg3_full_unlock(tp);
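/*
 * Standalone sketch of how an RSS indirection table is consumed
 * (illustrative names; the table size matches TG3_RSS_INDIR_TBL_SIZE,
 * assumed here to be 128): the packet's flow hash indexes the table,
 * and the table entry selects the rx queue.
 */
#include <stdint.h>

#define DEMO_INDIR_TBL_SIZE 128

static uint16_t demo_pick_rx_queue(const uint8_t *indir_tbl,
				   uint32_t flow_hash)
{
	return indir_tbl[flow_hash % DEMO_INDIR_TBL_SIZE];
}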
12415 static void tg3_get_channels(struct net_device *dev,
12416 struct ethtool_channels *channel)
12418 struct tg3 *tp = netdev_priv(dev);
12419 u32 deflt_qs = netif_get_num_default_rss_queues();
12421 channel->max_rx = tp->rxq_max;
12422 channel->max_tx = tp->txq_max;
12424 if (netif_running(dev)) {
12425 channel->rx_count = tp->rxq_cnt;
12426 channel->tx_count = tp->txq_cnt;
12429 channel->rx_count = tp->rxq_req;
12431 channel->rx_count = min(deflt_qs, tp->rxq_max);
12434 channel->tx_count = tp->txq_req;
12436 channel->tx_count = min(deflt_qs, tp->txq_max);
12440 static int tg3_set_channels(struct net_device *dev,
12441 struct ethtool_channels *channel)
12443 struct tg3 *tp = netdev_priv(dev);
12445 if (!tg3_flag(tp, SUPPORT_MSIX))
12446 return -EOPNOTSUPP;
12448 if (channel->rx_count > tp->rxq_max ||
12449 channel->tx_count > tp->txq_max)
12452 tp->rxq_req = channel->rx_count;
12453 tp->txq_req = channel->tx_count;
12455 if (!netif_running(dev))
12460 tg3_carrier_off(tp);
12462 tg3_start(tp, true, false, false);
12467 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12469 switch (stringset) {
12470 case ETH_SS_STATS:
12471 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12472 break;
12473 case ETH_SS_TEST:
12474 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12475 break;
12476 default:
12477 WARN_ON(1); /* we need a WARN() */
12478 break;
12482 static int tg3_set_phys_id(struct net_device *dev,
12483 enum ethtool_phys_id_state state)
12485 struct tg3 *tp = netdev_priv(dev);
12487 if (!netif_running(tp->dev))
12488 return -EAGAIN;
12490 switch (state) {
12491 case ETHTOOL_ID_ACTIVE:
12492 return 1; /* cycle on/off once per second */
12494 case ETHTOOL_ID_ON:
12495 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12496 LED_CTRL_1000MBPS_ON |
12497 LED_CTRL_100MBPS_ON |
12498 LED_CTRL_10MBPS_ON |
12499 LED_CTRL_TRAFFIC_OVERRIDE |
12500 LED_CTRL_TRAFFIC_BLINK |
12501 LED_CTRL_TRAFFIC_LED);
12504 case ETHTOOL_ID_OFF:
12505 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12506 LED_CTRL_TRAFFIC_OVERRIDE);
12509 case ETHTOOL_ID_INACTIVE:
12510 tw32(MAC_LED_CTRL, tp->led_ctrl);
12517 static void tg3_get_ethtool_stats(struct net_device *dev,
12518 struct ethtool_stats *estats, u64 *tmp_stats)
12520 struct tg3 *tp = netdev_priv(dev);
12523 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12525 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12528 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12532 u32 offset = 0, len = 0;
12535 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12538 if (magic == TG3_EEPROM_MAGIC) {
12539 for (offset = TG3_NVM_DIR_START;
12540 offset < TG3_NVM_DIR_END;
12541 offset += TG3_NVM_DIRENT_SIZE) {
12542 if (tg3_nvram_read(tp, offset, &val))
12545 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12546 TG3_NVM_DIRTYPE_EXTVPD)
12550 if (offset != TG3_NVM_DIR_END) {
12551 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12552 if (tg3_nvram_read(tp, offset + 4, &offset))
12555 offset = tg3_nvram_logical_addr(tp, offset);
12559 if (!offset || !len) {
12560 offset = TG3_NVM_VPD_OFF;
12561 len = TG3_NVM_VPD_LEN;
12564 buf = kmalloc(len, GFP_KERNEL);
12568 if (magic == TG3_EEPROM_MAGIC) {
12569 for (i = 0; i < len; i += 4) {
12570 /* The data is in little-endian format in NVRAM.
12571 * Use the big-endian read routines to preserve
12572 * the byte order as it exists in NVRAM.
12574 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12580 unsigned int pos = 0;
12582 ptr = (u8 *)&buf[0];
12583 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12584 cnt = pci_read_vpd(tp->pdev, pos,
12585 len - pos, ptr);
12586 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12604 #define NVRAM_TEST_SIZE 0x100
12605 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12606 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12607 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12608 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12609 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12610 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12611 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12612 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12614 static int tg3_test_nvram(struct tg3 *tp)
12616 u32 csum, magic, len;
12618 int i, j, k, err = 0, size;
12620 if (tg3_flag(tp, NO_NVRAM))
12623 if (tg3_nvram_read(tp, 0, &magic) != 0)
12626 if (magic == TG3_EEPROM_MAGIC)
12627 size = NVRAM_TEST_SIZE;
12628 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12629 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12630 TG3_EEPROM_SB_FORMAT_1) {
12631 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12632 case TG3_EEPROM_SB_REVISION_0:
12633 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12635 case TG3_EEPROM_SB_REVISION_2:
12636 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12638 case TG3_EEPROM_SB_REVISION_3:
12639 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12641 case TG3_EEPROM_SB_REVISION_4:
12642 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12644 case TG3_EEPROM_SB_REVISION_5:
12645 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12647 case TG3_EEPROM_SB_REVISION_6:
12648 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12655 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12656 size = NVRAM_SELFBOOT_HW_SIZE;
12660 buf = kmalloc(size, GFP_KERNEL);
12665 for (i = 0, j = 0; i < size; i += 4, j++) {
12666 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12673 /* Selfboot format */
12674 magic = be32_to_cpu(buf[0]);
12675 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12676 TG3_EEPROM_MAGIC_FW) {
12677 u8 *buf8 = (u8 *) buf, csum8 = 0;
12679 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12680 TG3_EEPROM_SB_REVISION_2) {
12681 /* For rev 2, the csum doesn't include the MBA. */
12682 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12684 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12687 for (i = 0; i < size; i++)
12700 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12701 TG3_EEPROM_MAGIC_HW) {
12702 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12703 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12704 u8 *buf8 = (u8 *) buf;
12706 /* Separate the parity bits and the data bytes. */
12707 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12708 if ((i == 0) || (i == 8)) {
12712 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12713 parity[k++] = buf8[i] & msk;
12715 } else if (i == 16) {
12719 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12720 parity[k++] = buf8[i] & msk;
12723 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12724 parity[k++] = buf8[i] & msk;
12727 data[j++] = buf8[i];
12731 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12732 u8 hw8 = hweight8(data[i]);
12734 if ((hw8 & 0x1) && parity[i])
12735 goto err_out;
12736 else if (!(hw8 & 0x1) && !parity[i])
12737 goto err_out;
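/*
 * The check above enforces odd parity: each stored parity bit must
 * complement its data byte so the combined number of ones is odd.  A
 * byte with an odd popcount (hweight8()) therefore needs a clear
 * parity bit, an even one a set bit; either mismatch fails the test.
 */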
12745 /* Bootstrap checksum at offset 0x10 */
12746 csum = calc_crc((unsigned char *) buf, 0x10);
12747 if (csum != le32_to_cpu(buf[0x10/4]))
12750 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12751 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12752 if (csum != le32_to_cpu(buf[0xfc/4]))
12757 buf = tg3_vpd_readblock(tp, &len);
12761 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12763 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12767 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12770 i += PCI_VPD_LRDT_TAG_SIZE;
12771 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12772 PCI_VPD_RO_KEYWORD_CHKSUM);
12776 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12778 for (i = 0; i <= j; i++)
12779 csum8 += ((u8 *)buf)[i];
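/*
 * The loop above implements the standard PCI VPD checksum rule: the
 * bytes from the start of the VPD data through the RV (checksum) field
 * itself must sum to zero mod 256, i.e. csum8 must end up 0.
 */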
12793 #define TG3_SERDES_TIMEOUT_SEC 2
12794 #define TG3_COPPER_TIMEOUT_SEC 6
12796 static int tg3_test_link(struct tg3 *tp)
12800 if (!netif_running(tp->dev))
12803 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12804 max = TG3_SERDES_TIMEOUT_SEC;
12806 max = TG3_COPPER_TIMEOUT_SEC;
12808 for (i = 0; i < max; i++) {
12812 if (msleep_interruptible(1000))
12819 /* Only test the commonly used registers */
12820 static int tg3_test_registers(struct tg3 *tp)
12822 int i, is_5705, is_5750;
12823 u32 offset, read_mask, write_mask, val, save_val, read_val;
12827 #define TG3_FL_5705 0x1
12828 #define TG3_FL_NOT_5705 0x2
12829 #define TG3_FL_NOT_5788 0x4
12830 #define TG3_FL_NOT_5750 0x8
12834 /* MAC Control Registers */
12835 { MAC_MODE, TG3_FL_NOT_5705,
12836 0x00000000, 0x00ef6f8c },
12837 { MAC_MODE, TG3_FL_5705,
12838 0x00000000, 0x01ef6b8c },
12839 { MAC_STATUS, TG3_FL_NOT_5705,
12840 0x03800107, 0x00000000 },
12841 { MAC_STATUS, TG3_FL_5705,
12842 0x03800100, 0x00000000 },
12843 { MAC_ADDR_0_HIGH, 0x0000,
12844 0x00000000, 0x0000ffff },
12845 { MAC_ADDR_0_LOW, 0x0000,
12846 0x00000000, 0xffffffff },
12847 { MAC_RX_MTU_SIZE, 0x0000,
12848 0x00000000, 0x0000ffff },
12849 { MAC_TX_MODE, 0x0000,
12850 0x00000000, 0x00000070 },
12851 { MAC_TX_LENGTHS, 0x0000,
12852 0x00000000, 0x00003fff },
12853 { MAC_RX_MODE, TG3_FL_NOT_5705,
12854 0x00000000, 0x000007fc },
12855 { MAC_RX_MODE, TG3_FL_5705,
12856 0x00000000, 0x000007dc },
12857 { MAC_HASH_REG_0, 0x0000,
12858 0x00000000, 0xffffffff },
12859 { MAC_HASH_REG_1, 0x0000,
12860 0x00000000, 0xffffffff },
12861 { MAC_HASH_REG_2, 0x0000,
12862 0x00000000, 0xffffffff },
12863 { MAC_HASH_REG_3, 0x0000,
12864 0x00000000, 0xffffffff },
12866 /* Receive Data and Receive BD Initiator Control Registers. */
12867 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12868 0x00000000, 0xffffffff },
12869 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12870 0x00000000, 0xffffffff },
12871 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12872 0x00000000, 0x00000003 },
12873 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12874 0x00000000, 0xffffffff },
12875 { RCVDBDI_STD_BD+0, 0x0000,
12876 0x00000000, 0xffffffff },
12877 { RCVDBDI_STD_BD+4, 0x0000,
12878 0x00000000, 0xffffffff },
12879 { RCVDBDI_STD_BD+8, 0x0000,
12880 0x00000000, 0xffff0002 },
12881 { RCVDBDI_STD_BD+0xc, 0x0000,
12882 0x00000000, 0xffffffff },
12884 /* Receive BD Initiator Control Registers. */
12885 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12886 0x00000000, 0xffffffff },
12887 { RCVBDI_STD_THRESH, TG3_FL_5705,
12888 0x00000000, 0x000003ff },
12889 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12890 0x00000000, 0xffffffff },
12892 /* Host Coalescing Control Registers. */
12893 { HOSTCC_MODE, TG3_FL_NOT_5705,
12894 0x00000000, 0x00000004 },
12895 { HOSTCC_MODE, TG3_FL_5705,
12896 0x00000000, 0x000000f6 },
12897 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12898 0x00000000, 0xffffffff },
12899 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12900 0x00000000, 0x000003ff },
12901 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12902 0x00000000, 0xffffffff },
12903 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12904 0x00000000, 0x000003ff },
12905 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12906 0x00000000, 0xffffffff },
12907 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12908 0x00000000, 0x000000ff },
12909 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12910 0x00000000, 0xffffffff },
12911 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12912 0x00000000, 0x000000ff },
12913 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12914 0x00000000, 0xffffffff },
12915 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12916 0x00000000, 0xffffffff },
12917 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12918 0x00000000, 0xffffffff },
12919 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12920 0x00000000, 0x000000ff },
12921 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12922 0x00000000, 0xffffffff },
12923 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12924 0x00000000, 0x000000ff },
12925 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12926 0x00000000, 0xffffffff },
12927 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12928 0x00000000, 0xffffffff },
12929 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12930 0x00000000, 0xffffffff },
12931 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12932 0x00000000, 0xffffffff },
12933 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12934 0x00000000, 0xffffffff },
12935 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12936 0xffffffff, 0x00000000 },
12937 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12938 0xffffffff, 0x00000000 },
12940 /* Buffer Manager Control Registers. */
12941 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12942 0x00000000, 0x007fff80 },
12943 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12944 0x00000000, 0x007fffff },
12945 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12946 0x00000000, 0x0000003f },
12947 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12948 0x00000000, 0x000001ff },
12949 { BUFMGR_MB_HIGH_WATER, 0x0000,
12950 0x00000000, 0x000001ff },
12951 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12952 0xffffffff, 0x00000000 },
12953 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12954 0xffffffff, 0x00000000 },
12956 /* Mailbox Registers */
12957 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12958 0x00000000, 0x000001ff },
12959 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12960 0x00000000, 0x000001ff },
12961 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12962 0x00000000, 0x000007ff },
12963 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12964 0x00000000, 0x000001ff },
12966 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12969 is_5705 = is_5750 = 0;
12970 if (tg3_flag(tp, 5705_PLUS)) {
12972 if (tg3_flag(tp, 5750_PLUS))
12976 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12977 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12980 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12983 if (tg3_flag(tp, IS_5788) &&
12984 (reg_tbl[i].flags & TG3_FL_NOT_5788))
12987 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12990 offset = (u32) reg_tbl[i].offset;
12991 read_mask = reg_tbl[i].read_mask;
12992 write_mask = reg_tbl[i].write_mask;
12994 /* Save the original register content */
12995 save_val = tr32(offset);
12997 /* Determine the read-only value. */
12998 read_val = save_val & read_mask;
13000 /* Write zero to the register, then make sure the read-only bits
13001 * are not changed and the read/write bits are all zeros.
13002 */
13003 tw32(offset, 0);
13005 val = tr32(offset);
13007 /* Test the read-only and read/write bits. */
13008 if (((val & read_mask) != read_val) || (val & write_mask))
13011 /* Write ones to all the bits defined by RdMask and WrMask, then
13012 * make sure the read-only bits are not changed and the
13013 * read/write bits are all ones.
13015 tw32(offset, read_mask | write_mask);
13017 val = tr32(offset);
13019 /* Test the read-only bits. */
13020 if ((val & read_mask) != read_val)
13023 /* Test the read/write bits. */
13024 if ((val & write_mask) != write_mask)
13027 tw32(offset, save_val);
13033 if (netif_msg_hw(tp))
13034 netdev_err(tp->dev,
13035 "Register test failed at offset %x\n", offset);
13036 tw32(offset, save_val);
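/*
 * Standalone sketch of the mask-driven probe applied to each table
 * entry above (hypothetical rd32/wr32 accessors): bits under read_mask
 * must keep their value, bits under write_mask must accept both the
 * all-zeros and all-ones patterns, and the register is restored
 * afterwards.
 */
#include <stdint.h>

typedef uint32_t (*rd32_fn)(uint32_t off);
typedef void (*wr32_fn)(uint32_t off, uint32_t val);

static int demo_test_reg(rd32_fn rd32, wr32_fn wr32, uint32_t off,
			 uint32_t read_mask, uint32_t write_mask)
{
	uint32_t save = rd32(off);
	uint32_t ro = save & read_mask;		/* expected RO value */
	uint32_t val;
	int err = -1;

	wr32(off, 0);				/* all-zeros pattern */
	val = rd32(off);
	if ((val & read_mask) != ro || (val & write_mask))
		goto out;

	wr32(off, read_mask | write_mask);	/* all-ones pattern */
	val = rd32(off);
	if ((val & read_mask) != ro ||
	    (val & write_mask) != write_mask)
		goto out;

	err = 0;
out:
	wr32(off, save);			/* always restore */
	return err;
}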
13040 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13042 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13046 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13047 for (j = 0; j < len; j += 4) {
13050 tg3_write_mem(tp, offset + j, test_pattern[i]);
13051 tg3_read_mem(tp, offset + j, &val);
13052 if (val != test_pattern[i])
13059 static int tg3_test_memory(struct tg3 *tp)
13061 static struct mem_entry {
13064 } mem_tbl_570x[] = {
13065 { 0x00000000, 0x00b50},
13066 { 0x00002000, 0x1c000},
13067 { 0xffffffff, 0x00000}
13068 }, mem_tbl_5705[] = {
13069 { 0x00000100, 0x0000c},
13070 { 0x00000200, 0x00008},
13071 { 0x00004000, 0x00800},
13072 { 0x00006000, 0x01000},
13073 { 0x00008000, 0x02000},
13074 { 0x00010000, 0x0e000},
13075 { 0xffffffff, 0x00000}
13076 }, mem_tbl_5755[] = {
13077 { 0x00000200, 0x00008},
13078 { 0x00004000, 0x00800},
13079 { 0x00006000, 0x00800},
13080 { 0x00008000, 0x02000},
13081 { 0x00010000, 0x0c000},
13082 { 0xffffffff, 0x00000}
13083 }, mem_tbl_5906[] = {
13084 { 0x00000200, 0x00008},
13085 { 0x00004000, 0x00400},
13086 { 0x00006000, 0x00400},
13087 { 0x00008000, 0x01000},
13088 { 0x00010000, 0x01000},
13089 { 0xffffffff, 0x00000}
13090 }, mem_tbl_5717[] = {
13091 { 0x00000200, 0x00008},
13092 { 0x00010000, 0x0a000},
13093 { 0x00020000, 0x13c00},
13094 { 0xffffffff, 0x00000}
13095 }, mem_tbl_57765[] = {
13096 { 0x00000200, 0x00008},
13097 { 0x00004000, 0x00800},
13098 { 0x00006000, 0x09800},
13099 { 0x00010000, 0x0a000},
13100 { 0xffffffff, 0x00000}
13102 struct mem_entry *mem_tbl;
13106 if (tg3_flag(tp, 5717_PLUS))
13107 mem_tbl = mem_tbl_5717;
13108 else if (tg3_flag(tp, 57765_CLASS) ||
13109 tg3_asic_rev(tp) == ASIC_REV_5762)
13110 mem_tbl = mem_tbl_57765;
13111 else if (tg3_flag(tp, 5755_PLUS))
13112 mem_tbl = mem_tbl_5755;
13113 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13114 mem_tbl = mem_tbl_5906;
13115 else if (tg3_flag(tp, 5705_PLUS))
13116 mem_tbl = mem_tbl_5705;
13118 mem_tbl = mem_tbl_570x;
13120 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13121 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13129 #define TG3_TSO_MSS 500
13131 #define TG3_TSO_IP_HDR_LEN 20
13132 #define TG3_TSO_TCP_HDR_LEN 20
13133 #define TG3_TSO_TCP_OPT_LEN 12
13135 static const u8 tg3_tso_header[] = {
13137 0x45, 0x00, 0x00, 0x00,
13138 0x00, 0x00, 0x40, 0x00,
13139 0x40, 0x06, 0x00, 0x00,
13140 0x0a, 0x00, 0x00, 0x01,
13141 0x0a, 0x00, 0x00, 0x02,
13142 0x0d, 0x00, 0xe0, 0x00,
13143 0x00, 0x00, 0x01, 0x00,
13144 0x00, 0x00, 0x02, 0x00,
13145 0x80, 0x10, 0x10, 0x00,
13146 0x14, 0x09, 0x00, 0x00,
13147 0x01, 0x01, 0x08, 0x0a,
13148 0x11, 0x11, 0x11, 0x11,
13149 0x11, 0x11, 0x11, 0x11,
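/*
 * Decoding the template above: a 20-byte IPv4 header (DF set, TTL 64,
 * protocol TCP, 10.0.0.1 -> 10.0.0.2, total length left 0) followed by
 * a 32-byte TCP header (data offset 8, ACK set) whose last 12 bytes
 * are a NOP/NOP/timestamp option.  tg3_run_loopback() patches the IP
 * total length in before transmitting.
 */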
13152 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13154 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13155 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13157 struct sk_buff *skb;
13158 u8 *tx_data, *rx_data;
13160 int num_pkts, tx_len, rx_len, i, err;
13161 struct tg3_rx_buffer_desc *desc;
13162 struct tg3_napi *tnapi, *rnapi;
13163 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13165 tnapi = &tp->napi[0];
13166 rnapi = &tp->napi[0];
13167 if (tp->irq_cnt > 1) {
13168 if (tg3_flag(tp, ENABLE_RSS))
13169 rnapi = &tp->napi[1];
13170 if (tg3_flag(tp, ENABLE_TSS))
13171 tnapi = &tp->napi[1];
13173 coal_now = tnapi->coal_now | rnapi->coal_now;
13178 skb = netdev_alloc_skb(tp->dev, tx_len);
13179 if (!skb)
13180 return -ENOMEM;
13182 tx_data = skb_put(skb, tx_len);
13183 memcpy(tx_data, tp->dev->dev_addr, 6);
13184 memset(tx_data + 6, 0x0, 8);
13186 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13188 if (tso_loopback) {
13189 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13191 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13192 TG3_TSO_TCP_OPT_LEN;
13194 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13195 sizeof(tg3_tso_header));
13197 mss = TG3_TSO_MSS;
13198 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13199 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13201 /* Set the total length field in the IP header */
13202 iph->tot_len = htons((u16)(mss + hdr_len));
13204 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13205 TXD_FLAG_CPU_POST_DMA);
13207 if (tg3_flag(tp, HW_TSO_1) ||
13208 tg3_flag(tp, HW_TSO_2) ||
13209 tg3_flag(tp, HW_TSO_3)) {
13211 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13212 th = (struct tcphdr *)&tx_data[val];
13215 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13217 if (tg3_flag(tp, HW_TSO_3)) {
13218 mss |= (hdr_len & 0xc) << 12;
13219 if (hdr_len & 0x10)
13220 base_flags |= 0x00000010;
13221 base_flags |= (hdr_len & 0x3e0) << 5;
13222 } else if (tg3_flag(tp, HW_TSO_2))
13223 mss |= hdr_len << 9;
13224 else if (tg3_flag(tp, HW_TSO_1) ||
13225 tg3_asic_rev(tp) == ASIC_REV_5705) {
13226 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13228 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
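/*
 * In the three branches above the header length is folded into the
 * descriptor fields the hardware expects: HW_TSO_3 scatters hdr_len
 * bits across both mss and base_flags, HW_TSO_2 places hdr_len in mss
 * bits 9 and up, and HW_TSO_1/5705 encodes only the TCP option length.
 */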
13231 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13234 data_off = ETH_HLEN;
13236 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13237 tx_len > VLAN_ETH_FRAME_LEN)
13238 base_flags |= TXD_FLAG_JMB_PKT;
13241 for (i = data_off; i < tx_len; i++)
13242 tx_data[i] = (u8) (i & 0xff);
13244 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13245 if (pci_dma_mapping_error(tp->pdev, map)) {
13246 dev_kfree_skb(skb);
13250 val = tnapi->tx_prod;
13251 tnapi->tx_buffers[val].skb = skb;
13252 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13254 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13259 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13261 budget = tg3_tx_avail(tnapi);
13262 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13263 base_flags | TXD_FLAG_END, mss, 0)) {
13264 tnapi->tx_buffers[val].skb = NULL;
13265 dev_kfree_skb(skb);
13271 /* Sync BD data before updating mailbox */
13274 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13275 tr32_mailbox(tnapi->prodmbox);
13279 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13280 for (i = 0; i < 35; i++) {
13281 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13286 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13287 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13288 if ((tx_idx == tnapi->tx_prod) &&
13289 (rx_idx == (rx_start_idx + num_pkts)))
13293 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13294 dev_kfree_skb(skb);
13296 if (tx_idx != tnapi->tx_prod)
13299 if (rx_idx != rx_start_idx + num_pkts)
13303 while (rx_idx != rx_start_idx) {
13304 desc = &rnapi->rx_rcb[rx_start_idx++];
13305 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13306 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13308 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13309 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13312 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13315 if (!tso_loopback) {
13316 if (rx_len != tx_len)
13319 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13320 if (opaque_key != RXD_OPAQUE_RING_STD)
13323 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13326 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13327 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13328 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13332 if (opaque_key == RXD_OPAQUE_RING_STD) {
13333 rx_data = tpr->rx_std_buffers[desc_idx].data;
13334 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13336 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13337 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13338 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13343 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13344 PCI_DMA_FROMDEVICE);
13346 rx_data += TG3_RX_OFFSET(tp);
13347 for (i = data_off; i < rx_len; i++, val++) {
13348 if (*(rx_data + i) != (u8) (val & 0xff))
13355 /* tg3_free_rings will unmap and free the rx_data */
13360 #define TG3_STD_LOOPBACK_FAILED 1
13361 #define TG3_JMB_LOOPBACK_FAILED 2
13362 #define TG3_TSO_LOOPBACK_FAILED 4
13363 #define TG3_LOOPBACK_FAILED \
13364 (TG3_STD_LOOPBACK_FAILED | \
13365 TG3_JMB_LOOPBACK_FAILED | \
13366 TG3_TSO_LOOPBACK_FAILED)
13368 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13372 u32 jmb_pkt_sz = 9000;
13374 if (tp->dma_limit)
13375 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13377 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13378 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13380 if (!netif_running(tp->dev)) {
13381 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13382 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13384 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13388 err = tg3_reset_hw(tp, true);
13390 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13391 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13393 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13397 if (tg3_flag(tp, ENABLE_RSS)) {
13400 /* Reroute all rx packets to the 1st queue */
13401 for (i = MAC_RSS_INDIR_TBL_0;
13402 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13403 tw32(i, 0x0);
13406 /* HW errata - mac loopback fails in some cases on 5780.
13407 * Normal traffic and PHY loopback are not affected by
13408 * errata. Also, the MAC loopback test is deprecated for
13409 * all newer ASIC revisions.
13411 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13412 !tg3_flag(tp, CPMU_PRESENT)) {
13413 tg3_mac_loopback(tp, true);
13415 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13416 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13418 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13419 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13420 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13422 tg3_mac_loopback(tp, false);
13425 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13426 !tg3_flag(tp, USE_PHYLIB)) {
13429 tg3_phy_lpbk_set(tp, 0, false);
13431 /* Wait for link */
13432 for (i = 0; i < 100; i++) {
13433 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13438 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13439 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13440 if (tg3_flag(tp, TSO_CAPABLE) &&
13441 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13442 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13443 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13444 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13445 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13448 tg3_phy_lpbk_set(tp, 0, true);
13450 /* All link indications report up, but the hardware
13451 * isn't really ready for about 20 msec. Double it
13452 * to be sure.
13453 */
13455 mdelay(40);
13456 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13457 data[TG3_EXT_LOOPB_TEST] |=
13458 TG3_STD_LOOPBACK_FAILED;
13459 if (tg3_flag(tp, TSO_CAPABLE) &&
13460 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13461 data[TG3_EXT_LOOPB_TEST] |=
13462 TG3_TSO_LOOPBACK_FAILED;
13463 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13464 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13465 data[TG3_EXT_LOOPB_TEST] |=
13466 TG3_JMB_LOOPBACK_FAILED;
13469 /* Re-enable gphy autopowerdown. */
13470 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13471 tg3_phy_toggle_apd(tp, true);
13474 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13475 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13478 tp->phy_flags |= eee_cap;
13483 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13486 struct tg3 *tp = netdev_priv(dev);
13487 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13489 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13490 if (tg3_power_up(tp)) {
13491 etest->flags |= ETH_TEST_FL_FAILED;
13492 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13495 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13498 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13500 if (tg3_test_nvram(tp) != 0) {
13501 etest->flags |= ETH_TEST_FL_FAILED;
13502 data[TG3_NVRAM_TEST] = 1;
13504 if (!doextlpbk && tg3_test_link(tp)) {
13505 etest->flags |= ETH_TEST_FL_FAILED;
13506 data[TG3_LINK_TEST] = 1;
13508 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13509 int err, err2 = 0, irq_sync = 0;
13511 if (netif_running(dev)) {
13513 tg3_netif_stop(tp);
13517 tg3_full_lock(tp, irq_sync);
13518 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13519 err = tg3_nvram_lock(tp);
13520 tg3_halt_cpu(tp, RX_CPU_BASE);
13521 if (!tg3_flag(tp, 5705_PLUS))
13522 tg3_halt_cpu(tp, TX_CPU_BASE);
13524 tg3_nvram_unlock(tp);
13526 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13529 if (tg3_test_registers(tp) != 0) {
13530 etest->flags |= ETH_TEST_FL_FAILED;
13531 data[TG3_REGISTER_TEST] = 1;
13534 if (tg3_test_memory(tp) != 0) {
13535 etest->flags |= ETH_TEST_FL_FAILED;
13536 data[TG3_MEMORY_TEST] = 1;
13540 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13542 if (tg3_test_loopback(tp, data, doextlpbk))
13543 etest->flags |= ETH_TEST_FL_FAILED;
13545 tg3_full_unlock(tp);
13547 if (tg3_test_interrupt(tp) != 0) {
13548 etest->flags |= ETH_TEST_FL_FAILED;
13549 data[TG3_INTERRUPT_TEST] = 1;
13552 tg3_full_lock(tp, 0);
13554 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13555 if (netif_running(dev)) {
13556 tg3_flag_set(tp, INIT_COMPLETE);
13557 err2 = tg3_restart_hw(tp, true);
13559 tg3_netif_start(tp);
13562 tg3_full_unlock(tp);
13564 if (irq_sync && !err2)
13567 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13568 tg3_power_down_prepare(tp);
13572 static int tg3_hwtstamp_ioctl(struct net_device *dev,
13573 struct ifreq *ifr, int cmd)
13575 struct tg3 *tp = netdev_priv(dev);
13576 struct hwtstamp_config stmpconf;
13578 if (!tg3_flag(tp, PTP_CAPABLE))
13581 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13584 if (stmpconf.flags)
13587 switch (stmpconf.tx_type) {
13588 case HWTSTAMP_TX_ON:
13589 tg3_flag_set(tp, TX_TSTAMP_EN);
13591 case HWTSTAMP_TX_OFF:
13592 tg3_flag_clear(tp, TX_TSTAMP_EN);
13598 switch (stmpconf.rx_filter) {
13599 case HWTSTAMP_FILTER_NONE:
13602 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13603 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13604 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13606 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13607 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13608 TG3_RX_PTP_CTL_SYNC_EVNT;
13610 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13611 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13612 TG3_RX_PTP_CTL_DELAY_REQ;
13614 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13615 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13616 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13618 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13619 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13620 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13622 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13623 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13624 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13626 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13627 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13628 TG3_RX_PTP_CTL_SYNC_EVNT;
13630 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13631 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13632 TG3_RX_PTP_CTL_SYNC_EVNT;
13634 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13635 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13636 TG3_RX_PTP_CTL_SYNC_EVNT;
13638 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13639 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13640 TG3_RX_PTP_CTL_DELAY_REQ;
13642 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13643 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13644 TG3_RX_PTP_CTL_DELAY_REQ;
13646 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13647 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13648 TG3_RX_PTP_CTL_DELAY_REQ;
13654 if (netif_running(dev) && tp->rxptpctl)
13655 tw32(TG3_RX_PTP_CTL,
13656 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13658 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13659 -EFAULT : 0;
13662 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13664 struct mii_ioctl_data *data = if_mii(ifr);
13665 struct tg3 *tp = netdev_priv(dev);
13668 if (tg3_flag(tp, USE_PHYLIB)) {
13669 struct phy_device *phydev;
13670 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13672 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
13673 return phy_mii_ioctl(phydev, ifr, cmd);
13676 switch (cmd) {
13677 case SIOCGMIIPHY:
13678 data->phy_id = tp->phy_addr;
13679 /* fallthru */
13681 case SIOCGMIIREG: {
13684 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13685 break; /* We have no PHY */
13687 if (!netif_running(dev))
13690 spin_lock_bh(&tp->lock);
13691 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13692 data->reg_num & 0x1f, &mii_regval);
13693 spin_unlock_bh(&tp->lock);
13695 data->val_out = mii_regval;
13697 return err;
13698 }
13700 case SIOCSMIIREG:
13701 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13702 break; /* We have no PHY */
13704 if (!netif_running(dev))
13707 spin_lock_bh(&tp->lock);
13708 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13709 data->reg_num & 0x1f, data->val_in);
13710 spin_unlock_bh(&tp->lock);
13714 case SIOCSHWTSTAMP:
13715 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13721 return -EOPNOTSUPP;
13724 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13726 struct tg3 *tp = netdev_priv(dev);
13728 memcpy(ec, &tp->coal, sizeof(*ec));
13732 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13734 struct tg3 *tp = netdev_priv(dev);
13735 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13736 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13738 if (!tg3_flag(tp, 5705_PLUS)) {
13739 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13740 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13741 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13742 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13745 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13746 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13747 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13748 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13749 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13750 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13751 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13752 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13753 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13754 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13757 /* No rx interrupts will be generated if both are zero */
13758 if ((ec->rx_coalesce_usecs == 0) &&
13759 (ec->rx_max_coalesced_frames == 0))
13762 /* No tx interrupts will be generated if both are zero */
13763 if ((ec->tx_coalesce_usecs == 0) &&
13764 (ec->tx_max_coalesced_frames == 0))
13767 /* Only copy relevant parameters, ignore all others. */
13768 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13769 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13770 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13771 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13772 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13773 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13774 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13775 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13776 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13778 if (netif_running(dev)) {
13779 tg3_full_lock(tp, 0);
13780 __tg3_set_coalesce(tp, &tp->coal);
13781 tg3_full_unlock(tp);
13786 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
13788 struct tg3 *tp = netdev_priv(dev);
13790 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13791 netdev_warn(tp->dev, "Board does not support EEE!\n");
13792 return -EOPNOTSUPP;
13795 if (edata->advertised != tp->eee.advertised) {
13796 netdev_warn(tp->dev,
13797 "Direct manipulation of EEE advertisement is not supported\n");
13801 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
13802 netdev_warn(tp->dev,
13803 "Maximal Tx Lpi timer supported is %#x(u)\n",
13804 TG3_CPMU_DBTMR1_LNKIDLE_MAX);
13810 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
13811 tg3_warn_mgmt_link_flap(tp);
13813 if (netif_running(tp->dev)) {
13814 tg3_full_lock(tp, 0);
13817 tg3_full_unlock(tp);
13823 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
13825 struct tg3 *tp = netdev_priv(dev);
13827 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13828 netdev_warn(tp->dev,
13829 "Board does not support EEE!\n");
13830 return -EOPNOTSUPP;
13837 static const struct ethtool_ops tg3_ethtool_ops = {
13838 .get_settings = tg3_get_settings,
13839 .set_settings = tg3_set_settings,
13840 .get_drvinfo = tg3_get_drvinfo,
13841 .get_regs_len = tg3_get_regs_len,
13842 .get_regs = tg3_get_regs,
13843 .get_wol = tg3_get_wol,
13844 .set_wol = tg3_set_wol,
13845 .get_msglevel = tg3_get_msglevel,
13846 .set_msglevel = tg3_set_msglevel,
13847 .nway_reset = tg3_nway_reset,
13848 .get_link = ethtool_op_get_link,
13849 .get_eeprom_len = tg3_get_eeprom_len,
13850 .get_eeprom = tg3_get_eeprom,
13851 .set_eeprom = tg3_set_eeprom,
13852 .get_ringparam = tg3_get_ringparam,
13853 .set_ringparam = tg3_set_ringparam,
13854 .get_pauseparam = tg3_get_pauseparam,
13855 .set_pauseparam = tg3_set_pauseparam,
13856 .self_test = tg3_self_test,
13857 .get_strings = tg3_get_strings,
13858 .set_phys_id = tg3_set_phys_id,
13859 .get_ethtool_stats = tg3_get_ethtool_stats,
13860 .get_coalesce = tg3_get_coalesce,
13861 .set_coalesce = tg3_set_coalesce,
13862 .get_sset_count = tg3_get_sset_count,
13863 .get_rxnfc = tg3_get_rxnfc,
13864 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
13865 .get_rxfh_indir = tg3_get_rxfh_indir,
13866 .set_rxfh_indir = tg3_set_rxfh_indir,
13867 .get_channels = tg3_get_channels,
13868 .set_channels = tg3_set_channels,
13869 .get_ts_info = tg3_get_ts_info,
13870 .get_eee = tg3_get_eee,
13871 .set_eee = tg3_set_eee,
13874 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13875 struct rtnl_link_stats64 *stats)
13877 struct tg3 *tp = netdev_priv(dev);
13879 spin_lock_bh(&tp->lock);
13880 if (!tp->hw_stats) {
13881 spin_unlock_bh(&tp->lock);
13882 return &tp->net_stats_prev;
13885 tg3_get_nstats(tp, stats);
13886 spin_unlock_bh(&tp->lock);
13891 static void tg3_set_rx_mode(struct net_device *dev)
13893 struct tg3 *tp = netdev_priv(dev);
13895 if (!netif_running(dev))
13898 tg3_full_lock(tp, 0);
13899 __tg3_set_rx_mode(dev);
13900 tg3_full_unlock(tp);
13903 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13904 int new_mtu)
13906 dev->mtu = new_mtu;
13908 if (new_mtu > ETH_DATA_LEN) {
13909 if (tg3_flag(tp, 5780_CLASS)) {
13910 netdev_update_features(dev);
13911 tg3_flag_clear(tp, TSO_CAPABLE);
13913 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13916 if (tg3_flag(tp, 5780_CLASS)) {
13917 tg3_flag_set(tp, TSO_CAPABLE);
13918 netdev_update_features(dev);
13920 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13924 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13926 struct tg3 *tp = netdev_priv(dev);
13928 bool reset_phy = false;
13930 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13933 if (!netif_running(dev)) {
13934 /* We'll just catch it later when the
13935 * device is up'd.
13936 */
13937 tg3_set_mtu(dev, tp, new_mtu);
13943 tg3_netif_stop(tp);
13945 tg3_full_lock(tp, 1);
13947 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13949 tg3_set_mtu(dev, tp, new_mtu);
13951 /* Reset PHY, otherwise the read DMA engine will be in a mode that
13952 * breaks all requests to 256 bytes.
13954 if (tg3_asic_rev(tp) == ASIC_REV_57766)
13955 reset_phy = true;
13957 err = tg3_restart_hw(tp, reset_phy);
13960 tg3_netif_start(tp);
13962 tg3_full_unlock(tp);
13970 static const struct net_device_ops tg3_netdev_ops = {
13971 .ndo_open = tg3_open,
13972 .ndo_stop = tg3_close,
13973 .ndo_start_xmit = tg3_start_xmit,
13974 .ndo_get_stats64 = tg3_get_stats64,
13975 .ndo_validate_addr = eth_validate_addr,
13976 .ndo_set_rx_mode = tg3_set_rx_mode,
13977 .ndo_set_mac_address = tg3_set_mac_addr,
13978 .ndo_do_ioctl = tg3_ioctl,
13979 .ndo_tx_timeout = tg3_tx_timeout,
13980 .ndo_change_mtu = tg3_change_mtu,
13981 .ndo_fix_features = tg3_fix_features,
13982 .ndo_set_features = tg3_set_features,
13983 #ifdef CONFIG_NET_POLL_CONTROLLER
13984 .ndo_poll_controller = tg3_poll_controller,
13988 static void tg3_get_eeprom_size(struct tg3 *tp)
13990 u32 cursize, val, magic;
13992 tp->nvram_size = EEPROM_CHIP_SIZE;
13994 if (tg3_nvram_read(tp, 0, &magic) != 0)
13997 if ((magic != TG3_EEPROM_MAGIC) &&
13998 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13999 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14003 * Size the chip by reading offsets at increasing powers of two.
14004 * When we encounter our validation signature, we know the addressing
14005 * has wrapped around, and thus have our chip size.
14009 while (cursize < tp->nvram_size) {
14010 if (tg3_nvram_read(tp, cursize, &val) != 0)
14011 return;
14013 if (val == magic)
14014 break;
14016 cursize <<= 1;
14019 tp->nvram_size = cursize;
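/*
 * Worked example of the loop above: with the magic word at offset 0 on
 * a 128 KB part, probes at successive powers of two return ordinary
 * data until the probe at 0x20000 wraps around to offset 0 and reads
 * the magic back, so cursize ends up 0x20000 (128 KB).
 */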
14022 static void tg3_get_nvram_size(struct tg3 *tp)
14026 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14029 /* Selfboot format */
14030 if (val != TG3_EEPROM_MAGIC) {
14031 tg3_get_eeprom_size(tp);
14035 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14037 /* This is confusing. We want to operate on the
14038 * 16-bit value at offset 0xf2. The tg3_nvram_read()
14039 * call will read from NVRAM and byteswap the data
14040 * according to the byteswapping settings for all
14041 * other register accesses. This ensures the data we
14042 * want will always reside in the lower 16-bits.
14043 * However, the data in NVRAM is in LE format, which
14044 * means the data from the NVRAM read will always be
14045 * opposite the endianness of the CPU. The 16-bit
14046 * byteswap then brings the data to CPU endianness.
14047 */
14048 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
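/*
 * Example of the conversion above: the half-word at 0xf2 holds the
 * device size in KB, little-endian; after the register-style byteswap
 * done by tg3_nvram_read() its two bytes sit in the low half of val in
 * the opposite of CPU order, so swab16() recovers the KB count and the
 * final * 1024 scales it to bytes.
 */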
14052 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
static void tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
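/* Decode the 5752-layout page-size field of NVRAM_CFG1 into bytes. */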
static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}
static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
static void tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
			tg3_flag_set(tp, NO_NVRAM);
			return;
		}

		switch (nvmpinstrp) {
		case FLASH_5762_EEPROM_HD:
			nvmpinstrp = FLASH_5720_EEPROM_HD;
			break;
		case FLASH_5762_EEPROM_LD:
			nvmpinstrp = FLASH_5720_EEPROM_LD;
			break;
		case FLASH_5720VENDOR_M_ST_M45PE20:
			/* This pinstrap supports multiple sizes, so force it
			 * to read the actual size from location 0xf0.
			 */
			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
			break;
		}
	}

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 val;

		if (tg3_nvram_read(tp, 0, &val))
			return;

		if (val != TG3_EEPROM_MAGIC &&
		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
			tg3_flag_set(tp, NO_NVRAM);
	}
}
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void tg3_nvram_init(struct tg3 *tp)
{
	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
			 tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			 tg3_asic_rev(tp) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
			 tg3_asic_rev(tp) == ASIC_REV_5762)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
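/* Fallback PHY ID lookup for boards whose NVRAM carries no usable PHY ID:
 * match on the PCI subsystem vendor/device pair (see tg3_phy_probe()).
 */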
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}

	return NULL;
}
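/* Read the hardware configuration (PHY ID, LED mode, WOL/ASF/APE bits)
 * from the NIC's shared SRAM, keeping safe defaults when no valid
 * signature is found.
 */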
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;
		}

		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
			    !tg3_flag(tp, 57765_PLUS) &&
			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
				tg3_flag_set(tp, ASPM_WORKAROUND);
			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	}
done:
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
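/* Read one OTP word through the APE: program the address, issue a read
 * command, then poll the status register for completion.
 */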
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;

	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
	udelay(100);

	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}
static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
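/* Seed link_config with every mode the PHY may advertise; autonegotiation
 * is enabled by default.
 */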
static void tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		adv |= ADVERTISED_1000baseT_Half |
		       ADVERTISED_1000baseT_Full;

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_UNKNOWN;
	tp->link_config.duplex = DUPLEX_UNKNOWN;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_UNKNOWN;
	tp->link_config.active_duplex = DUPLEX_UNKNOWN;

	tp->old_link = -1;
}
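/* Identify the PHY: prefer the ID read over MDIO, then the ID cached from
 * NVRAM in tg3_get_eeprom_hw_cfg(), then the subsystem-ID table above.
 */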
static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	if (!tg3_flag(tp, ENABLE_ASF) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (p) {
				tp->phy_id = p->phy_id;
			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* For now we saw the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to an BCM4785 and there are
				 * probably more. Just assume that the phy is
				 * supported when it is connected to a SSB core
				 * for now.
				 */
				return -ENODEV;
			}

			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

		tp->eee.supported = SUPPORTED_100baseT_Full |
				    SUPPORTED_1000baseT_Full;
		tp->eee.advertised = ADVERTISED_100baseT_Full |
				     ADVERTISED_1000baseT_Full;
		tp->eee.eee_enabled = 1;
		tp->eee.tx_lpi_enabled = 1;
		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
	}

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
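/* Parse the PCI VPD read-only section for the board part number and, on
 * Dell (PCI vendor ID 1028) boards, a firmware version prefix.
 */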
static void tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		if (len >= sizeof(tp->fw_ver))
			len = sizeof(tp->fw_ver) - 1;
		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
			 &vpd_data[j]);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    val != 0)
		return 0;

	return 1;
}
static void tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;

		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
static void tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}
static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;

	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
static void tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
static void tg3_probe_ncsi(struct tg3 *tp)
{
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
		tg3_flag_set(tp, APE_HAS_NCSI);
}
static void tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_flag(tp, APE_HAS_NCSI))
		fwtype = "NCSI";
	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
		fwtype = "SMASH";
	else
		fwtype = "DASH";

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}
static void tg3_read_otp_ver(struct tg3 *tp)
{
	u32 val, val2;

	if (tg3_asic_rev(tp) != ASIC_REV_5762)
		return;

	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
	    TG3_OTP_MAGIC0_VALID(val)) {
		u64 val64 = (u64) val << 32 | val2;
		u32 ver = 0;
		int i, vlen;

		for (i = 0; i < 7; i++) {
			if ((val64 & 0xff) == 0)
				break;
			ver = val64 & 0xff;
			val64 >>= 8;
		}
		vlen = strlen(tp->fw_ver);
		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
	}
}
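/* Assemble tp->fw_ver from the bootcode, selfboot, hardware-selfboot and
 * management firmware version fields, depending on the NVRAM format.
 */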
static void tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		tg3_read_otp_ver(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);

	if (tg3_flag(tp, ENABLE_ASF)) {
		if (tg3_flag(tp, ENABLE_APE)) {
			tg3_probe_ncsi(tp);
			if (!vpd_vers)
				tg3_read_dash_ver(tp);
		} else if (!vpd_vers) {
			tg3_read_mgmtfw_ver(tp);
		}
	}

	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}
static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
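/* Find the sibling PCI function of a dual-port 5704/5714 device. */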
static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
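/* Determine the chip revision ID, reading the product ID register on chips
 * that store it in the alternate location, then derive the chip class flags.
 */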
static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
	    tg3_asic_rev(tp) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}
static bool tg3_10_100_only_device(struct tg3 *tp,
				   const struct pci_device_id *ent)
{
	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;

	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		return true;

	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
				return true;
		} else {
			return true;
		}
	}

	return false;
}
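/* Probe-time discovery of chip bugs, capabilities and the register access
 * methods they require.  This must run before most MMIO accesses.
 */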
static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
{
	u32 misc_ctrl_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err;

	/* Force memory write invalidate off.  If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size.  The Broadcom driver has this
	 * workaround but turns MWI off all the time so never uses
	 * it.  This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* Important! -- Make sure register accesses are byteswapped
	 * correctly.  Also, for those chips that require it, make
	 * sure that indirect register accesses are enabled before
	 * the first operation.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	tg3_detect_asic_rev(tp, misc_ctrl_reg);
	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
	 * we need to disable memory and use config. cycles
	 * only to access all registers. The 5702/03 chips
	 * can mistakenly decode the special cycles from the
	 * ICH chipsets as memory write cycles, causing corruption
	 * of register and memory space. Only certain ICH bridges
	 * will drive special cycles with non-zero data during the
	 * address phase which can fall within the 5703's address
	 * range. This is not an ICH bug as the PCI spec allows
	 * non-zero address during special cycles. However, only
	 * these ICH bridges are known to drive non-zero addresses
	 * during special cycles.
	 *
	 * Since special cycles do not cross PCI bridges, we only
	 * enable this workaround if the 5703 is on the secondary
	 * bus of these ICH bridges.
	 */
	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
			u32	rev;
		} ich_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
			  0xa },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
			  PCI_ANY_ID },
			{ },
		};
		struct tg3_dev_id *pci_id = &ich_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor, pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (pci_id->rev != PCI_ANY_ID) {
				if (bridge->revision > pci_id->rev)
					continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number ==
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, ICH_WORKAROUND);
				pci_dev_put(bridge);
				break;
			}
		}
	}
	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
		} bridge_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
			{ },
		};
		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor,
						pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->busn_res.end >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 5701_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		}
	}
	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
	 * DMA addresses > 40-bit. This bridge may have other additional
	 * 57xx devices behind it in some 4-port NIC designs for example.
	 * Any tg3 device found behind the bridge will also need the 40-bit
	 * DMA workaround.
	 */
	if (tg3_flag(tp, 5780_CLASS)) {
		tg3_flag_set(tp, 40BIT_DMA_BUG);
		tp->msi_cap = tp->pdev->msi_cap;
	} else {
		struct pci_dev *bridge = NULL;

		do {
			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
						PCI_DEVICE_ID_SERVERWORKS_EPB,
						bridge);
			if (bridge && bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->busn_res.end >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 40BIT_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		} while (bridge);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tp->pdev_peer = tg3_find_peer(tp);
	/* Determine TSO capabilities */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
		; /* Do nothing. HW bug. */
	else if (tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, HW_TSO_3);
	else if (tg3_flag(tp, 5755_PLUS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5906)
		tg3_flag_set(tp, HW_TSO_2);
	else if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, HW_TSO_1);
		tg3_flag_set(tp, TSO_BUG);
		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
			tg3_flag_clear(tp, TSO_BUG);
	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		tg3_flag_set(tp, FW_TSO);
		tg3_flag_set(tp, TSO_BUG);
		if (tg3_asic_rev(tp) == ASIC_REV_5705)
			tp->fw_needed = FIRMWARE_TG3TSO5;
		else
			tp->fw_needed = FIRMWARE_TG3TSO;
	}

	/* Selectively allow TSO based on operating conditions */
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3) ||
	    tg3_flag(tp, FW_TSO)) {
		/* For firmware TSO, assume ASF is disabled.
		 * We'll disable TSO later if we discover ASF
		 * is enabled in tg3_get_eeprom_hw_cfg().
		 */
		tg3_flag_set(tp, TSO_CAPABLE);
	} else {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
		tp->fw_needed = FIRMWARE_TG3;

	if (tg3_asic_rev(tp) == ASIC_REV_57766)
		tp->fw_needed = FIRMWARE_TG357766;

	tp->irq_max = 1;
	if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, SUPPORT_MSI);
		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
		     tp->pdev_peer == tp->pdev))
			tg3_flag_clear(tp, SUPPORT_MSI);

		if (tg3_flag(tp, 5755_PLUS) ||
		    tg3_asic_rev(tp) == ASIC_REV_5906) {
			tg3_flag_set(tp, 1SHOT_MSI);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			tg3_flag_set(tp, SUPPORT_MSIX);
			tp->irq_max = TG3_IRQ_MAX_VECS;
		}
	}

	tp->txq_max = 1;
	tp->rxq_max = 1;
	if (tp->irq_max > 1) {
		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);

		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			tp->txq_max = tp->irq_max - 1;
	}

	if (tg3_flag(tp, 5755_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5906)
		tg3_flag_set(tp, SHORT_DMA_BUG);

	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, LRG_PROD_RING_CAP);

	if (tg3_flag(tp, 57765_PLUS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
		tg3_flag_set(tp, USE_JUMBO_BDFLAG);

	if (!tg3_flag(tp, 5705_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_flag(tp, USE_JUMBO_BDFLAG))
		tg3_flag_set(tp, JUMBO_CAPABLE);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);

	if (pci_is_pcie(tp->pdev)) {
		u16 lnkctl;

		tg3_flag_set(tp, PCI_EXPRESS);

		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
				tg3_flag_clear(tp, HW_TSO_2);
				tg3_flag_clear(tp, TSO_CAPABLE);
			}
			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
				tg3_flag_set(tp, CLKREQ_BUG);
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
			tg3_flag_set(tp, L1PLLPD_EN);
		}
	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		/* BCM5785 devices are effectively PCIe devices, and should
		 * follow PCIe codepaths, but do not have a PCIe capabilities
		 * section.
		 */
		tg3_flag_set(tp, PCI_EXPRESS);
	} else if (!tg3_flag(tp, 5705_PLUS) ||
		   tg3_flag(tp, 5780_CLASS)) {
		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
		if (!tp->pcix_cap) {
			dev_err(&tp->pdev->dev,
				"Cannot find PCI-X capability, aborting\n");
			return -EIO;
		}

		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
			tg3_flag_set(tp, PCIX_MODE);
	}
16137 /* If we have an AMD 762 or VIA K8T800 chipset, write
16138 * reordering to the mailbox registers done by the host
16139 * controller can cause major troubles. We read back from
16140 * every mailbox register write to force the writes to be
16141 * posted to the chip in order.
16143 if (pci_dev_present(tg3_write_reorder_chipsets) &&
16144 !tg3_flag(tp, PCI_EXPRESS))
16145 tg3_flag_set(tp, MBOX_WRITE_REORDER);
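/* Illustrative sketch (guarded out, not from the original driver): with
 * MBOX_WRITE_REORDER set, every mailbox write is followed by a read back
 * so the AMD 762 / VIA K8T800 host bridge cannot post or reorder it.
 * "mbox" and "val" below are hypothetical names:
 */
#if 0
	writel(val, mbox);	/* the mailbox write the bridge might reorder */
	readl(mbox);		/* read back forces the write to complete in order */
#endif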
16147 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16148 &tp->pci_cacheline_sz);
16149 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16150 &tp->pci_lat_timer);
16151 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16152 tp->pci_lat_timer < 64) {
16153 tp->pci_lat_timer = 64;
16154 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16155 tp->pci_lat_timer);
16156 }
16158 /* Important! -- It is critical that the PCI-X hw workaround
16159 * situation is decided before the first MMIO register access.
16160 */
16161 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16162 /* 5700 BX chips need to have their TX producer index
16163 * mailboxes written twice to workaround a bug.
16164 */
16165 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16167 /* If we are in PCI-X mode, enable register write workaround.
16169 * The workaround is to use indirect register accesses
16170 * for all chip writes not to mailbox registers.
16171 */
16172 if (tg3_flag(tp, PCIX_MODE)) {
16173 u32 pm_reg;
16175 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16177 /* The chip can have its power management PCI config
16178 * space registers clobbered due to this bug.
16179 * So explicitly force the chip into D0 here.
16180 */
16181 pci_read_config_dword(tp->pdev,
16182 tp->pm_cap + PCI_PM_CTRL,
16183 &pm_reg);
16184 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16185 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16186 pci_write_config_dword(tp->pdev,
16187 tp->pm_cap + PCI_PM_CTRL,
16188 pm_reg);
16190 /* Also, force SERR#/PERR# in PCI command. */
16191 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16192 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16193 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16194 }
16195 }
16197 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16198 tg3_flag_set(tp, PCI_HIGH_SPEED);
16199 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16200 tg3_flag_set(tp, PCI_32BIT);
16202 /* Chip-specific fixup from Broadcom driver */
16203 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16204 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16205 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16206 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16207 }
16209 /* Default fast path register access methods */
16210 tp->read32 = tg3_read32;
16211 tp->write32 = tg3_write32;
16212 tp->read32_mbox = tg3_read32;
16213 tp->write32_mbox = tg3_write32;
16214 tp->write32_tx_mbox = tg3_write32;
16215 tp->write32_rx_mbox = tg3_write32;
16217 /* Various workaround register access methods */
16218 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16219 tp->write32 = tg3_write_indirect_reg32;
16220 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16221 (tg3_flag(tp, PCI_EXPRESS) &&
16222 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16223 /*
16224 * Back to back register writes can cause problems on these
16225 * chips, the workaround is to read back all reg writes
16226 * except those to mailbox regs.
16227 *
16228 * See tg3_write_indirect_reg32().
16229 */
16230 tp->write32 = tg3_write_flush_reg32;
16231 }
16233 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16234 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16235 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16236 tp->write32_rx_mbox = tg3_write_flush_reg32;
16237 }
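/* Illustrative sketch (guarded out, not from the original driver): the
 * tg3_write32_tx_mbox() selected above works around TXD_MBOX_HWBUG on
 * 5700 BX by issuing the TX producer index write twice, and
 * tg3_write_flush_reg32() forces posted writes out with a read back.
 * "mbox" and "val" below are hypothetical names:
 */
#if 0
	writel(val, mbox);	/* first write can be swallowed by the 5700 BX bug */
	writel(val, mbox);	/* doubled write ensures the producer index lands */
	readl(mbox);		/* read back flushes the posted write */
#endif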
16239 if (tg3_flag(tp, ICH_WORKAROUND)) {
16240 tp->read32 = tg3_read_indirect_reg32;
16241 tp->write32 = tg3_write_indirect_reg32;
16242 tp->read32_mbox = tg3_read_indirect_mbox;
16243 tp->write32_mbox = tg3_write_indirect_mbox;
16244 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16245 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16247 iounmap(tp->regs);
16248 tp->regs = NULL;
16250 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16251 pci_cmd &= ~PCI_COMMAND_MEMORY;
16252 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16253 }
16254 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16255 tp->read32_mbox = tg3_read32_mbox_5906;
16256 tp->write32_mbox = tg3_write32_mbox_5906;
16257 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16258 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16259 }
16261 if (tp->write32 == tg3_write_indirect_reg32 ||
16262 (tg3_flag(tp, PCIX_MODE) &&
16263 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16264 tg3_asic_rev(tp) == ASIC_REV_5701)))
16265 tg3_flag_set(tp, SRAM_USE_CONFIG);
16267 /* The memory arbiter has to be enabled in order for SRAM accesses
16268 * to succeed. Normally on powerup the tg3 chip firmware will make
16269 * sure it is enabled, but other entities such as system netboot
16270 * code might disable it.
16271 */
16272 val = tr32(MEMARB_MODE);
16273 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16275 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16276 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16277 tg3_flag(tp, 5780_CLASS)) {
16278 if (tg3_flag(tp, PCIX_MODE)) {
16279 pci_read_config_dword(tp->pdev,
16280 tp->pcix_cap + PCI_X_STATUS,
16281 &val);
16282 tp->pci_fn = val & 0x7;
16283 }
16284 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16285 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16286 tg3_asic_rev(tp) == ASIC_REV_5720) {
16287 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16288 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16289 val = tr32(TG3_CPMU_STATUS);
16291 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16292 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16293 else
16294 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16295 TG3_CPMU_STATUS_FSHFT_5719;
16296 }
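/* Illustrative sketch (guarded out, not from the original driver): the
 * function number is a bit field of TG3_CPMU_STATUS - a single flag on
 * 5717, a masked and shifted field on 5719/5720 - i.e. generically
 * ("mask" and "shift" are hypothetical names):
 */
#if 0
	tp->pci_fn = (val & mask) >> shift;
#endif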
16298 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16299 tp->write32_tx_mbox = tg3_write_flush_reg32;
16300 tp->write32_rx_mbox = tg3_write_flush_reg32;
16301 }
16303 /* Get eeprom hw config before calling tg3_set_power_state().
16304 * In particular, the TG3_FLAG_IS_NIC flag must be
16305 * determined before calling tg3_set_power_state() so that
16306 * we know whether or not to switch out of Vaux power.
16307 * When the flag is set, it means that GPIO1 is used for eeprom
16308 * write protect and also implies that it is a LOM where GPIOs
16309 * are not used to switch power.
16310 */
16311 tg3_get_eeprom_hw_cfg(tp);
16313 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16314 tg3_flag_clear(tp, TSO_CAPABLE);
16315 tg3_flag_clear(tp, TSO_BUG);
16316 tp->fw_needed = NULL;
16317 }
16319 if (tg3_flag(tp, ENABLE_APE)) {
16320 /* Allow reads and writes to the
16321 * APE register and memory space.
16322 */
16323 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16324 PCISTATE_ALLOW_APE_SHMEM_WR |
16325 PCISTATE_ALLOW_APE_PSPACE_WR;
16326 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16327 pci_state_reg);
16329 tg3_ape_lock_init(tp);
16330 }
16332 /* Set up tp->grc_local_ctrl before calling
16333 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16334 * will bring 5700's external PHY out of reset.
16335 * It is also used as eeprom write protect on LOMs.
16336 */
16337 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16338 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16339 tg3_flag(tp, EEPROM_WRITE_PROT))
16340 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16341 GRC_LCLCTRL_GPIO_OUTPUT1);
16342 /* Unused GPIO3 must be driven as output on 5752 because there
16343 * are no pull-up resistors on unused GPIO pins.
16344 */
16345 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16346 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16348 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16349 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16350 tg3_flag(tp, 57765_CLASS))
16351 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16353 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16354 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16355 /* Turn off the debug UART. */
16356 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16357 if (tg3_flag(tp, IS_NIC))
16358 /* Keep VMain power. */
16359 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16360 GRC_LCLCTRL_GPIO_OUTPUT0;
16361 }
16363 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16364 tp->grc_local_ctrl |=
16365 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16367 /* Switch out of Vaux if it is a NIC */
16368 tg3_pwrsrc_switch_to_vmain(tp);
16370 /* Derive initial jumbo mode from MTU assigned in
16371 * ether_setup() via the alloc_etherdev() call
16372 */
16373 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16374 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16376 /* Determine WakeOnLan speed to use. */
16377 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16378 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16379 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16380 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16381 tg3_flag_clear(tp, WOL_SPEED_100MB);
16382 } else {
16383 tg3_flag_set(tp, WOL_SPEED_100MB);
16384 }
16386 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16387 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16389 /* A few boards don't want Ethernet@WireSpeed phy feature */
16390 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16391 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16392 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16393 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16394 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16395 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16396 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16398 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16399 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16400 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16401 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16402 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16404 if (tg3_flag(tp, 5705_PLUS) &&
16405 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16406 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16407 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16408 !tg3_flag(tp, 57765_PLUS)) {
16409 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16410 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16411 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16412 tg3_asic_rev(tp) == ASIC_REV_5761) {
16413 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16414 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16415 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16416 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16417 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16418 } else
16419 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16420 }
16422 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16423 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16424 tp->phy_otp = tg3_read_otp_phycfg(tp);
16425 if (tp->phy_otp == 0)
16426 tp->phy_otp = TG3_OTP_DEFAULT;
16427 }
16429 if (tg3_flag(tp, CPMU_PRESENT))
16430 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16431 else
16432 tp->mi_mode = MAC_MI_MODE_BASE;
16434 tp->coalesce_mode = 0;
16435 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16436 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16437 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16439 /* Set these bits to enable statistics workaround. */
16440 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16441 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16442 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16443 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16444 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16445 }
16447 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16448 tg3_asic_rev(tp) == ASIC_REV_57780)
16449 tg3_flag_set(tp, USE_PHYLIB);
16451 err = tg3_mdio_init(tp);
16452 if (err)
16453 return err;
16455 /* Initialize data/descriptor byte/word swapping. */
16456 val = tr32(GRC_MODE);
16457 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16458 tg3_asic_rev(tp) == ASIC_REV_5762)
16459 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16460 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16461 GRC_MODE_B2HRX_ENABLE |
16462 GRC_MODE_HTX2B_ENABLE |
16463 GRC_MODE_HOST_STACKUP);
16464 else
16465 val &= GRC_MODE_HOST_STACKUP;
16467 tw32(GRC_MODE, val | tp->grc_mode);
16469 tg3_switch_clocks(tp);
16471 /* Clear this out for sanity. */
16472 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16474 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16475 &pci_state_reg);
16476 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16477 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16478 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16479 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16480 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16481 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16482 void __iomem *sram_base;
16484 /* Write some dummy words into the SRAM status block
16485 * area, see if it reads back correctly. If the return
16486 * value is bad, force enable the PCIX workaround.
16487 */
16488 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16490 writel(0x00000000, sram_base);
16491 writel(0x00000000, sram_base + 4);
16492 writel(0xffffffff, sram_base + 4);
16493 if (readl(sram_base) != 0x00000000)
16494 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16495 }
16496 }
16499 tg3_nvram_init(tp);
16501 /* If the device has an NVRAM, no need to load patch firmware */
16502 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16503 !tg3_flag(tp, NO_NVRAM))
16504 tp->fw_needed = NULL;
16506 grc_misc_cfg = tr32(GRC_MISC_CFG);
16507 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16509 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16510 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16511 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16512 tg3_flag_set(tp, IS_5788);
16514 if (!tg3_flag(tp, IS_5788) &&
16515 tg3_asic_rev(tp) != ASIC_REV_5700)
16516 tg3_flag_set(tp, TAGGED_STATUS);
16517 if (tg3_flag(tp, TAGGED_STATUS)) {
16518 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16519 HOSTCC_MODE_CLRTICK_TXBD);
16521 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16522 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16523 tp->misc_host_ctrl);
16524 }
16526 /* Preserve the APE MAC_MODE bits */
16527 if (tg3_flag(tp, ENABLE_APE))
16528 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16529 else
16530 tp->mac_mode = 0;
16532 if (tg3_10_100_only_device(tp, ent))
16533 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16535 err = tg3_phy_probe(tp);
16536 if (err) {
16537 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16538 /* ... but do not return immediately ... */
16539 tg3_mdio_fini(tp);
16540 }
16543 tg3_read_fw_ver(tp);
16545 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16546 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16547 } else {
16548 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16549 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16550 else
16551 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16552 }
16554 /* 5700 {AX,BX} chips have a broken status block link
16555 * change bit implementation, so we must use the
16556 * status register in those cases.
16557 */
16558 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16559 tg3_flag_set(tp, USE_LINKCHG_REG);
16560 else
16561 tg3_flag_clear(tp, USE_LINKCHG_REG);
16563 /* The led_ctrl is set during tg3_phy_probe, here we might
16564 * have to force the link status polling mechanism based
16565 * upon subsystem IDs.
16566 */
16567 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16568 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16569 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16570 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16571 tg3_flag_set(tp, USE_LINKCHG_REG);
16572 }
16574 /* For all SERDES we poll the MAC status register. */
16575 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16576 tg3_flag_set(tp, POLL_SERDES);
16577 else
16578 tg3_flag_clear(tp, POLL_SERDES);
16580 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16581 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16582 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16583 tg3_flag(tp, PCIX_MODE)) {
16584 tp->rx_offset = NET_SKB_PAD;
16585 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16586 tp->rx_copy_thresh = ~(u16)0;
16587 #endif
16588 }
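/* Illustrative sketch (guarded out, not from the original driver): a
 * rx_copy_thresh of ~(u16)0 (65535) effectively means "always copy" -
 * every frame below the threshold is copied into a fresh, aligned skb,
 * which keeps a PCI-X 5701 from doing unaligned DMA on architectures
 * without efficient unaligned access; conceptually the rx path tests:
 */
#if 0
	copy = (len < tp->rx_copy_thresh);	/* always true when thresh = 0xffff */
#endif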
16590 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16591 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16592 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16594 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16596 /* Increment the rx prod index on the rx std ring by at most
16597 * 8 for these chips to workaround hw errata.
16598 */
16599 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16600 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16601 tg3_asic_rev(tp) == ASIC_REV_5755)
16602 tp->rx_std_max_post = 8;
16604 if (tg3_flag(tp, ASPM_WORKAROUND))
16605 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16606 PCIE_PWR_MGMT_L1_THRESH_MSK;
16608 return err;
16609 }
16611 #ifdef CONFIG_SPARC
16612 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16613 {
16614 struct net_device *dev = tp->dev;
16615 struct pci_dev *pdev = tp->pdev;
16616 struct device_node *dp = pci_device_to_OF_node(pdev);
16617 const unsigned char *addr;
16618 int len;
16620 addr = of_get_property(dp, "local-mac-address", &len);
16621 if (addr && len == 6) {
16622 memcpy(dev->dev_addr, addr, 6);
16623 return 0;
16624 }
16625 return -ENODEV;
16626 }
16628 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16629 {
16630 struct net_device *dev = tp->dev;
16632 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
16633 return 0;
16634 }
16635 #endif
16637 static int tg3_get_device_address(struct tg3 *tp)
16638 {
16639 struct net_device *dev = tp->dev;
16640 u32 hi, lo, mac_offset;
16641 int addr_ok = 0;
16642 int err;
16644 #ifdef CONFIG_SPARC
16645 if (!tg3_get_macaddr_sparc(tp))
16646 return 0;
16647 #endif
16649 if (tg3_flag(tp, IS_SSB_CORE)) {
16650 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16651 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16652 return 0;
16653 }
16655 mac_offset = 0x7c;
16656 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16657 tg3_flag(tp, 5780_CLASS)) {
16658 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16659 mac_offset = 0xcc;
16660 if (tg3_nvram_lock(tp))
16661 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16662 else
16663 tg3_nvram_unlock(tp);
16664 } else if (tg3_flag(tp, 5717_PLUS)) {
16665 if (tp->pci_fn & 1)
16666 mac_offset = 0xcc;
16667 if (tp->pci_fn > 1)
16668 mac_offset += 0x18c;
16669 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16670 mac_offset = 0x10;
16672 /* First try to get it from MAC address mailbox. */
16673 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
16674 if ((hi >> 16) == 0x484b) {
16675 dev->dev_addr[0] = (hi >> 8) & 0xff;
16676 dev->dev_addr[1] = (hi >> 0) & 0xff;
16678 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16679 dev->dev_addr[2] = (lo >> 24) & 0xff;
16680 dev->dev_addr[3] = (lo >> 16) & 0xff;
16681 dev->dev_addr[4] = (lo >> 8) & 0xff;
16682 dev->dev_addr[5] = (lo >> 0) & 0xff;
16684 /* Some old bootcode may report a 0 MAC address in SRAM */
16685 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16686 }
16687 if (!addr_ok) {
16688 /* Next, try NVRAM. */
16689 if (!tg3_flag(tp, NO_NVRAM) &&
16690 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16691 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16692 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16693 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16694 }
16695 /* Finally just fetch it out of the MAC control regs. */
16696 else {
16697 hi = tr32(MAC_ADDR_0_HIGH);
16698 lo = tr32(MAC_ADDR_0_LOW);
16700 dev->dev_addr[5] = lo & 0xff;
16701 dev->dev_addr[4] = (lo >> 8) & 0xff;
16702 dev->dev_addr[3] = (lo >> 16) & 0xff;
16703 dev->dev_addr[2] = (lo >> 24) & 0xff;
16704 dev->dev_addr[1] = hi & 0xff;
16705 dev->dev_addr[0] = (hi >> 8) & 0xff;
16706 }
16707 }
16709 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16710 #ifdef CONFIG_SPARC
16711 if (!tg3_get_default_macaddr_sparc(tp))
16712 return 0;
16713 #endif
16714 return -EINVAL;
16715 }
16716 return 0;
16717 }
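/* Illustrative sketch, not part of the original driver: the mailbox path
 * above depends on bootcode storing "HK" (0x484b) plus the six address
 * bytes across two 32-bit SRAM words. A hypothetical standalone unpack
 * of that layout (helper name is invented for illustration):
 */
static inline void tg3_sketch_unpack_mac(u32 hi, u32 lo, u8 *addr)
{
	addr[0] = (hi >>  8) & 0xff;	/* hi = 0x484bAABB -> AA */
	addr[1] = (hi >>  0) & 0xff;	/* -> BB */
	addr[2] = (lo >> 24) & 0xff;	/* lo = 0xCCDDEEFF -> CC */
	addr[3] = (lo >> 16) & 0xff;	/* -> DD */
	addr[4] = (lo >>  8) & 0xff;	/* -> EE */
	addr[5] = (lo >>  0) & 0xff;	/* -> FF */
}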
16719 #define BOUNDARY_SINGLE_CACHELINE 1
16720 #define BOUNDARY_MULTI_CACHELINE 2
16722 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16723 {
16724 int cacheline_size;
16725 u8 byte;
16726 int goal;
16728 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16729 if (byte == 0)
16730 cacheline_size = 1024;
16731 else
16732 cacheline_size = (int) byte * 4;
16734 /* On 5703 and later chips, the boundary bits have no
16735 * effect.
16736 */
16737 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16738 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16739 !tg3_flag(tp, PCI_EXPRESS))
16740 goto out;
16742 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16743 goal = BOUNDARY_MULTI_CACHELINE;
16744 #else
16745 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16746 goal = BOUNDARY_SINGLE_CACHELINE;
16747 #else
16748 goal = 0;
16749 #endif
16750 #endif
16752 if (tg3_flag(tp, 57765_PLUS)) {
16753 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16754 goto out;
16755 }
16757 if (!goal)
16758 goto out;
16760 /* PCI controllers on most RISC systems tend to disconnect
16761 * when a device tries to burst across a cache-line boundary.
16762 * Therefore, letting tg3 do so just wastes PCI bandwidth.
16764 * Unfortunately, for PCI-E there are only limited
16765 * write-side controls for this, and thus for reads
16766 * we will still get the disconnects. We'll also waste
16767 * these PCI cycles for both read and write for chips
16768 * other than 5700 and 5701 which do not implement the
16769 * boundary bits.
16770 */
16771 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16772 switch (cacheline_size) {
16773 case 16:
16774 case 32:
16775 case 64:
16776 case 128:
16777 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16778 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16779 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16780 } else {
16781 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16782 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16783 }
16784 break;
16786 case 256:
16787 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16788 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16789 break;
16791 default:
16792 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16793 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16794 break;
16795 }
16796 } else if (tg3_flag(tp, PCI_EXPRESS)) {
16797 switch (cacheline_size) {
16798 case 16:
16799 case 32:
16800 case 64:
16801 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16802 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16803 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16804 break;
16805 }
16806 /* fallthrough */
16807 case 128:
16808 default:
16809 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16810 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16811 break;
16812 }
16813 } else {
16814 switch (cacheline_size) {
16815 case 16:
16816 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16817 val |= (DMA_RWCTRL_READ_BNDRY_16 |
16818 DMA_RWCTRL_WRITE_BNDRY_16);
16819 break;
16820 }
16821 /* fallthrough */
16822 case 32:
16823 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16824 val |= (DMA_RWCTRL_READ_BNDRY_32 |
16825 DMA_RWCTRL_WRITE_BNDRY_32);
16826 break;
16827 }
16828 /* fallthrough */
16829 case 64:
16830 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16831 val |= (DMA_RWCTRL_READ_BNDRY_64 |
16832 DMA_RWCTRL_WRITE_BNDRY_64);
16833 break;
16834 }
16835 /* fallthrough */
16836 case 128:
16837 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16838 val |= (DMA_RWCTRL_READ_BNDRY_128 |
16839 DMA_RWCTRL_WRITE_BNDRY_128);
16840 break;
16841 }
16842 /* fallthrough */
16843 case 256:
16844 val |= (DMA_RWCTRL_READ_BNDRY_256 |
16845 DMA_RWCTRL_WRITE_BNDRY_256);
16846 break;
16847 case 512:
16848 val |= (DMA_RWCTRL_READ_BNDRY_512 |
16849 DMA_RWCTRL_WRITE_BNDRY_512);
16850 break;
16851 case 1024:
16852 default:
16853 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16854 DMA_RWCTRL_WRITE_BNDRY_1024);
16855 break;
16856 }
16857 }
16859 out:
16860 return val;
16861 }
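/* Illustrative sketch, not part of the original driver: PCI_CACHE_LINE_SIZE
 * is expressed in 32-bit words, so the code above multiplies the raw byte
 * by 4 (a raw 16 means a 64-byte line) and treats 0 as unknown, defaulting
 * to 1024. Hypothetical standalone form of that conversion:
 */
static inline int tg3_sketch_cacheline_bytes(u8 raw)
{
	return raw ? (int)raw * 4 : 1024;
}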
16863 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16864 int size, bool to_device)
16865 {
16866 struct tg3_internal_buffer_desc test_desc;
16867 u32 sram_dma_descs;
16868 int i, ret;
16870 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16872 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16873 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16874 tw32(RDMAC_STATUS, 0);
16875 tw32(WDMAC_STATUS, 0);
16877 tw32(BUFMGR_MODE, 0);
16878 tw32(FTQ_RESET, 0);
16880 test_desc.addr_hi = ((u64) buf_dma) >> 32;
16881 test_desc.addr_lo = buf_dma & 0xffffffff;
16882 test_desc.nic_mbuf = 0x00002100;
16883 test_desc.len = size;
16885 /*
16886 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
16887 * the *second* time the tg3 driver was getting loaded after an
16888 * initial scan.
16889 *
16890 * Broadcom tells me:
16891 * ...the DMA engine is connected to the GRC block and a DMA
16892 * reset may affect the GRC block in some unpredictable way...
16893 * The behavior of resets to individual blocks has not been tested.
16894 *
16895 * Broadcom noted the GRC reset will also reset all sub-components.
16896 */
16897 if (to_device) {
16898 test_desc.cqid_sqid = (13 << 8) | 2;
16900 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16901 udelay(40);
16902 } else {
16903 test_desc.cqid_sqid = (16 << 8) | 7;
16905 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16906 udelay(40);
16907 }
16908 test_desc.flags = 0x00000005;
16910 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16911 u32 val;
16913 val = *(((u32 *)&test_desc) + i);
16914 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16915 sram_dma_descs + (i * sizeof(u32)));
16916 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16917 }
16918 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
16920 if (to_device)
16921 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
16922 else
16923 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
16925 ret = -ENODEV;
16926 for (i = 0; i < 40; i++) {
16927 u32 val;
16929 if (to_device)
16930 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16931 else
16932 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16933 if ((val & 0xffff) == sram_dma_descs) {
16934 ret = 0;
16935 break;
16936 }
16938 udelay(100);
16939 }
16941 return ret;
16942 }
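/* Illustrative sketch, not part of the original driver: the completion
 * loop above polls the FIFO enqueue/dequeue register up to 40 times with
 * udelay(100), roughly 4 ms total, comparing the low 16 bits against the
 * descriptor address. The same poll as a hypothetical generic helper:
 */
static inline int tg3_sketch_poll_low16(struct tg3 *tp, u32 reg, u32 want)
{
	int i;

	for (i = 0; i < 40; i++) {
		if ((tr32(reg) & 0xffff) == (want & 0xffff))
			return 0;	/* descriptor consumed */
		udelay(100);
	}
	return -ENODEV;		/* timed out after ~4 ms */
}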
16944 #define TEST_BUFFER_SIZE 0x2000
16946 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16947 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16948 { },
16949 };
16951 static int tg3_test_dma(struct tg3 *tp)
16952 {
16953 dma_addr_t buf_dma;
16954 u32 *buf, saved_dma_rwctrl;
16955 int ret = 0;
16957 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16958 &buf_dma, GFP_KERNEL);
16959 if (!buf) {
16960 ret = -ENOMEM;
16961 goto out_nofree;
16962 }
16964 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16965 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16967 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16969 if (tg3_flag(tp, 57765_PLUS))
16970 goto out;
16972 if (tg3_flag(tp, PCI_EXPRESS)) {
16973 /* DMA read watermark not used on PCIE */
16974 tp->dma_rwctrl |= 0x00180000;
16975 } else if (!tg3_flag(tp, PCIX_MODE)) {
16976 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16977 tg3_asic_rev(tp) == ASIC_REV_5750)
16978 tp->dma_rwctrl |= 0x003f0000;
16979 else
16980 tp->dma_rwctrl |= 0x003f000f;
16981 } else {
16982 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16983 tg3_asic_rev(tp) == ASIC_REV_5704) {
16984 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16985 u32 read_water = 0x7;
16987 /* If the 5704 is behind the EPB bridge, we can
16988 * do the less restrictive ONE_DMA workaround for
16989 * better performance.
16990 */
16991 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16992 tg3_asic_rev(tp) == ASIC_REV_5704)
16993 tp->dma_rwctrl |= 0x8000;
16994 else if (ccval == 0x6 || ccval == 0x7)
16995 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16997 if (tg3_asic_rev(tp) == ASIC_REV_5703)
16998 read_water = 4;
16999 /* Set bit 23 to enable PCIX hw bug fix */
17000 tp->dma_rwctrl |=
17001 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17002 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17003 0x380000;
17004 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17005 /* 5780 always in PCIX mode */
17006 tp->dma_rwctrl |= 0x00144000;
17007 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17008 /* 5714 always in PCIX mode */
17009 tp->dma_rwctrl |= 0x00148000;
17010 } else {
17011 tp->dma_rwctrl |= 0x001b000f;
17012 }
17014 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17015 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17017 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17018 tg3_asic_rev(tp) == ASIC_REV_5704)
17019 tp->dma_rwctrl &= 0xfffffff0;
17021 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17022 tg3_asic_rev(tp) == ASIC_REV_5701) {
17023 /* Remove this if it causes problems for some boards. */
17024 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17026 /* On 5700/5701 chips, we need to set this bit.
17027 * Otherwise the chip will issue cacheline transactions
17028 * to streamable DMA memory with not all the byte
17029 * enables turned on. This is an error on several
17030 * RISC PCI controllers, in particular sparc64.
17032 * On 5703/5704 chips, this bit has been reassigned
17033 * a different meaning. In particular, it is used
17034 * on those chips to enable a PCI-X workaround.
17035 */
17036 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17037 }
17039 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17041 #if 0
17042 /* Unneeded, already done by tg3_get_invariants. */
17043 tg3_switch_clocks(tp);
17044 #endif
17046 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17047 tg3_asic_rev(tp) != ASIC_REV_5701)
17048 goto out;
17050 /* It is best to perform DMA test with maximum write burst size
17051 * to expose the 5700/5701 write DMA bug.
17052 */
17053 saved_dma_rwctrl = tp->dma_rwctrl;
17054 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17055 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17057 while (1) {
17058 u32 *p = buf, i;
17060 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17061 p[i] = i;
17063 /* Send the buffer to the chip. */
17064 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17065 if (ret) {
17066 dev_err(&tp->pdev->dev,
17067 "%s: Buffer write failed. err = %d\n",
17068 __func__, ret);
17069 break;
17070 }
17072 #if 0
17073 /* validate data reached card RAM correctly. */
17074 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17075 u32 val;
17076 tg3_read_mem(tp, 0x2100 + (i*4), &val);
17077 if (le32_to_cpu(val) != p[i]) {
17078 dev_err(&tp->pdev->dev,
17079 "%s: Buffer corrupted on device! "
17080 "(%d != %d)\n", __func__, val, i);
17081 /* ret = -ENODEV here? */
17082 }
17083 p[i] = 0;
17084 }
17085 #endif
17086 /* Now read it back. */
17087 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17088 if (ret) {
17089 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17090 "err = %d\n", __func__, ret);
17091 break;
17092 }
17094 /* Verify it. */
17095 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17096 if (p[i] == i)
17097 continue;
17099 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17100 DMA_RWCTRL_WRITE_BNDRY_16) {
17101 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17102 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17103 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17104 break;
17105 } else {
17106 dev_err(&tp->pdev->dev,
17107 "%s: Buffer corrupted on read back! "
17108 "(%d != %d)\n", __func__, p[i], i);
17109 ret = -ENODEV;
17110 goto out;
17111 }
17112 }
17114 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17115 /* Success. */
17116 ret = 0;
17117 break;
17118 }
17119 }
17120 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17121 DMA_RWCTRL_WRITE_BNDRY_16) {
17122 /* DMA test passed without adjusting DMA boundary,
17123 * now look for chipsets that are known to expose the
17124 * DMA bug without failing the test.
17125 */
17126 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17127 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17128 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17129 } else {
17130 /* Safe to use the calculated DMA boundary. */
17131 tp->dma_rwctrl = saved_dma_rwctrl;
17132 }
17134 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17135 }
17137 out:
17138 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17139 out_nofree:
17140 return ret;
17141 }
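/* Illustrative sketch, not part of the original driver: the test fills
 * the buffer with p[i] = i, DMAs it to the chip and back, and any
 * mismatch exposes the 5700/5701 write DMA bug. A hypothetical
 * standalone check of the same pattern:
 */
static inline int tg3_sketch_check_pattern(const u32 *p, int words)
{
	int i;

	for (i = 0; i < words; i++)
		if (p[i] != (u32)i)
			return i;	/* index of first corrupted word */
	return -1;			/* pattern intact */
}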
17143 static void tg3_init_bufmgr_config(struct tg3 *tp)
17144 {
17145 if (tg3_flag(tp, 57765_PLUS)) {
17146 tp->bufmgr_config.mbuf_read_dma_low_water =
17147 DEFAULT_MB_RDMA_LOW_WATER_5705;
17148 tp->bufmgr_config.mbuf_mac_rx_low_water =
17149 DEFAULT_MB_MACRX_LOW_WATER_57765;
17150 tp->bufmgr_config.mbuf_high_water =
17151 DEFAULT_MB_HIGH_WATER_57765;
17153 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17154 DEFAULT_MB_RDMA_LOW_WATER_5705;
17155 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17156 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17157 tp->bufmgr_config.mbuf_high_water_jumbo =
17158 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17159 } else if (tg3_flag(tp, 5705_PLUS)) {
17160 tp->bufmgr_config.mbuf_read_dma_low_water =
17161 DEFAULT_MB_RDMA_LOW_WATER_5705;
17162 tp->bufmgr_config.mbuf_mac_rx_low_water =
17163 DEFAULT_MB_MACRX_LOW_WATER_5705;
17164 tp->bufmgr_config.mbuf_high_water =
17165 DEFAULT_MB_HIGH_WATER_5705;
17166 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17167 tp->bufmgr_config.mbuf_mac_rx_low_water =
17168 DEFAULT_MB_MACRX_LOW_WATER_5906;
17169 tp->bufmgr_config.mbuf_high_water =
17170 DEFAULT_MB_HIGH_WATER_5906;
17171 }
17173 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17174 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17175 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17176 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17177 tp->bufmgr_config.mbuf_high_water_jumbo =
17178 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17179 } else {
17180 tp->bufmgr_config.mbuf_read_dma_low_water =
17181 DEFAULT_MB_RDMA_LOW_WATER;
17182 tp->bufmgr_config.mbuf_mac_rx_low_water =
17183 DEFAULT_MB_MACRX_LOW_WATER;
17184 tp->bufmgr_config.mbuf_high_water =
17185 DEFAULT_MB_HIGH_WATER;
17187 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17188 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17189 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17190 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17191 tp->bufmgr_config.mbuf_high_water_jumbo =
17192 DEFAULT_MB_HIGH_WATER_JUMBO;
17193 }
17195 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17196 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17197 }
17199 static char *tg3_phy_string(struct tg3 *tp)
17200 {
17201 switch (tp->phy_id & TG3_PHY_ID_MASK) {
17202 case TG3_PHY_ID_BCM5400: return "5400";
17203 case TG3_PHY_ID_BCM5401: return "5401";
17204 case TG3_PHY_ID_BCM5411: return "5411";
17205 case TG3_PHY_ID_BCM5701: return "5701";
17206 case TG3_PHY_ID_BCM5703: return "5703";
17207 case TG3_PHY_ID_BCM5704: return "5704";
17208 case TG3_PHY_ID_BCM5705: return "5705";
17209 case TG3_PHY_ID_BCM5750: return "5750";
17210 case TG3_PHY_ID_BCM5752: return "5752";
17211 case TG3_PHY_ID_BCM5714: return "5714";
17212 case TG3_PHY_ID_BCM5780: return "5780";
17213 case TG3_PHY_ID_BCM5755: return "5755";
17214 case TG3_PHY_ID_BCM5787: return "5787";
17215 case TG3_PHY_ID_BCM5784: return "5784";
17216 case TG3_PHY_ID_BCM5756: return "5722/5756";
17217 case TG3_PHY_ID_BCM5906: return "5906";
17218 case TG3_PHY_ID_BCM5761: return "5761";
17219 case TG3_PHY_ID_BCM5718C: return "5718C";
17220 case TG3_PHY_ID_BCM5718S: return "5718S";
17221 case TG3_PHY_ID_BCM57765: return "57765";
17222 case TG3_PHY_ID_BCM5719C: return "5719C";
17223 case TG3_PHY_ID_BCM5720C: return "5720C";
17224 case TG3_PHY_ID_BCM5762: return "5762C";
17225 case TG3_PHY_ID_BCM8002: return "8002/serdes";
17226 case 0: return "serdes";
17227 default: return "unknown";
17228 }
17229 }
17231 static char *tg3_bus_string(struct tg3 *tp, char *str)
17232 {
17233 if (tg3_flag(tp, PCI_EXPRESS)) {
17234 strcpy(str, "PCI Express");
17235 return str;
17236 } else if (tg3_flag(tp, PCIX_MODE)) {
17237 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17239 strcpy(str, "PCIX:");
17241 if ((clock_ctrl == 7) ||
17242 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17243 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17244 strcat(str, "133MHz");
17245 else if (clock_ctrl == 0)
17246 strcat(str, "33MHz");
17247 else if (clock_ctrl == 2)
17248 strcat(str, "50MHz");
17249 else if (clock_ctrl == 4)
17250 strcat(str, "66MHz");
17251 else if (clock_ctrl == 6)
17252 strcat(str, "100MHz");
17253 } else {
17254 strcpy(str, "PCI:");
17255 if (tg3_flag(tp, PCI_HIGH_SPEED))
17256 strcat(str, "66MHz");
17257 else
17258 strcat(str, "33MHz");
17259 }
17260 if (tg3_flag(tp, PCI_32BIT))
17261 strcat(str, ":32-bit");
17262 else
17263 strcat(str, ":64-bit");
17264 return str;
17265 }
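/* Illustrative sketch (guarded out, not from the original driver):
 * tg3_bus_string() fills a caller-supplied buffer and returns it; the
 * probe path below passes a 40-byte stack buffer, e.g.:
 */
#if 0
	char str[40];

	netdev_info(dev, "bus: %s\n", tg3_bus_string(tp, str));
#endif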
17267 static void tg3_init_coal(struct tg3 *tp)
17268 {
17269 struct ethtool_coalesce *ec = &tp->coal;
17271 memset(ec, 0, sizeof(*ec));
17272 ec->cmd = ETHTOOL_GCOALESCE;
17273 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17274 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17275 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17276 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17277 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17278 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17279 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17280 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17281 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17283 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17284 HOSTCC_MODE_CLRTICK_TXBD)) {
17285 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17286 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17287 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17288 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17289 }
17291 if (tg3_flag(tp, 5705_PLUS)) {
17292 ec->rx_coalesce_usecs_irq = 0;
17293 ec->tx_coalesce_usecs_irq = 0;
17294 ec->stats_block_coalesce_usecs = 0;
17295 }
17296 }
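/* Illustrative sketch (guarded out, not from the original driver): on
 * 5705_PLUS parts the per-IRQ coalescing knobs are zeroed above because
 * the hardware lacks the corresponding HOSTCC registers; ethtool then
 * reports these defaults straight from tp->coal, conceptually:
 */
#if 0
	struct ethtool_coalesce ec = tp->coal;	/* what ETHTOOL_GCOALESCE copies out */
#endif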
17298 static int tg3_init_one(struct pci_dev *pdev,
17299 const struct pci_device_id *ent)
17300 {
17301 struct net_device *dev;
17302 struct tg3 *tp;
17303 int i, err;
17304 u32 sndmbx, rcvmbx, intmbx;
17305 char str[40];
17306 u64 dma_mask, persist_dma_mask;
17307 netdev_features_t features = 0;
17309 printk_once(KERN_INFO "%s\n", version);
17311 err = pci_enable_device(pdev);
17312 if (err) {
17313 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17314 return err;
17315 }
17317 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17318 if (err) {
17319 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17320 goto err_out_disable_pdev;
17321 }
17323 pci_set_master(pdev);
17325 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17326 if (!dev) {
17327 err = -ENOMEM;
17328 goto err_out_free_res;
17329 }
17331 SET_NETDEV_DEV(dev, &pdev->dev);
17333 tp = netdev_priv(dev);
17334 tp->pdev = pdev;
17335 tp->dev = dev;
17336 tp->pm_cap = pdev->pm_cap;
17337 tp->rx_mode = TG3_DEF_RX_MODE;
17338 tp->tx_mode = TG3_DEF_TX_MODE;
17339 tp->irq_sync = 1;
17341 if (tg3_debug > 0)
17342 tp->msg_enable = tg3_debug;
17343 else
17344 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17346 if (pdev_is_ssb_gige_core(pdev)) {
17347 tg3_flag_set(tp, IS_SSB_CORE);
17348 if (ssb_gige_must_flush_posted_writes(pdev))
17349 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17350 if (ssb_gige_one_dma_at_once(pdev))
17351 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17352 if (ssb_gige_have_roboswitch(pdev))
17353 tg3_flag_set(tp, ROBOSWITCH);
17354 if (ssb_gige_is_rgmii(pdev))
17355 tg3_flag_set(tp, RGMII_MODE);
17356 }
17358 /* The word/byte swap controls here control register access byte
17359 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17360 * setting below.
17361 */
17362 tp->misc_host_ctrl =
17363 MISC_HOST_CTRL_MASK_PCI_INT |
17364 MISC_HOST_CTRL_WORD_SWAP |
17365 MISC_HOST_CTRL_INDIR_ACCESS |
17366 MISC_HOST_CTRL_PCISTATE_RW;
17368 /* The NONFRM (non-frame) byte/word swap controls take effect
17369 * on descriptor entries, anything which isn't packet data.
17371 * The StrongARM chips on the board (one for tx, one for rx)
17372 * are running in big-endian mode.
17373 */
17374 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17375 GRC_MODE_WSWAP_NONFRM_DATA);
17376 #ifdef __BIG_ENDIAN
17377 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17378 #endif
17379 spin_lock_init(&tp->lock);
17380 spin_lock_init(&tp->indirect_lock);
17381 INIT_WORK(&tp->reset_task, tg3_reset_task);
17383 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17384 if (!tp->regs) {
17385 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17386 err = -ENOMEM;
17387 goto err_out_free_dev;
17388 }
17390 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17391 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17392 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17393 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17394 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17395 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17396 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17397 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17398 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17399 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17400 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17401 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
17402 tg3_flag_set(tp, ENABLE_APE);
17403 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17404 if (!tp->aperegs) {
17405 dev_err(&pdev->dev,
17406 "Cannot map APE registers, aborting\n");
17408 goto err_out_iounmap;
17412 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17413 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17415 dev->ethtool_ops = &tg3_ethtool_ops;
17416 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17417 dev->netdev_ops = &tg3_netdev_ops;
17418 dev->irq = pdev->irq;
17420 err = tg3_get_invariants(tp, ent);
17421 if (err) {
17422 dev_err(&pdev->dev,
17423 "Problem fetching invariants of chip, aborting\n");
17424 goto err_out_apeunmap;
17425 }
17427 /* The EPB bridge inside 5714, 5715, and 5780 and any
17428 * device behind the EPB cannot support DMA addresses > 40-bit.
17429 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17430 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17431 * do DMA address check in tg3_start_xmit().
17432 */
17433 if (tg3_flag(tp, IS_5788))
17434 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17435 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17436 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17437 #ifdef CONFIG_HIGHMEM
17438 dma_mask = DMA_BIT_MASK(64);
17439 #endif
17440 } else
17441 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17443 /* Configure DMA attributes. */
17444 if (dma_mask > DMA_BIT_MASK(32)) {
17445 err = pci_set_dma_mask(pdev, dma_mask);
17446 if (!err) {
17447 features |= NETIF_F_HIGHDMA;
17448 err = pci_set_consistent_dma_mask(pdev,
17449 persist_dma_mask);
17450 if (err < 0) {
17451 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17452 "DMA for consistent allocations\n");
17453 goto err_out_apeunmap;
17454 }
17455 }
17456 }
17457 if (err || dma_mask == DMA_BIT_MASK(32)) {
17458 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17459 if (err) {
17460 dev_err(&pdev->dev,
17461 "No usable DMA configuration, aborting\n");
17462 goto err_out_apeunmap;
17463 }
17464 }
17466 tg3_init_bufmgr_config(tp);
17468 features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
17470 /* 5700 B0 chips do not support checksumming correctly due
17471 * to hardware bugs.
17472 */
17473 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17474 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17476 if (tg3_flag(tp, 5755_PLUS))
17477 features |= NETIF_F_IPV6_CSUM;
17478 }
17480 /* TSO is on by default on chips that support hardware TSO.
17481 * Firmware TSO on older chips gives lower performance, so it
17482 * is off by default, but can be enabled using ethtool.
17483 */
17484 if ((tg3_flag(tp, HW_TSO_1) ||
17485 tg3_flag(tp, HW_TSO_2) ||
17486 tg3_flag(tp, HW_TSO_3)) &&
17487 (features & NETIF_F_IP_CSUM))
17488 features |= NETIF_F_TSO;
17489 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17490 if (features & NETIF_F_IPV6_CSUM)
17491 features |= NETIF_F_TSO6;
17492 if (tg3_flag(tp, HW_TSO_3) ||
17493 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17494 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17495 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17496 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17497 tg3_asic_rev(tp) == ASIC_REV_57780)
17498 features |= NETIF_F_TSO_ECN;
17499 }
17501 dev->features |= features;
17502 dev->vlan_features |= features;
17504 /*
17505 * Add loopback capability only for a subset of devices that support
17506 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17507 * loopback for the remaining devices.
17508 */
17509 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17510 !tg3_flag(tp, CPMU_PRESENT))
17511 /* Add the loopback capability */
17512 features |= NETIF_F_LOOPBACK;
17514 dev->hw_features |= features;
17516 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17517 !tg3_flag(tp, TSO_CAPABLE) &&
17518 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17519 tg3_flag_set(tp, MAX_RXPEND_64);
17520 tp->rx_pending = 63;
17521 }
17523 err = tg3_get_device_address(tp);
17524 if (err) {
17525 dev_err(&pdev->dev,
17526 "Could not obtain valid ethernet address, aborting\n");
17527 goto err_out_apeunmap;
17528 }
17530 /*
17531 * Reset chip in case UNDI or EFI driver did not shut it down
17532 * cleanly; the DMA self test will enable WDMAC and we'll see
17533 * (spurious) pending DMA on the PCI bus at that point.
17534 */
17535 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17536 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17537 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17538 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17539 }
17541 err = tg3_test_dma(tp);
17542 if (err) {
17543 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17544 goto err_out_apeunmap;
17545 }
17547 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17548 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17549 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17550 for (i = 0; i < tp->irq_max; i++) {
17551 struct tg3_napi *tnapi = &tp->napi[i];
17553 tnapi->tp = tp;
17554 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17556 tnapi->int_mbox = intmbx;
17557 if (i <= 4)
17558 intmbx += 0x8;
17559 else
17560 intmbx += 0x4;
17562 tnapi->consmbox = rcvmbx;
17563 tnapi->prodmbox = sndmbx;
17565 if (i)
17566 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17567 else
17568 tnapi->coal_now = HOSTCC_MODE_NOW;
17570 if (!tg3_flag(tp, SUPPORT_MSIX))
17571 break;
17573 /*
17574 * If we support MSIX, we'll be using RSS. If we're using
17575 * RSS, the first vector only handles link interrupts and the
17576 * remaining vectors handle rx and tx interrupts. Reuse the
17577 * mailbox values for the next iteration. The values we setup
17578 * above are still useful for the single vectored mode.
17579 */
17580 if (!i)
17581 continue;
17583 rcvmbx += 0x8;
17585 if (sndmbx & 0x4)
17586 sndmbx -= 0x4;
17587 else
17588 sndmbx += 0xc;
17589 }
17593 pci_set_drvdata(pdev, dev);
17595 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17596 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17597 tg3_asic_rev(tp) == ASIC_REV_5762)
17598 tg3_flag_set(tp, PTP_CAPABLE);
17600 tg3_timer_init(tp);
17602 tg3_carrier_off(tp);
17604 err = register_netdev(dev);
17605 if (err) {
17606 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17607 goto err_out_apeunmap;
17608 }
17610 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17611 tp->board_part_number,
17612 tg3_chip_rev_id(tp),
17613 tg3_bus_string(tp, str),
17614 dev->dev_addr);
17616 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17617 struct phy_device *phydev;
17618 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
17619 netdev_info(dev,
17620 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17621 phydev->drv->name, dev_name(&phydev->dev));
17622 } else {
17623 char *ethtype;
17625 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17626 ethtype = "10/100Base-TX";
17627 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17628 ethtype = "1000Base-SX";
17629 else
17630 ethtype = "10/100/1000Base-T";
17632 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17633 "(WireSpeed[%d], EEE[%d])\n",
17634 tg3_phy_string(tp), ethtype,
17635 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17636 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17637 }
17639 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17640 (dev->features & NETIF_F_RXCSUM) != 0,
17641 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17642 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17643 tg3_flag(tp, ENABLE_ASF) != 0,
17644 tg3_flag(tp, TSO_CAPABLE) != 0);
17645 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17646 tp->dma_rwctrl,
17647 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17648 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17650 pci_save_state(pdev);
17652 return 0;
17654 err_out_apeunmap:
17655 if (tp->aperegs) {
17656 iounmap(tp->aperegs);
17657 tp->aperegs = NULL;
17658 }
17660 err_out_iounmap:
17661 if (tp->regs) {
17662 iounmap(tp->regs);
17663 tp->regs = NULL;
17664 }
17666 err_out_free_dev:
17667 free_netdev(dev);
17669 err_out_free_res:
17670 pci_release_regions(pdev);
17672 err_out_disable_pdev:
17673 if (pci_is_enabled(pdev))
17674 pci_disable_device(pdev);
17675 pci_set_drvdata(pdev, NULL);
17676 return err;
17677 }
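/* Illustrative note, not from the original driver: the error labels
 * above unwind in strict reverse order of acquisition (APE regs, then
 * device regs, then the netdev, then the PCI regions and device), so a
 * failure at any probe step releases exactly the resources taken so far.
 */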
17679 static void tg3_remove_one(struct pci_dev *pdev)
17680 {
17681 struct net_device *dev = pci_get_drvdata(pdev);
17683 if (dev) {
17684 struct tg3 *tp = netdev_priv(dev);
17686 release_firmware(tp->fw);
17688 tg3_reset_task_cancel(tp);
17690 if (tg3_flag(tp, USE_PHYLIB)) {
17691 tg3_phy_fini(tp);
17692 tg3_mdio_fini(tp);
17693 }
17695 unregister_netdev(dev);
17696 if (tp->aperegs) {
17697 iounmap(tp->aperegs);
17698 tp->aperegs = NULL;
17699 }
17700 if (tp->regs) {
17701 iounmap(tp->regs);
17702 tp->regs = NULL;
17703 }
17704 free_netdev(dev);
17705 pci_release_regions(pdev);
17706 pci_disable_device(pdev);
17707 pci_set_drvdata(pdev, NULL);
17708 }
17709 }
17711 #ifdef CONFIG_PM_SLEEP
17712 static int tg3_suspend(struct device *device)
17713 {
17714 struct pci_dev *pdev = to_pci_dev(device);
17715 struct net_device *dev = pci_get_drvdata(pdev);
17716 struct tg3 *tp = netdev_priv(dev);
17717 int err;
17719 if (!netif_running(dev))
17720 return 0;
17722 tg3_reset_task_cancel(tp);
17723 tg3_phy_stop(tp);
17724 tg3_netif_stop(tp);
17726 tg3_timer_stop(tp);
17728 tg3_full_lock(tp, 1);
17729 tg3_disable_ints(tp);
17730 tg3_full_unlock(tp);
17732 netif_device_detach(dev);
17734 tg3_full_lock(tp, 0);
17735 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17736 tg3_flag_clear(tp, INIT_COMPLETE);
17737 tg3_full_unlock(tp);
17739 err = tg3_power_down_prepare(tp);
17740 if (err) {
17741 int err2;
17743 tg3_full_lock(tp, 0);
17745 tg3_flag_set(tp, INIT_COMPLETE);
17746 err2 = tg3_restart_hw(tp, true);
17747 if (err2)
17748 goto out;
17750 tg3_timer_start(tp);
17752 netif_device_attach(dev);
17753 tg3_netif_start(tp);
17755 out:
17756 tg3_full_unlock(tp);
17758 if (!err2)
17759 tg3_phy_start(tp);
17760 }
17762 return err;
17763 }
17765 static int tg3_resume(struct device *device)
17766 {
17767 struct pci_dev *pdev = to_pci_dev(device);
17768 struct net_device *dev = pci_get_drvdata(pdev);
17769 struct tg3 *tp = netdev_priv(dev);
17770 int err;
17772 if (!netif_running(dev))
17773 return 0;
17775 netif_device_attach(dev);
17777 tg3_full_lock(tp, 0);
17779 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
17781 tg3_flag_set(tp, INIT_COMPLETE);
17782 err = tg3_restart_hw(tp,
17783 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
17784 if (err)
17785 goto out;
17787 tg3_timer_start(tp);
17789 tg3_netif_start(tp);
17791 out:
17792 tg3_full_unlock(tp);
17794 if (!err)
17795 tg3_phy_start(tp);
17797 return err;
17798 }
17799 #endif /* CONFIG_PM_SLEEP */
17801 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17803 static void tg3_shutdown(struct pci_dev *pdev)
17804 {
17805 struct net_device *dev = pci_get_drvdata(pdev);
17806 struct tg3 *tp = netdev_priv(dev);
17808 rtnl_lock();
17809 netif_device_detach(dev);
17811 if (netif_running(dev))
17812 dev_close(dev);
17814 if (system_state == SYSTEM_POWER_OFF)
17815 tg3_power_down(tp);
17817 rtnl_unlock();
17818 }
17820 /**
17821 * tg3_io_error_detected - called when PCI error is detected
17822 * @pdev: Pointer to PCI device
17823 * @state: The current pci connection state
17824 *
17825 * This function is called after a PCI bus error affecting
17826 * this device has been detected.
17827 */
17828 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17829 pci_channel_state_t state)
17830 {
17831 struct net_device *netdev = pci_get_drvdata(pdev);
17832 struct tg3 *tp = netdev_priv(netdev);
17833 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17835 netdev_info(netdev, "PCI I/O error detected\n");
17837 rtnl_lock();
17839 /* We probably don't have netdev yet */
17840 if (!netdev || !netif_running(netdev))
17841 goto done;
17843 tg3_phy_stop(tp);
17845 tg3_netif_stop(tp);
17847 tg3_timer_stop(tp);
17849 /* Want to make sure that the reset task doesn't run */
17850 tg3_reset_task_cancel(tp);
17852 netif_device_detach(netdev);
17854 /* Clean up software state, even if MMIO is blocked */
17855 tg3_full_lock(tp, 0);
17856 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17857 tg3_full_unlock(tp);
17859 done:
17860 if (state == pci_channel_io_perm_failure) {
17861 if (netdev) {
17862 tg3_napi_enable(tp);
17863 dev_close(netdev);
17864 }
17865 err = PCI_ERS_RESULT_DISCONNECT;
17866 } else {
17867 pci_disable_device(pdev);
17868 }
17870 rtnl_unlock();
17872 return err;
17873 }
17875 /**
17876 * tg3_io_slot_reset - called after the pci bus has been reset.
17877 * @pdev: Pointer to PCI device
17878 *
17879 * Restart the card from scratch, as if from a cold-boot.
17880 * At this point, the card has experienced a hard reset,
17881 * followed by fixups by BIOS, and has its config space
17882 * set up identically to what it was at cold boot.
17883 */
17884 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17885 {
17886 struct net_device *netdev = pci_get_drvdata(pdev);
17887 struct tg3 *tp = netdev_priv(netdev);
17888 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17889 int err;
17891 rtnl_lock();
17893 if (pci_enable_device(pdev)) {
17894 dev_err(&pdev->dev,
17895 "Cannot re-enable PCI device after reset.\n");
17896 goto done;
17897 }
17899 pci_set_master(pdev);
17900 pci_restore_state(pdev);
17901 pci_save_state(pdev);
17903 if (!netdev || !netif_running(netdev)) {
17904 rc = PCI_ERS_RESULT_RECOVERED;
17905 goto done;
17906 }
17908 err = tg3_power_up(tp);
17909 if (err)
17910 goto done;
17912 rc = PCI_ERS_RESULT_RECOVERED;
17914 done:
17915 if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
17916 tg3_napi_enable(tp);
17917 dev_close(netdev);
17918 }
17919 rtnl_unlock();
17921 return rc;
17922 }
17924 /**
17925 * tg3_io_resume - called when traffic can start flowing again.
17926 * @pdev: Pointer to PCI device
17927 *
17928 * This callback is called when the error recovery driver tells
17929 * us that it's OK to resume normal operation.
17930 */
17931 static void tg3_io_resume(struct pci_dev *pdev)
17932 {
17933 struct net_device *netdev = pci_get_drvdata(pdev);
17934 struct tg3 *tp = netdev_priv(netdev);
17935 int err;
17937 rtnl_lock();
17939 if (!netif_running(netdev))
17940 goto done;
17942 tg3_full_lock(tp, 0);
17943 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
17944 tg3_flag_set(tp, INIT_COMPLETE);
17945 err = tg3_restart_hw(tp, true);
17946 if (err) {
17947 tg3_full_unlock(tp);
17948 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17949 goto done;
17950 }
17952 netif_device_attach(netdev);
17954 tg3_timer_start(tp);
17956 tg3_netif_start(tp);
17958 tg3_full_unlock(tp);
17960 tg3_phy_start(tp);
17962 done:
17963 rtnl_unlock();
17964 }
17966 static const struct pci_error_handlers tg3_err_handler = {
17967 .error_detected = tg3_io_error_detected,
17968 .slot_reset = tg3_io_slot_reset,
17969 .resume = tg3_io_resume
17970 };
17972 static struct pci_driver tg3_driver = {
17973 .name = DRV_MODULE_NAME,
17974 .id_table = tg3_pci_tbl,
17975 .probe = tg3_init_one,
17976 .remove = tg3_remove_one,
17977 .err_handler = &tg3_err_handler,
17978 .driver.pm = &tg3_pm_ops,
17979 .shutdown = tg3_shutdown,
17980 };
17982 module_pci_driver(tg3_driver);