/* tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)	\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)	\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
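/* Example: tg3_flag(tp, TAGGED_STATUS) expands to
 * _tg3_flag(TG3_FLAG_TAGGED_STATUS, tp->tg3_flags), i.e. an atomic
 * test_bit() on the device's flag bitmap, so feature flags can be
 * tested and updated without additional locking.
 */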
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			130
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"February 14, 2013"
#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)
/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
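/* Example: TG3_TX_RING_SIZE is 512, a power of two, so NEXT_TX() wraps
 * the producer index with a simple AND mask: NEXT_TX(511) == 0, with no
 * divide or modulo instruction generated.
 */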
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
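/* Example: with the default of TG3_DEF_TX_RING_PENDING (511) pending
 * descriptors, the stopped queue is woken once 511 / 4 = 127 descriptors
 * are free again.
 */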
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)
#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};
#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)

#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
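/* tw32()/tr32() and the mailbox variants dispatch through function
 * pointers in struct tg3, so the same call sites work whether the chip
 * is programmed via direct MMIO (tg3_write32) or via PCI config-space
 * indirection (tg3_write_indirect_reg32), as selected at probe time.
 */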
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
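/* The APE lock protocol used above is a simple request/grant handshake:
 * the driver writes its bit to the REQ register, then polls the GRANT
 * register for up to 1 ms.  If the grant never arrives, the request is
 * revoked by writing the same bit back to the GRANT register.
 */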
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
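/* Writing last_tag << 24 to an interrupt mailbox re-enables the vector
 * and, with tagged status, tells the chip how much work the driver has
 * already processed; 1SHOT_MSI chips get the write twice to re-arm the
 * one-shot interrupt.
 */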
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}
static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}
static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}
static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}
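/* tg3_mdio_read()/tg3_mdio_write() adapt tg3's MI-register access
 * routines to the phylib mii_bus interface; tp->lock serializes them
 * against the driver's own PHY accesses.
 */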
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
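/* Worked example of the math above: firmware gets TG3_FW_EVENT_TIMEOUT_USEC
 * (2500 usec) to ack an event.  If, say, 800 usec of that window remain,
 * delay_cnt becomes (800 >> 3) + 1 = 101 polls of 8 usec each, so the
 * loop never waits much longer than the remaining window.
 */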
/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	*data++ = val;
}
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}
/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}
/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}
/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}

	tp->link_up = netif_carrier_ok(tp->dev);
}
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}
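/* Summary of the 802.3x pause advertisement encoding built above:
 *   TX+RX -> PAUSE (symmetric)
 *   TX    -> ASYM_PAUSE only (send pause frames, don't honor them)
 *   RX    -> PAUSE | ASYM_PAUSE (honor pause in either configuration)
 */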
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
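/* Resolution examples for the logic above: if both ends advertise
 * symmetric PAUSE, flow control is enabled in both directions.  If only
 * ASYM_PAUSE is common, the side that also set PAUSE is the one asking
 * to receive pause frames, so a local PAUSE bit yields FLOW_CTRL_RX and
 * a remote PAUSE bit yields FLOW_CTRL_TX.
 */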
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 tg3_asic_rev(tp) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		phydev->advertising = tp->link_config.advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}
static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	tg3_phy_toggle_auxctl_smdsp(tp, false);
}
2295 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2299 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2304 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2305 current_link_up == 1 &&
2306 tp->link_config.active_duplex == DUPLEX_FULL &&
2307 (tp->link_config.active_speed == SPEED_100 ||
2308 tp->link_config.active_speed == SPEED_1000)) {
2311 if (tp->link_config.active_speed == SPEED_1000)
2312 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2314 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2316 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2318 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2319 TG3_CL45_D7_EEERES_STAT, &val);
2321 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2322 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2326 if (!tp->setlpicnt) {
2327 if (current_link_up == 1 &&
2328 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2329 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2330 tg3_phy_toggle_auxctl_smdsp(tp, false);
2333 val = tr32(TG3_CPMU_EEE_MODE);
2334 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2338 static void tg3_phy_eee_enable(struct tg3 *tp)
2342 if (tp->link_config.active_speed == SPEED_1000 &&
2343 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2344 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2345 tg3_flag(tp, 57765_CLASS)) &&
2346 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2347 val = MII_TG3_DSP_TAP26_ALNOKO |
2348 MII_TG3_DSP_TAP26_RMRXSTO;
2349 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2350 tg3_phy_toggle_auxctl_smdsp(tp, false);
2353 val = tr32(TG3_CPMU_EEE_MODE);
2354 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2357 static int tg3_wait_macro_done(struct tg3 *tp)
2364 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2365 if ((tmp32 & 0x1000) == 0)
2375 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2377 static const u32 test_pat[4][6] = {
2378 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2379 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2380 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2381 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2385 for (chan = 0; chan < 4; chan++) {
2388 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2389 (chan * 0x2000) | 0x0200);
2390 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2392 for (i = 0; i < 6; i++)
2393 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2396 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2397 if (tg3_wait_macro_done(tp)) {
2402 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2403 (chan * 0x2000) | 0x0200);
2404 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2405 if (tg3_wait_macro_done(tp)) {
2410 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2411 if (tg3_wait_macro_done(tp)) {
2416 for (i = 0; i < 6; i += 2) {
2419 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2420 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2421 tg3_wait_macro_done(tp)) {
2427 if (low != test_pat[chan][i] ||
2428 high != test_pat[chan][i+1]) {
2429 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2430 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2431 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2441 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2445 for (chan = 0; chan < 4; chan++) {
2448 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2449 (chan * 0x2000) | 0x0200);
2450 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2451 for (i = 0; i < 6; i++)
2452 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2453 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2454 if (tg3_wait_macro_done(tp))
2461 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2463 u32 reg32, phy9_orig;
2464 int retries, do_phy_reset, err;
2470 err = tg3_bmcr_reset(tp);
2476 /* Disable transmitter and interrupt. */
2477 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2481 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2483 /* Set full-duplex, 1000 Mbps. */
2484 tg3_writephy(tp, MII_BMCR,
2485 BMCR_FULLDPLX | BMCR_SPEED1000);
2487 /* Set to master mode. */
2488 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2491 tg3_writephy(tp, MII_CTRL1000,
2492 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2494 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2498 /* Block the PHY control access. */
2499 tg3_phydsp_write(tp, 0x8005, 0x0800);
2501 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2504 } while (--retries);
2506 err = tg3_phy_reset_chanpat(tp);
2510 tg3_phydsp_write(tp, 0x8005, 0x0000);
2512 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2513 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2515 tg3_phy_toggle_auxctl_smdsp(tp, false);
2517 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2519 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2521 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2528 static void tg3_carrier_off(struct tg3 *tp)
2530 netif_carrier_off(tp->dev);
2531 tp->link_up = false;
2534 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2536 if (tg3_flag(tp, ENABLE_ASF))
2537 netdev_warn(tp->dev,
2538 "Management side-band traffic will be interrupted during phy settings change\n");
2541 /* This will unconditionally reset the tigon3 PHY. */
2544 static int tg3_phy_reset(struct tg3 *tp)
2549 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2550 val = tr32(GRC_MISC_CFG);
2551 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2554 err = tg3_readphy(tp, MII_BMSR, &val);
2555 err |= tg3_readphy(tp, MII_BMSR, &val);
2559 if (netif_running(tp->dev) && tp->link_up) {
2560 netif_carrier_off(tp->dev);
2561 tg3_link_report(tp);
2564 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2565 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2566 tg3_asic_rev(tp) == ASIC_REV_5705) {
2567 err = tg3_phy_reset_5703_4_5(tp);
2574 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2575 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2576 cpmuctrl = tr32(TG3_CPMU_CTRL);
2577 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2579 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2582 err = tg3_bmcr_reset(tp);
2586 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2587 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2588 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2590 tw32(TG3_CPMU_CTRL, cpmuctrl);
2593 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2594 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2595 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2596 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2597 CPMU_LSPD_1000MB_MACCLK_12_5) {
2598 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2600 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2604 if (tg3_flag(tp, 5717_PLUS) &&
2605 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2608 tg3_phy_apply_otp(tp);
2610 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2611 tg3_phy_toggle_apd(tp, true);
2613 tg3_phy_toggle_apd(tp, false);
2616 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2617 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2618 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2619 tg3_phydsp_write(tp, 0x000a, 0x0323);
2620 tg3_phy_toggle_auxctl_smdsp(tp, false);
2623 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2624 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2625 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2628 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2629 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2630 tg3_phydsp_write(tp, 0x000a, 0x310b);
2631 tg3_phydsp_write(tp, 0x201f, 0x9506);
2632 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2633 tg3_phy_toggle_auxctl_smdsp(tp, false);
2635 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2636 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2637 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2638 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2639 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2640 tg3_writephy(tp, MII_TG3_TEST1,
2641 MII_TG3_TEST1_TRIM_EN | 0x4);
2643 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2645 tg3_phy_toggle_auxctl_smdsp(tp, false);
2649 /* Set Extended packet length bit (bit 14) on all chips that
2650 * support jumbo frames. */
2651 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2652 /* Cannot do read-modify-write on 5401 */
2653 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2654 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2655 /* Set bit 14 with read-modify-write to preserve other bits */
2656 err = tg3_phy_auxctl_read(tp,
2657 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2659 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2660 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2663 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2664 * jumbo frame transmission.
2666 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2667 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2668 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2669 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2672 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2673 /* adjust output voltage */
2674 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2677 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2678 tg3_phydsp_write(tp, 0xffb, 0x4000);
2680 tg3_phy_toggle_automdix(tp, 1);
2681 tg3_phy_set_wirespeed(tp);
2685 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2686 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2687 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2688 TG3_GPIO_MSG_NEED_VAUX)
2689 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2690 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2691 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2692 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2693 (TG3_GPIO_MSG_DRVR_PRES << 12))
2695 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2696 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2697 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2698 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2699 (TG3_GPIO_MSG_NEED_VAUX << 12))
2701 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2705 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2706 tg3_asic_rev(tp) == ASIC_REV_5719)
2707 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2709 status = tr32(TG3_CPMU_DRV_STATUS);
2711 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2712 status &= ~(TG3_GPIO_MSG_MASK << shift);
2713 status |= (newstat << shift);
2715 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2716 tg3_asic_rev(tp) == ASIC_REV_5719)
2717 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2719 tw32(TG3_CPMU_DRV_STATUS, status);
2721 return status >> TG3_APE_GPIO_MSG_SHIFT;
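/* Illustrative sketch (not driver logic): each PCI function owns a
 * 4-bit nibble of the status word, with TG3_GPIO_MSG_DRVR_PRES in bit 0
 * and TG3_GPIO_MSG_NEED_VAUX in bit 1 of that nibble. For pci_fn == 2:
 *
 *	shift  = TG3_APE_GPIO_MSG_SHIFT + 4 * 2;
 *	status &= ~(TG3_GPIO_MSG_MASK << shift);
 *	status |= TG3_GPIO_MSG_NEED_VAUX << shift;
 *
 * The ALL_*_MASK macros above OR together the corresponding bit from
 * all four nibbles, so callers can test "any function needs Vaux" in
 * one step.
 */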
2724 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2726 if (!tg3_flag(tp, IS_NIC))
2729 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2730 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2731 tg3_asic_rev(tp) == ASIC_REV_5720) {
2732 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2735 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2737 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2738 TG3_GRC_LCLCTL_PWRSW_DELAY);
2740 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2742 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2743 TG3_GRC_LCLCTL_PWRSW_DELAY);
2749 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2753 if (!tg3_flag(tp, IS_NIC) ||
2754 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2755 tg3_asic_rev(tp) == ASIC_REV_5701)
2758 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2760 tw32_wait_f(GRC_LOCAL_CTRL,
2761 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2762 TG3_GRC_LCLCTL_PWRSW_DELAY);
2764 tw32_wait_f(GRC_LOCAL_CTRL,
2766 TG3_GRC_LCLCTL_PWRSW_DELAY);
2768 tw32_wait_f(GRC_LOCAL_CTRL,
2769 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2770 TG3_GRC_LCLCTL_PWRSW_DELAY);
2773 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2775 if (!tg3_flag(tp, IS_NIC))
2778 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2779 tg3_asic_rev(tp) == ASIC_REV_5701) {
2780 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2781 (GRC_LCLCTRL_GPIO_OE0 |
2782 GRC_LCLCTRL_GPIO_OE1 |
2783 GRC_LCLCTRL_GPIO_OE2 |
2784 GRC_LCLCTRL_GPIO_OUTPUT0 |
2785 GRC_LCLCTRL_GPIO_OUTPUT1),
2786 TG3_GRC_LCLCTL_PWRSW_DELAY);
2787 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2788 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2789 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2790 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2791 GRC_LCLCTRL_GPIO_OE1 |
2792 GRC_LCLCTRL_GPIO_OE2 |
2793 GRC_LCLCTRL_GPIO_OUTPUT0 |
2794 GRC_LCLCTRL_GPIO_OUTPUT1 |
2796 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2797 TG3_GRC_LCLCTL_PWRSW_DELAY);
2799 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2800 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2801 TG3_GRC_LCLCTL_PWRSW_DELAY);
2803 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2804 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2805 TG3_GRC_LCLCTL_PWRSW_DELAY);
2808 u32 grc_local_ctrl = 0;
2810 /* Workaround to prevent overdrawing Amps. */
2811 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2812 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2813 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2815 TG3_GRC_LCLCTL_PWRSW_DELAY);
2818 /* On 5753 and variants, GPIO2 cannot be used. */
2819 no_gpio2 = tp->nic_sram_data_cfg &
2820 NIC_SRAM_DATA_CFG_NO_GPIO2;
2822 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2823 GRC_LCLCTRL_GPIO_OE1 |
2824 GRC_LCLCTRL_GPIO_OE2 |
2825 GRC_LCLCTRL_GPIO_OUTPUT1 |
2826 GRC_LCLCTRL_GPIO_OUTPUT2;
2828 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2829 GRC_LCLCTRL_GPIO_OUTPUT2);
2831 tw32_wait_f(GRC_LOCAL_CTRL,
2832 tp->grc_local_ctrl | grc_local_ctrl,
2833 TG3_GRC_LCLCTL_PWRSW_DELAY);
2835 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2837 tw32_wait_f(GRC_LOCAL_CTRL,
2838 tp->grc_local_ctrl | grc_local_ctrl,
2839 TG3_GRC_LCLCTL_PWRSW_DELAY);
2842 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2843 tw32_wait_f(GRC_LOCAL_CTRL,
2844 tp->grc_local_ctrl | grc_local_ctrl,
2845 TG3_GRC_LCLCTL_PWRSW_DELAY);
2850 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2854 /* Serialize power state transitions */
2855 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2858 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2859 msg = TG3_GPIO_MSG_NEED_VAUX;
2861 msg = tg3_set_function_status(tp, msg);
2863 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2866 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2867 tg3_pwrsrc_switch_to_vaux(tp);
2869 tg3_pwrsrc_die_with_vmain(tp);
2872 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2875 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2877 bool need_vaux = false;
2879 /* The GPIOs do something completely different on 57765. */
2880 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2883 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2884 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2885 tg3_asic_rev(tp) == ASIC_REV_5720) {
2886 tg3_frob_aux_power_5717(tp, include_wol ?
2887 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2891 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2892 struct net_device *dev_peer;
2894 dev_peer = pci_get_drvdata(tp->pdev_peer);
2896 /* remove_one() may have been run on the peer. */
2898 struct tg3 *tp_peer = netdev_priv(dev_peer);
2900 if (tg3_flag(tp_peer, INIT_COMPLETE))
2903 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2904 tg3_flag(tp_peer, ENABLE_ASF))
2909 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2910 tg3_flag(tp, ENABLE_ASF))
2914 tg3_pwrsrc_switch_to_vaux(tp);
2916 tg3_pwrsrc_die_with_vmain(tp);
2919 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2921 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2923 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2924 if (speed != SPEED_10)
2926 } else if (speed == SPEED_10)
2932 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2936 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2937 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
2938 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2939 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2942 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2943 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2944 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2949 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2951 val = tr32(GRC_MISC_CFG);
2952 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2955 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2957 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2960 tg3_writephy(tp, MII_ADVERTISE, 0);
2961 tg3_writephy(tp, MII_BMCR,
2962 BMCR_ANENABLE | BMCR_ANRESTART);
2964 tg3_writephy(tp, MII_TG3_FET_TEST,
2965 phytest | MII_TG3_FET_SHADOW_EN);
2966 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2967 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2969 MII_TG3_FET_SHDW_AUXMODE4,
2972 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2975 } else if (do_low_power) {
2976 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2977 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2979 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2980 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2981 MII_TG3_AUXCTL_PCTL_VREG_11V;
2982 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2985 /* The PHY should not be powered down on some chips because of bugs. */
2988 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2989 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2990 (tg3_asic_rev(tp) == ASIC_REV_5780 &&
2991 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2992 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
2996 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2997 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2998 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2999 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3000 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3001 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3004 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3007 /* tp->lock is held. */
3008 static int tg3_nvram_lock(struct tg3 *tp)
3010 if (tg3_flag(tp, NVRAM)) {
3013 if (tp->nvram_lock_cnt == 0) {
3014 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3015 for (i = 0; i < 8000; i++) {
3016 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3021 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3025 tp->nvram_lock_cnt++;
3030 /* tp->lock is held. */
3031 static void tg3_nvram_unlock(struct tg3 *tp)
3033 if (tg3_flag(tp, NVRAM)) {
3034 if (tp->nvram_lock_cnt > 0)
3035 tp->nvram_lock_cnt--;
3036 if (tp->nvram_lock_cnt == 0)
3037 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
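/* Usage sketch (illustrative): the lock is counted, so nested
 * lock/unlock pairs are safe; the SWARB hardware semaphore is only
 * released once the count drops back to zero:
 *
 *	if (tg3_nvram_lock(tp) == 0) {
 *		... NVRAM accesses ...
 *		tg3_nvram_unlock(tp);
 *	}
 */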
3041 /* tp->lock is held. */
3042 static void tg3_enable_nvram_access(struct tg3 *tp)
3044 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3045 u32 nvaccess = tr32(NVRAM_ACCESS);
3047 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3051 /* tp->lock is held. */
3052 static void tg3_disable_nvram_access(struct tg3 *tp)
3054 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3055 u32 nvaccess = tr32(NVRAM_ACCESS);
3057 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3061 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3062 u32 offset, u32 *val)
3067 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3070 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3071 EEPROM_ADDR_DEVID_MASK |
3073 tw32(GRC_EEPROM_ADDR,
3075 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3076 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3077 EEPROM_ADDR_ADDR_MASK) |
3078 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3080 for (i = 0; i < 1000; i++) {
3081 tmp = tr32(GRC_EEPROM_ADDR);
3083 if (tmp & EEPROM_ADDR_COMPLETE)
3087 if (!(tmp & EEPROM_ADDR_COMPLETE))
3090 tmp = tr32(GRC_EEPROM_DATA);
3093 * The data will always be opposite the native endian
3094 * format. Perform a blind byteswap to compensate.
3101 #define NVRAM_CMD_TIMEOUT 10000
3103 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3107 tw32(NVRAM_CMD, nvram_cmd);
3108 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3110 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3116 if (i == NVRAM_CMD_TIMEOUT)
3122 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3124 if (tg3_flag(tp, NVRAM) &&
3125 tg3_flag(tp, NVRAM_BUFFERED) &&
3126 tg3_flag(tp, FLASH) &&
3127 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3128 (tp->nvram_jedecnum == JEDEC_ATMEL))
3130 addr = ((addr / tp->nvram_pagesize) <<
3131 ATMEL_AT45DB0X1B_PAGE_POS) +
3132 (addr % tp->nvram_pagesize);
3137 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3139 if (tg3_flag(tp, NVRAM) &&
3140 tg3_flag(tp, NVRAM_BUFFERED) &&
3141 tg3_flag(tp, FLASH) &&
3142 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3143 (tp->nvram_jedecnum == JEDEC_ATMEL))
3145 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3146 tp->nvram_pagesize) +
3147 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
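/* Worked example (a sketch, assuming an AT45DB011B-style part with
 * 264-byte pages and ATMEL_AT45DB0X1B_PAGE_POS == 9). For logical
 * address 1000:
 *
 *	page = 1000 / 264 = 3, offset = 1000 % 264 = 208
 *	phys = (3 << 9) + 208 = 1744
 *
 * tg3_nvram_logical_addr() inverts this:
 *	(1744 >> 9) * 264 + (1744 & 511) = 3 * 264 + 208 = 1000
 */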
3152 /* NOTE: Data read in from NVRAM is byteswapped according to
3153 * the byteswapping settings for all other register accesses.
3154 * tg3 devices are BE devices, so on a BE machine, the data
3155 * returned will be exactly as it is seen in NVRAM. On a LE
3156 * machine, the 32-bit value will be byteswapped.
3158 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3162 if (!tg3_flag(tp, NVRAM))
3163 return tg3_nvram_read_using_eeprom(tp, offset, val);
3165 offset = tg3_nvram_phys_addr(tp, offset);
3167 if (offset > NVRAM_ADDR_MSK)
3170 ret = tg3_nvram_lock(tp);
3174 tg3_enable_nvram_access(tp);
3176 tw32(NVRAM_ADDR, offset);
3177 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3178 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3181 *val = tr32(NVRAM_RDDATA);
3183 tg3_disable_nvram_access(tp);
3185 tg3_nvram_unlock(tp);
3190 /* Ensures NVRAM data is in bytestream format. */
3191 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3194 int res = tg3_nvram_read(tp, offset, &v);
3196 *val = cpu_to_be32(v);
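/* Example (illustrative): checking an NVRAM signature independently of
 * host endianness. TG3_EEPROM_MAGIC stands in for the expected
 * signature constant; the exact name is an assumption of this sketch.
 *
 *	__be32 magic;
 *
 *	if (!tg3_nvram_read_be32(tp, 0, &magic) &&
 *	    be32_to_cpu(magic) == TG3_EEPROM_MAGIC)
 *		... signature matched ...
 *
 * tg3_nvram_read() would instead return the word in register byte
 * order, which differs between BE and LE hosts.
 */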
3200 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3201 u32 offset, u32 len, u8 *buf)
3206 for (i = 0; i < len; i += 4) {
3212 memcpy(&data, buf + i, 4);
3215 * The SEEPROM interface expects the data to always be opposite
3216 * the native endian format. We accomplish this by reversing
3217 * all the operations that would have been performed on the
3218 * data from a call to tg3_nvram_read_be32().
3220 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3222 val = tr32(GRC_EEPROM_ADDR);
3223 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3225 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3227 tw32(GRC_EEPROM_ADDR, val |
3228 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3229 (addr & EEPROM_ADDR_ADDR_MASK) |
3233 for (j = 0; j < 1000; j++) {
3234 val = tr32(GRC_EEPROM_ADDR);
3236 if (val & EEPROM_ADDR_COMPLETE)
3240 if (!(val & EEPROM_ADDR_COMPLETE)) {
3249 /* offset and length are dword aligned */
3250 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3254 u32 pagesize = tp->nvram_pagesize;
3255 u32 pagemask = pagesize - 1;
3259 tmp = kmalloc(pagesize, GFP_KERNEL);
3265 u32 phy_addr, page_off, size;
3267 phy_addr = offset & ~pagemask;
3269 for (j = 0; j < pagesize; j += 4) {
3270 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3271 (__be32 *) (tmp + j));
3278 page_off = offset & pagemask;
3285 memcpy(tmp + page_off, buf, size);
3287 offset = offset + (pagesize - page_off);
3289 tg3_enable_nvram_access(tp);
3292 * Before we can erase the flash page, we need
3293 * to issue a special "write enable" command.
3295 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3297 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3300 /* Erase the target page */
3301 tw32(NVRAM_ADDR, phy_addr);
3303 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3304 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3306 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3309 /* Issue another write enable to start the write. */
3310 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3312 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3315 for (j = 0; j < pagesize; j += 4) {
3318 data = *((__be32 *) (tmp + j));
3320 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3322 tw32(NVRAM_ADDR, phy_addr + j);
3324 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3328 nvram_cmd |= NVRAM_CMD_FIRST;
3329 else if (j == (pagesize - 4))
3330 nvram_cmd |= NVRAM_CMD_LAST;
3332 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3340 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3341 tg3_nvram_exec_cmd(tp, nvram_cmd);
3348 /* offset and length are dword aligned */
3349 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3354 for (i = 0; i < len; i += 4, offset += 4) {
3355 u32 page_off, phy_addr, nvram_cmd;
3358 memcpy(&data, buf + i, 4);
3359 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3361 page_off = offset % tp->nvram_pagesize;
3363 phy_addr = tg3_nvram_phys_addr(tp, offset);
3365 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3367 if (page_off == 0 || i == 0)
3368 nvram_cmd |= NVRAM_CMD_FIRST;
3369 if (page_off == (tp->nvram_pagesize - 4))
3370 nvram_cmd |= NVRAM_CMD_LAST;
3373 nvram_cmd |= NVRAM_CMD_LAST;
3375 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3376 !tg3_flag(tp, FLASH) ||
3377 !tg3_flag(tp, 57765_PLUS))
3378 tw32(NVRAM_ADDR, phy_addr);
3380 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3381 !tg3_flag(tp, 5755_PLUS) &&
3382 (tp->nvram_jedecnum == JEDEC_ST) &&
3383 (nvram_cmd & NVRAM_CMD_FIRST)) {
3386 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3387 ret = tg3_nvram_exec_cmd(tp, cmd);
3391 if (!tg3_flag(tp, FLASH)) {
3392 /* We always do complete word writes to eeprom. */
3393 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3396 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3403 /* offset and length are dword aligned */
3404 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3408 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3409 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3410 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3414 if (!tg3_flag(tp, NVRAM)) {
3415 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3419 ret = tg3_nvram_lock(tp);
3423 tg3_enable_nvram_access(tp);
3424 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3425 tw32(NVRAM_WRITE1, 0x406);
3427 grc_mode = tr32(GRC_MODE);
3428 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3430 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3431 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3434 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3438 grc_mode = tr32(GRC_MODE);
3439 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3441 tg3_disable_nvram_access(tp);
3442 tg3_nvram_unlock(tp);
3445 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3446 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3453 #define RX_CPU_SCRATCH_BASE 0x30000
3454 #define RX_CPU_SCRATCH_SIZE 0x04000
3455 #define TX_CPU_SCRATCH_BASE 0x34000
3456 #define TX_CPU_SCRATCH_SIZE 0x04000
3458 /* tp->lock is held. */
3459 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3462 const int iters = 10000;
3464 for (i = 0; i < iters; i++) {
3465 tw32(cpu_base + CPU_STATE, 0xffffffff);
3466 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3467 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3471 return (i == iters) ? -EBUSY : 0;
3474 /* tp->lock is held. */
3475 static int tg3_rxcpu_pause(struct tg3 *tp)
3477 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3479 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3480 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3486 /* tp->lock is held. */
3487 static int tg3_txcpu_pause(struct tg3 *tp)
3489 return tg3_pause_cpu(tp, TX_CPU_BASE);
3492 /* tp->lock is held. */
3493 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3495 tw32(cpu_base + CPU_STATE, 0xffffffff);
3496 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3499 /* tp->lock is held. */
3500 static void tg3_rxcpu_resume(struct tg3 *tp)
3502 tg3_resume_cpu(tp, RX_CPU_BASE);
3505 /* tp->lock is held. */
3506 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3510 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3512 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3513 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3515 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3518 if (cpu_base == RX_CPU_BASE) {
3519 rc = tg3_rxcpu_pause(tp);
3522 * There is only an Rx CPU for the 5750 derivative in the BCM4785.
3525 if (tg3_flag(tp, IS_SSB_CORE))
3528 rc = tg3_txcpu_pause(tp);
3532 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3533 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3537 /* Clear firmware's nvram arbitration. */
3538 if (tg3_flag(tp, NVRAM))
3539 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3543 static int tg3_fw_data_len(struct tg3 *tp,
3544 const struct tg3_firmware_hdr *fw_hdr)
3548 /* Non-fragmented firmware has one firmware header followed by a
3549 * contiguous chunk of data to be written. The length field in that
3550 * header is not the length of the data to be written but the complete
3551 * length of the bss. The data length is determined from
3552 * tp->fw->size minus the headers.
3554 * Fragmented firmware has a main header followed by multiple
3555 * fragments. Each fragment is identical to non-fragmented firmware:
3556 * a firmware header followed by a contiguous chunk of data. In
3557 * the main header, the length field is unused and set to 0xffffffff.
3558 * In each fragment header, the length is the entire size of that
3559 * fragment, i.e. fragment data plus header length. The data length is
3560 * therefore the length field in the header minus TG3_FW_HDR_LEN.
3562 if (tp->fw_len == 0xffffffff)
3563 fw_len = be32_to_cpu(fw_hdr->len);
3565 fw_len = tp->fw->size;
3567 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
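/* Layout sketch of the two image formats described above (field sizes
 * per struct tg3_firmware_hdr):
 *
 *	non-fragmented:              fragmented:
 *	+---------------------+      +-----------------------------+
 *	| hdr (len = bss len) |      | main hdr (len = 0xffffffff) |
 *	+---------------------+      +-----------------------------+
 *	| data ...            |      | frag hdr (len = hdr + data) |
 *	+---------------------+      | frag data ...               |
 *	                             +-----------------------------+
 *	                             | frag hdr / frag data ...    |
 *	                             +-----------------------------+
 */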
3570 /* tp->lock is held. */
3571 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3572 u32 cpu_scratch_base, int cpu_scratch_size,
3573 const struct tg3_firmware_hdr *fw_hdr)
3576 void (*write_op)(struct tg3 *, u32, u32);
3577 int total_len = tp->fw->size;
3579 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3581 "%s: Trying to load TX cpu firmware which is 5705\n",
3586 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3587 write_op = tg3_write_mem;
3589 write_op = tg3_write_indirect_reg32;
3591 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3592 /* It is possible that bootcode is still loading at this point.
3593 * Get the nvram lock first before halting the cpu.
3595 int lock_err = tg3_nvram_lock(tp);
3596 err = tg3_halt_cpu(tp, cpu_base);
3598 tg3_nvram_unlock(tp);
3602 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3603 write_op(tp, cpu_scratch_base + i, 0);
3604 tw32(cpu_base + CPU_STATE, 0xffffffff);
3605 tw32(cpu_base + CPU_MODE,
3606 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3608 /* Subtract additional main header for fragmented firmware and
3609 * advance to the first fragment
3611 total_len -= TG3_FW_HDR_LEN;
3616 u32 *fw_data = (u32 *)(fw_hdr + 1);
3617 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3618 write_op(tp, cpu_scratch_base +
3619 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3621 be32_to_cpu(fw_data[i]));
3623 total_len -= be32_to_cpu(fw_hdr->len);
3625 /* Advance to next fragment */
3626 fw_hdr = (struct tg3_firmware_hdr *)
3627 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3628 } while (total_len > 0);
3636 /* tp->lock is held. */
3637 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3640 const int iters = 5;
3642 tw32(cpu_base + CPU_STATE, 0xffffffff);
3643 tw32_f(cpu_base + CPU_PC, pc);
3645 for (i = 0; i < iters; i++) {
3646 if (tr32(cpu_base + CPU_PC) == pc)
3648 tw32(cpu_base + CPU_STATE, 0xffffffff);
3649 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3650 tw32_f(cpu_base + CPU_PC, pc);
3654 return (i == iters) ? -EBUSY : 0;
3657 /* tp->lock is held. */
3658 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3660 const struct tg3_firmware_hdr *fw_hdr;
3663 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3665 /* Firmware blob starts with version numbers, followed by
3666 start address and length. We are setting complete length.
3667 length = end_address_of_bss - start_address_of_text.
3668 Remainder is the blob to be loaded contiguously
3669 from start address. */
3671 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3672 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3677 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3678 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3683 /* Now startup only the RX cpu. */
3684 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3685 be32_to_cpu(fw_hdr->base_addr));
3687 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3688 "should be %08x\n", __func__,
3689 tr32(RX_CPU_BASE + CPU_PC),
3690 be32_to_cpu(fw_hdr->base_addr));
3694 tg3_rxcpu_resume(tp);
3699 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3701 const int iters = 1000;
3705 /* Wait for boot code to complete initialization and enter service
3706 * loop. It is then safe to download service patches
3708 for (i = 0; i < iters; i++) {
3709 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3716 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3720 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3722 netdev_warn(tp->dev,
3723 "Other patches exist. Not downloading EEE patch\n");
3730 /* tp->lock is held. */
3731 static void tg3_load_57766_firmware(struct tg3 *tp)
3733 struct tg3_firmware_hdr *fw_hdr;
3735 if (!tg3_flag(tp, NO_NVRAM))
3738 if (tg3_validate_rxcpu_state(tp))
3744 /* This firmware blob has a different format than older firmware
3745 * releases as given below. The main difference is we have fragmented
3746 * data to be written to non-contiguous locations.
3748 * In the beginning we have a firmware header identical to other
3749 * firmware which consists of version, base addr and length. The length
3750 * here is unused and set to 0xffffffff.
3752 * This is followed by a series of firmware fragments, each of which is
3753 * individually identical to the older firmware format, i.e. a
3754 * firmware header followed by the data for that fragment. The version
3755 * field of the individual fragment header is unused.
3758 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3759 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3762 if (tg3_rxcpu_pause(tp))
3765 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3766 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3768 tg3_rxcpu_resume(tp);
3771 /* tp->lock is held. */
3772 static int tg3_load_tso_firmware(struct tg3 *tp)
3774 const struct tg3_firmware_hdr *fw_hdr;
3775 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3778 if (!tg3_flag(tp, FW_TSO))
3781 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3783 /* Firmware blob starts with version numbers, followed by
3784 start address and length. We are setting complete length.
3785 length = end_address_of_bss - start_address_of_text.
3786 Remainder is the blob to be loaded contiguously
3787 from start address. */
3789 cpu_scratch_size = tp->fw_len;
3791 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3792 cpu_base = RX_CPU_BASE;
3793 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3795 cpu_base = TX_CPU_BASE;
3796 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3797 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3800 err = tg3_load_firmware_cpu(tp, cpu_base,
3801 cpu_scratch_base, cpu_scratch_size,
3806 /* Now startup the cpu. */
3807 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3808 be32_to_cpu(fw_hdr->base_addr));
3811 "%s fails to set CPU PC, is %08x should be %08x\n",
3812 __func__, tr32(cpu_base + CPU_PC),
3813 be32_to_cpu(fw_hdr->base_addr));
3817 tg3_resume_cpu(tp, cpu_base);
3822 /* tp->lock is held. */
3823 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3825 u32 addr_high, addr_low;
3828 addr_high = ((tp->dev->dev_addr[0] << 8) |
3829 tp->dev->dev_addr[1]);
3830 addr_low = ((tp->dev->dev_addr[2] << 24) |
3831 (tp->dev->dev_addr[3] << 16) |
3832 (tp->dev->dev_addr[4] << 8) |
3833 (tp->dev->dev_addr[5] << 0));
3834 for (i = 0; i < 4; i++) {
3835 if (i == 1 && skip_mac_1)
3837 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3838 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3841 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3842 tg3_asic_rev(tp) == ASIC_REV_5704) {
3843 for (i = 0; i < 12; i++) {
3844 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3845 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3849 addr_high = (tp->dev->dev_addr[0] +
3850 tp->dev->dev_addr[1] +
3851 tp->dev->dev_addr[2] +
3852 tp->dev->dev_addr[3] +
3853 tp->dev->dev_addr[4] +
3854 tp->dev->dev_addr[5]) &
3855 TX_BACKOFF_SEED_MASK;
3856 tw32(MAC_TX_BACKOFF_SEED, addr_high);
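/* Worked example (illustrative): for dev_addr 00:10:18:aa:bb:cc the
 * registers are packed as
 *
 *	addr_high = 0x00000010			(bytes 0-1)
 *	addr_low  = 0x18aabbcc			(bytes 2-5)
 *
 * and the TX backoff seed is the byte sum, masked:
 *
 *	(0x00 + 0x10 + 0x18 + 0xaa + 0xbb + 0xcc) & TX_BACKOFF_SEED_MASK
 */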
3859 static void tg3_enable_register_access(struct tg3 *tp)
3862 * Make sure register accesses (indirect or otherwise) will function correctly.
3865 pci_write_config_dword(tp->pdev,
3866 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3869 static int tg3_power_up(struct tg3 *tp)
3873 tg3_enable_register_access(tp);
3875 err = pci_set_power_state(tp->pdev, PCI_D0);
3877 /* Switch out of Vaux if it is a NIC */
3878 tg3_pwrsrc_switch_to_vmain(tp);
3880 netdev_err(tp->dev, "Transition to D0 failed\n");
3886 static int tg3_setup_phy(struct tg3 *, int);
3888 static int tg3_power_down_prepare(struct tg3 *tp)
3891 bool device_should_wake, do_low_power;
3893 tg3_enable_register_access(tp);
3895 /* Restore the CLKREQ setting. */
3896 if (tg3_flag(tp, CLKREQ_BUG))
3897 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3898 PCI_EXP_LNKCTL_CLKREQ_EN);
3900 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3901 tw32(TG3PCI_MISC_HOST_CTRL,
3902 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3904 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3905 tg3_flag(tp, WOL_ENABLE);
3907 if (tg3_flag(tp, USE_PHYLIB)) {
3908 do_low_power = false;
3909 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3910 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3911 struct phy_device *phydev;
3912 u32 phyid, advertising;
3914 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3916 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3918 tp->link_config.speed = phydev->speed;
3919 tp->link_config.duplex = phydev->duplex;
3920 tp->link_config.autoneg = phydev->autoneg;
3921 tp->link_config.advertising = phydev->advertising;
3923 advertising = ADVERTISED_TP |
3925 ADVERTISED_Autoneg |
3926 ADVERTISED_10baseT_Half;
3928 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3929 if (tg3_flag(tp, WOL_SPEED_100MB))
3931 ADVERTISED_100baseT_Half |
3932 ADVERTISED_100baseT_Full |
3933 ADVERTISED_10baseT_Full;
3935 advertising |= ADVERTISED_10baseT_Full;
3938 phydev->advertising = advertising;
3940 phy_start_aneg(phydev);
3942 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3943 if (phyid != PHY_ID_BCMAC131) {
3944 phyid &= PHY_BCM_OUI_MASK;
3945 if (phyid == PHY_BCM_OUI_1 ||
3946 phyid == PHY_BCM_OUI_2 ||
3947 phyid == PHY_BCM_OUI_3)
3948 do_low_power = true;
3952 do_low_power = true;
3954 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3955 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3957 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3958 tg3_setup_phy(tp, 0);
3961 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3964 val = tr32(GRC_VCPU_EXT_CTRL);
3965 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3966 } else if (!tg3_flag(tp, ENABLE_ASF)) {
3970 for (i = 0; i < 200; i++) {
3971 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3972 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3977 if (tg3_flag(tp, WOL_CAP))
3978 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3979 WOL_DRV_STATE_SHUTDOWN |
3983 if (device_should_wake) {
3986 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3988 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3989 tg3_phy_auxctl_write(tp,
3990 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3991 MII_TG3_AUXCTL_PCTL_WOL_EN |
3992 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3993 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3997 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3998 mac_mode = MAC_MODE_PORT_MODE_GMII;
4000 mac_mode = MAC_MODE_PORT_MODE_MII;
4002 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4003 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4004 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4005 SPEED_100 : SPEED_10;
4006 if (tg3_5700_link_polarity(tp, speed))
4007 mac_mode |= MAC_MODE_LINK_POLARITY;
4009 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4012 mac_mode = MAC_MODE_PORT_MODE_TBI;
4015 if (!tg3_flag(tp, 5750_PLUS))
4016 tw32(MAC_LED_CTRL, tp->led_ctrl);
4018 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4019 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4020 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4021 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4023 if (tg3_flag(tp, ENABLE_APE))
4024 mac_mode |= MAC_MODE_APE_TX_EN |
4025 MAC_MODE_APE_RX_EN |
4026 MAC_MODE_TDE_ENABLE;
4028 tw32_f(MAC_MODE, mac_mode);
4031 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4035 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4036 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4037 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4040 base_val = tp->pci_clock_ctrl;
4041 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4042 CLOCK_CTRL_TXCLK_DISABLE);
4044 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4045 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4046 } else if (tg3_flag(tp, 5780_CLASS) ||
4047 tg3_flag(tp, CPMU_PRESENT) ||
4048 tg3_asic_rev(tp) == ASIC_REV_5906) {
4050 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4051 u32 newbits1, newbits2;
4053 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4054 tg3_asic_rev(tp) == ASIC_REV_5701) {
4055 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4056 CLOCK_CTRL_TXCLK_DISABLE |
4058 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4059 } else if (tg3_flag(tp, 5705_PLUS)) {
4060 newbits1 = CLOCK_CTRL_625_CORE;
4061 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4063 newbits1 = CLOCK_CTRL_ALTCLK;
4064 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4067 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4070 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4073 if (!tg3_flag(tp, 5705_PLUS)) {
4076 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4077 tg3_asic_rev(tp) == ASIC_REV_5701) {
4078 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4079 CLOCK_CTRL_TXCLK_DISABLE |
4080 CLOCK_CTRL_44MHZ_CORE);
4082 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4085 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4086 tp->pci_clock_ctrl | newbits3, 40);
4090 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4091 tg3_power_down_phy(tp, do_low_power);
4093 tg3_frob_aux_power(tp, true);
4095 /* Workaround for unstable PLL clock */
4096 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4097 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4098 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4099 u32 val = tr32(0x7d00);
4101 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4103 if (!tg3_flag(tp, ENABLE_ASF)) {
4106 err = tg3_nvram_lock(tp);
4107 tg3_halt_cpu(tp, RX_CPU_BASE);
4109 tg3_nvram_unlock(tp);
4113 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4118 static void tg3_power_down(struct tg3 *tp)
4120 tg3_power_down_prepare(tp);
4122 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4123 pci_set_power_state(tp->pdev, PCI_D3hot);
4126 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4128 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4129 case MII_TG3_AUX_STAT_10HALF:
4131 *duplex = DUPLEX_HALF;
4134 case MII_TG3_AUX_STAT_10FULL:
4136 *duplex = DUPLEX_FULL;
4139 case MII_TG3_AUX_STAT_100HALF:
4141 *duplex = DUPLEX_HALF;
4144 case MII_TG3_AUX_STAT_100FULL:
4146 *duplex = DUPLEX_FULL;
4149 case MII_TG3_AUX_STAT_1000HALF:
4150 *speed = SPEED_1000;
4151 *duplex = DUPLEX_HALF;
4154 case MII_TG3_AUX_STAT_1000FULL:
4155 *speed = SPEED_1000;
4156 *duplex = DUPLEX_FULL;
4160 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4161 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4163 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4167 *speed = SPEED_UNKNOWN;
4168 *duplex = DUPLEX_UNKNOWN;
4173 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4178 new_adv = ADVERTISE_CSMA;
4179 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4180 new_adv |= mii_advertise_flowctrl(flowctrl);
4182 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4186 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4187 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4189 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4190 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4191 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4193 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4198 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4201 tw32(TG3_CPMU_EEE_MODE,
4202 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4204 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4209 /* Advertise 100-BaseTX EEE ability */
4210 if (advertise & ADVERTISED_100baseT_Full)
4211 val |= MDIO_AN_EEE_ADV_100TX;
4212 /* Advertise 1000-BaseT EEE ability */
4213 if (advertise & ADVERTISED_1000baseT_Full)
4214 val |= MDIO_AN_EEE_ADV_1000T;
4215 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4219 switch (tg3_asic_rev(tp)) {
4221 case ASIC_REV_57765:
4222 case ASIC_REV_57766:
4224 /* If we advertised any EEE abilities above... */
4226 val = MII_TG3_DSP_TAP26_ALNOKO |
4227 MII_TG3_DSP_TAP26_RMRXSTO |
4228 MII_TG3_DSP_TAP26_OPCSINPT;
4229 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4233 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4234 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4235 MII_TG3_DSP_CH34TP2_HIBW01);
4238 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
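/* Mapping sketch (illustrative) for the linux/mii.h helpers used above:
 *
 *	ethtool_adv_to_mii_adv_t(ADVERTISED_100baseT_Full)
 *		== ADVERTISE_100FULL
 *	mii_advertise_flowctrl(FLOW_CTRL_RX | FLOW_CTRL_TX)
 *		== ADVERTISE_PAUSE_CAP
 *	mii_advertise_flowctrl(FLOW_CTRL_RX)
 *		== ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM
 *
 * so new_adv ends up in the standard MII_ADVERTISE bit layout.
 */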
4247 static void tg3_phy_copper_begin(struct tg3 *tp)
4249 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4250 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4253 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
4254 adv = ADVERTISED_10baseT_Half |
4255 ADVERTISED_10baseT_Full;
4256 if (tg3_flag(tp, WOL_SPEED_100MB))
4257 adv |= ADVERTISED_100baseT_Half |
4258 ADVERTISED_100baseT_Full;
4260 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4262 adv = tp->link_config.advertising;
4263 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4264 adv &= ~(ADVERTISED_1000baseT_Half |
4265 ADVERTISED_1000baseT_Full);
4267 fc = tp->link_config.flowctrl;
4270 tg3_phy_autoneg_cfg(tp, adv, fc);
4272 tg3_writephy(tp, MII_BMCR,
4273 BMCR_ANENABLE | BMCR_ANRESTART);
4276 u32 bmcr, orig_bmcr;
4278 tp->link_config.active_speed = tp->link_config.speed;
4279 tp->link_config.active_duplex = tp->link_config.duplex;
4281 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4282 /* With autoneg disabled, 5715 only links up when the
4283 * advertisement register has the configured speed enabled.
4286 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4290 switch (tp->link_config.speed) {
4296 bmcr |= BMCR_SPEED100;
4300 bmcr |= BMCR_SPEED1000;
4304 if (tp->link_config.duplex == DUPLEX_FULL)
4305 bmcr |= BMCR_FULLDPLX;
4307 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4308 (bmcr != orig_bmcr)) {
4309 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4310 for (i = 0; i < 1500; i++) {
4314 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4315 tg3_readphy(tp, MII_BMSR, &tmp))
4317 if (!(tmp & BMSR_LSTATUS)) {
4322 tg3_writephy(tp, MII_BMCR, bmcr);
4328 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4332 /* Turn off tap power management. */
4333 /* Set Extended packet length bit */
4334 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4336 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4337 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4338 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4339 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4340 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4347 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4349 u32 advmsk, tgtadv, advertising;
4351 advertising = tp->link_config.advertising;
4352 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4354 advmsk = ADVERTISE_ALL;
4355 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4356 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4357 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4360 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4363 if ((*lcladv & advmsk) != tgtadv)
4366 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4369 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4371 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4375 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4376 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4377 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4378 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4379 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4381 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4384 if (tg3_ctrl != tgtadv)
4391 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4395 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4398 if (tg3_readphy(tp, MII_STAT1000, &val))
4401 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4404 if (tg3_readphy(tp, MII_LPA, rmtadv))
4407 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4408 tp->link_config.rmt_adv = lpeth;
4413 static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
4415 if (curr_link_up != tp->link_up) {
4417 netif_carrier_on(tp->dev);
4419 netif_carrier_off(tp->dev);
4420 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4421 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4424 tg3_link_report(tp);
4431 static void tg3_clear_mac_status(struct tg3 *tp)
4436 MAC_STATUS_SYNC_CHANGED |
4437 MAC_STATUS_CFG_CHANGED |
4438 MAC_STATUS_MI_COMPLETION |
4439 MAC_STATUS_LNKSTATE_CHANGED);
4443 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4445 int current_link_up;
4447 u32 lcl_adv, rmt_adv;
4452 tg3_clear_mac_status(tp);
4454 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4456 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4460 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4462 /* Some third-party PHYs need to be reset on link going down. */
4465 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4466 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4467 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4469 tg3_readphy(tp, MII_BMSR, &bmsr);
4470 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4471 !(bmsr & BMSR_LSTATUS))
4477 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4478 tg3_readphy(tp, MII_BMSR, &bmsr);
4479 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4480 !tg3_flag(tp, INIT_COMPLETE))
4483 if (!(bmsr & BMSR_LSTATUS)) {
4484 err = tg3_init_5401phy_dsp(tp);
4488 tg3_readphy(tp, MII_BMSR, &bmsr);
4489 for (i = 0; i < 1000; i++) {
4491 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4492 (bmsr & BMSR_LSTATUS)) {
4498 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4499 TG3_PHY_REV_BCM5401_B0 &&
4500 !(bmsr & BMSR_LSTATUS) &&
4501 tp->link_config.active_speed == SPEED_1000) {
4502 err = tg3_phy_reset(tp);
4504 err = tg3_init_5401phy_dsp(tp);
4509 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4510 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4511 /* 5701 {A0,B0} CRC bug workaround */
4512 tg3_writephy(tp, 0x15, 0x0a75);
4513 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4514 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4515 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4518 /* Clear pending interrupts... */
4519 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4520 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4522 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4523 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4524 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4525 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4527 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4528 tg3_asic_rev(tp) == ASIC_REV_5701) {
4529 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4530 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4531 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4533 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4536 current_link_up = 0;
4537 current_speed = SPEED_UNKNOWN;
4538 current_duplex = DUPLEX_UNKNOWN;
4539 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4540 tp->link_config.rmt_adv = 0;
4542 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4543 err = tg3_phy_auxctl_read(tp,
4544 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4546 if (!err && !(val & (1 << 10))) {
4547 tg3_phy_auxctl_write(tp,
4548 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4555 for (i = 0; i < 100; i++) {
4556 tg3_readphy(tp, MII_BMSR, &bmsr);
4557 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4558 (bmsr & BMSR_LSTATUS))
4563 if (bmsr & BMSR_LSTATUS) {
4566 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4567 for (i = 0; i < 2000; i++) {
4569 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4574 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4579 for (i = 0; i < 200; i++) {
4580 tg3_readphy(tp, MII_BMCR, &bmcr);
4581 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4583 if (bmcr && bmcr != 0x7fff)
4591 tp->link_config.active_speed = current_speed;
4592 tp->link_config.active_duplex = current_duplex;
4594 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4595 if ((bmcr & BMCR_ANENABLE) &&
4596 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4597 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4598 current_link_up = 1;
4600 if (!(bmcr & BMCR_ANENABLE) &&
4601 tp->link_config.speed == current_speed &&
4602 tp->link_config.duplex == current_duplex) {
4603 current_link_up = 1;
4607 if (current_link_up == 1 &&
4608 tp->link_config.active_duplex == DUPLEX_FULL) {
4611 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4612 reg = MII_TG3_FET_GEN_STAT;
4613 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4615 reg = MII_TG3_EXT_STAT;
4616 bit = MII_TG3_EXT_STAT_MDIX;
4619 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4620 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4622 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4627 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4628 tg3_phy_copper_begin(tp);
4630 if (tg3_flag(tp, ROBOSWITCH)) {
4631 current_link_up = 1;
4632 /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4633 current_speed = SPEED_1000;
4634 current_duplex = DUPLEX_FULL;
4635 tp->link_config.active_speed = current_speed;
4636 tp->link_config.active_duplex = current_duplex;
4639 tg3_readphy(tp, MII_BMSR, &bmsr);
4640 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4641 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4642 current_link_up = 1;
4645 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4646 if (current_link_up == 1) {
4647 if (tp->link_config.active_speed == SPEED_100 ||
4648 tp->link_config.active_speed == SPEED_10)
4649 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4651 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4652 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4653 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4655 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4657 /* In order for the 5750 core in the BCM4785 chip to work properly
4658 * in RGMII mode, the Led Control Register must be set up.
4660 if (tg3_flag(tp, RGMII_MODE)) {
4661 u32 led_ctrl = tr32(MAC_LED_CTRL);
4662 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4664 if (tp->link_config.active_speed == SPEED_10)
4665 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4666 else if (tp->link_config.active_speed == SPEED_100)
4667 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4668 LED_CTRL_100MBPS_ON);
4669 else if (tp->link_config.active_speed == SPEED_1000)
4670 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4671 LED_CTRL_1000MBPS_ON);
4673 tw32(MAC_LED_CTRL, led_ctrl);
4677 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4678 if (tp->link_config.active_duplex == DUPLEX_HALF)
4679 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4681 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4682 if (current_link_up == 1 &&
4683 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4684 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4686 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4689 /* ??? Without this setting Netgear GA302T PHY does not
4690 * ??? send/receive packets...
4692 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4693 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
4694 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4695 tw32_f(MAC_MI_MODE, tp->mi_mode);
4699 tw32_f(MAC_MODE, tp->mac_mode);
4702 tg3_phy_eee_adjust(tp, current_link_up);
4704 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4705 /* Polled via timer. */
4706 tw32_f(MAC_EVENT, 0);
4708 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4712 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
4713 current_link_up == 1 &&
4714 tp->link_config.active_speed == SPEED_1000 &&
4715 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4718 (MAC_STATUS_SYNC_CHANGED |
4719 MAC_STATUS_CFG_CHANGED));
4722 NIC_SRAM_FIRMWARE_MBOX,
4723 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4726 /* Prevent send BD corruption. */
4727 if (tg3_flag(tp, CLKREQ_BUG)) {
4728 if (tp->link_config.active_speed == SPEED_100 ||
4729 tp->link_config.active_speed == SPEED_10)
4730 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4731 PCI_EXP_LNKCTL_CLKREQ_EN);
4733 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4734 PCI_EXP_LNKCTL_CLKREQ_EN);
4737 tg3_test_and_report_link_chg(tp, current_link_up);
4742 struct tg3_fiber_aneginfo {
4744 #define ANEG_STATE_UNKNOWN 0
4745 #define ANEG_STATE_AN_ENABLE 1
4746 #define ANEG_STATE_RESTART_INIT 2
4747 #define ANEG_STATE_RESTART 3
4748 #define ANEG_STATE_DISABLE_LINK_OK 4
4749 #define ANEG_STATE_ABILITY_DETECT_INIT 5
4750 #define ANEG_STATE_ABILITY_DETECT 6
4751 #define ANEG_STATE_ACK_DETECT_INIT 7
4752 #define ANEG_STATE_ACK_DETECT 8
4753 #define ANEG_STATE_COMPLETE_ACK_INIT 9
4754 #define ANEG_STATE_COMPLETE_ACK 10
4755 #define ANEG_STATE_IDLE_DETECT_INIT 11
4756 #define ANEG_STATE_IDLE_DETECT 12
4757 #define ANEG_STATE_LINK_OK 13
4758 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
4759 #define ANEG_STATE_NEXT_PAGE_WAIT 15
4762 #define MR_AN_ENABLE 0x00000001
4763 #define MR_RESTART_AN 0x00000002
4764 #define MR_AN_COMPLETE 0x00000004
4765 #define MR_PAGE_RX 0x00000008
4766 #define MR_NP_LOADED 0x00000010
4767 #define MR_TOGGLE_TX 0x00000020
4768 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
4769 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
4770 #define MR_LP_ADV_SYM_PAUSE 0x00000100
4771 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
4772 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4773 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4774 #define MR_LP_ADV_NEXT_PAGE 0x00001000
4775 #define MR_TOGGLE_RX 0x00002000
4776 #define MR_NP_RX 0x00004000
4778 #define MR_LINK_OK 0x80000000
4780 unsigned long link_time, cur_time;
4782 u32 ability_match_cfg;
4783 int ability_match_count;
4785 char ability_match, idle_match, ack_match;
4787 u32 txconfig, rxconfig;
4788 #define ANEG_CFG_NP 0x00000080
4789 #define ANEG_CFG_ACK 0x00000040
4790 #define ANEG_CFG_RF2 0x00000020
4791 #define ANEG_CFG_RF1 0x00000010
4792 #define ANEG_CFG_PS2 0x00000001
4793 #define ANEG_CFG_PS1 0x00008000
4794 #define ANEG_CFG_HD 0x00004000
4795 #define ANEG_CFG_FD 0x00002000
4796 #define ANEG_CFG_INVAL 0x00001f06
4801 #define ANEG_TIMER_ENAB 2
4802 #define ANEG_FAILED -1
4804 #define ANEG_STATE_SETTLE_TIME 10000
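
/* A rough map of the happy path through the state machine below
 * (inferred from the switch statement, not from a hardware spec):
 *
 *   AN_ENABLE -> RESTART_INIT -> RESTART
 *     -> ABILITY_DETECT_INIT -> ABILITY_DETECT
 *     -> ACK_DETECT_INIT -> ACK_DETECT
 *     -> COMPLETE_ACK_INIT -> COMPLETE_ACK
 *     -> IDLE_DETECT_INIT -> IDLE_DETECT -> LINK_OK
 *
 * If the partner's config stream disappears (ability_match set while
 * rxconfig reads zero), the machine drops back to AN_ENABLE.
 */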
4806 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4807 struct tg3_fiber_aneginfo *ap)
4810 unsigned long delta;
4814 if (ap->state == ANEG_STATE_UNKNOWN) {
4818 ap->ability_match_cfg = 0;
4819 ap->ability_match_count = 0;
4820 ap->ability_match = 0;
4826 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4827 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4829 if (rx_cfg_reg != ap->ability_match_cfg) {
4830 ap->ability_match_cfg = rx_cfg_reg;
4831 ap->ability_match = 0;
4832 ap->ability_match_count = 0;
4834 if (++ap->ability_match_count > 1) {
4835 ap->ability_match = 1;
4836 ap->ability_match_cfg = rx_cfg_reg;
4839 if (rx_cfg_reg & ANEG_CFG_ACK)
4847 ap->ability_match_cfg = 0;
4848 ap->ability_match_count = 0;
4849 ap->ability_match = 0;
4855 ap->rxconfig = rx_cfg_reg;
4858 switch (ap->state) {
4859 case ANEG_STATE_UNKNOWN:
4860 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4861 ap->state = ANEG_STATE_AN_ENABLE;
4864 case ANEG_STATE_AN_ENABLE:
4865 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4866 if (ap->flags & MR_AN_ENABLE) {
4869 ap->ability_match_cfg = 0;
4870 ap->ability_match_count = 0;
4871 ap->ability_match = 0;
4875 ap->state = ANEG_STATE_RESTART_INIT;
4877 ap->state = ANEG_STATE_DISABLE_LINK_OK;
4881 case ANEG_STATE_RESTART_INIT:
4882 ap->link_time = ap->cur_time;
4883 ap->flags &= ~(MR_NP_LOADED);
4885 tw32(MAC_TX_AUTO_NEG, 0);
4886 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4887 tw32_f(MAC_MODE, tp->mac_mode);
4890 ret = ANEG_TIMER_ENAB;
4891 ap->state = ANEG_STATE_RESTART;
4894 case ANEG_STATE_RESTART:
4895 delta = ap->cur_time - ap->link_time;
4896 if (delta > ANEG_STATE_SETTLE_TIME)
4897 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4899 ret = ANEG_TIMER_ENAB;
4902 case ANEG_STATE_DISABLE_LINK_OK:
4906 case ANEG_STATE_ABILITY_DETECT_INIT:
4907 ap->flags &= ~(MR_TOGGLE_TX);
4908 ap->txconfig = ANEG_CFG_FD;
4909 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4910 if (flowctrl & ADVERTISE_1000XPAUSE)
4911 ap->txconfig |= ANEG_CFG_PS1;
4912 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4913 ap->txconfig |= ANEG_CFG_PS2;
4914 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4915 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4916 tw32_f(MAC_MODE, tp->mac_mode);
4919 ap->state = ANEG_STATE_ABILITY_DETECT;
4922 case ANEG_STATE_ABILITY_DETECT:
4923 if (ap->ability_match != 0 && ap->rxconfig != 0)
4924 ap->state = ANEG_STATE_ACK_DETECT_INIT;
4927 case ANEG_STATE_ACK_DETECT_INIT:
4928 ap->txconfig |= ANEG_CFG_ACK;
4929 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4930 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4931 tw32_f(MAC_MODE, tp->mac_mode);
4934 ap->state = ANEG_STATE_ACK_DETECT;
4937 case ANEG_STATE_ACK_DETECT:
4938 if (ap->ack_match != 0) {
4939 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4940 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4941 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4943 ap->state = ANEG_STATE_AN_ENABLE;
4945 } else if (ap->ability_match != 0 &&
4946 ap->rxconfig == 0) {
4947 ap->state = ANEG_STATE_AN_ENABLE;
4951 case ANEG_STATE_COMPLETE_ACK_INIT:
4952 if (ap->rxconfig & ANEG_CFG_INVAL) {
4956 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4957 MR_LP_ADV_HALF_DUPLEX |
4958 MR_LP_ADV_SYM_PAUSE |
4959 MR_LP_ADV_ASYM_PAUSE |
4960 MR_LP_ADV_REMOTE_FAULT1 |
4961 MR_LP_ADV_REMOTE_FAULT2 |
4962 MR_LP_ADV_NEXT_PAGE |
4965 if (ap->rxconfig & ANEG_CFG_FD)
4966 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4967 if (ap->rxconfig & ANEG_CFG_HD)
4968 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4969 if (ap->rxconfig & ANEG_CFG_PS1)
4970 ap->flags |= MR_LP_ADV_SYM_PAUSE;
4971 if (ap->rxconfig & ANEG_CFG_PS2)
4972 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4973 if (ap->rxconfig & ANEG_CFG_RF1)
4974 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4975 if (ap->rxconfig & ANEG_CFG_RF2)
4976 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4977 if (ap->rxconfig & ANEG_CFG_NP)
4978 ap->flags |= MR_LP_ADV_NEXT_PAGE;
4980 ap->link_time = ap->cur_time;
4982 ap->flags ^= (MR_TOGGLE_TX);
4983 if (ap->rxconfig & 0x0008)
4984 ap->flags |= MR_TOGGLE_RX;
4985 if (ap->rxconfig & ANEG_CFG_NP)
4986 ap->flags |= MR_NP_RX;
4987 ap->flags |= MR_PAGE_RX;
4989 ap->state = ANEG_STATE_COMPLETE_ACK;
4990 ret = ANEG_TIMER_ENAB;
4993 case ANEG_STATE_COMPLETE_ACK:
4994 if (ap->ability_match != 0 &&
4995 ap->rxconfig == 0) {
4996 ap->state = ANEG_STATE_AN_ENABLE;
4999 delta = ap->cur_time - ap->link_time;
5000 if (delta > ANEG_STATE_SETTLE_TIME) {
5001 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5002 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5004 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5005 !(ap->flags & MR_NP_RX)) {
5006 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5014 case ANEG_STATE_IDLE_DETECT_INIT:
5015 ap->link_time = ap->cur_time;
5016 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5017 tw32_f(MAC_MODE, tp->mac_mode);
5020 ap->state = ANEG_STATE_IDLE_DETECT;
5021 ret = ANEG_TIMER_ENAB;
5024 case ANEG_STATE_IDLE_DETECT:
5025 if (ap->ability_match != 0 &&
5026 ap->rxconfig == 0) {
5027 ap->state = ANEG_STATE_AN_ENABLE;
5030 delta = ap->cur_time - ap->link_time;
5031 if (delta > ANEG_STATE_SETTLE_TIME) {
5032 /* XXX another gem from the Broadcom driver :( */
5033 ap->state = ANEG_STATE_LINK_OK;
5037 case ANEG_STATE_LINK_OK:
5038 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5042 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5043 /* ??? unimplemented */
5046 case ANEG_STATE_NEXT_PAGE_WAIT:
5047 /* ??? unimplemented */
5058 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5061 struct tg3_fiber_aneginfo aninfo;
5062 int status = ANEG_FAILED;
5066 tw32_f(MAC_TX_AUTO_NEG, 0);
5068 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5069 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5072 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5075 memset(&aninfo, 0, sizeof(aninfo));
5076 aninfo.flags |= MR_AN_ENABLE;
5077 aninfo.state = ANEG_STATE_UNKNOWN;
5078 aninfo.cur_time = 0;
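
/* Poll the autoneg state machine below. The loop is bounded at
 * 195000 ticks; each tick is paced by a short delay (on the order
 * of a microsecond in this driver), so this amounts to roughly a
 * 200 ms autonegotiation window. Treat the exact timing as an
 * estimate, not a guarantee.
 */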
5080 while (++tick < 195000) {
5081 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5082 if (status == ANEG_DONE || status == ANEG_FAILED)
5088 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5089 tw32_f(MAC_MODE, tp->mac_mode);
5092 *txflags = aninfo.txconfig;
5093 *rxflags = aninfo.flags;
5095 if (status == ANEG_DONE &&
5096 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5097 MR_LP_ADV_FULL_DUPLEX)))
5103 static void tg3_init_bcm8002(struct tg3 *tp)
5105 u32 mac_status = tr32(MAC_STATUS);
/* Reset when initializing for the first time or when we already have a link. */
5109 if (tg3_flag(tp, INIT_COMPLETE) &&
5110 !(mac_status & MAC_STATUS_PCS_SYNCED))
5113 /* Set PLL lock range. */
5114 tg3_writephy(tp, 0x16, 0x8007);
5117 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5119 /* Wait for reset to complete. */
5120 /* XXX schedule_timeout() ... */
5121 for (i = 0; i < 500; i++)
5124 /* Config mode; select PMA/Ch 1 regs. */
5125 tg3_writephy(tp, 0x10, 0x8411);
5127 /* Enable auto-lock and comdet, select txclk for tx. */
5128 tg3_writephy(tp, 0x11, 0x0a10);
5130 tg3_writephy(tp, 0x18, 0x00a0);
5131 tg3_writephy(tp, 0x16, 0x41ff);
5133 /* Assert and deassert POR. */
5134 tg3_writephy(tp, 0x13, 0x0400);
5136 tg3_writephy(tp, 0x13, 0x0000);
5138 tg3_writephy(tp, 0x11, 0x0a50);
5140 tg3_writephy(tp, 0x11, 0x0a10);
5142 /* Wait for signal to stabilize */
5143 /* XXX schedule_timeout() ... */
5144 for (i = 0; i < 15000; i++)
5147 /* Deselect the channel register so we can read the PHYID
5150 tg3_writephy(tp, 0x10, 0x8011);
5153 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5156 u32 sg_dig_ctrl, sg_dig_status;
5157 u32 serdes_cfg, expected_sg_dig_ctrl;
5158 int workaround, port_a;
5159 int current_link_up;
5162 expected_sg_dig_ctrl = 0;
5165 current_link_up = 0;
5167 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5168 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5170 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5173 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5174 /* preserve bits 20-23 for voltage regulator */
5175 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5178 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5180 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5181 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5183 u32 val = serdes_cfg;
5189 tw32_f(MAC_SERDES_CFG, val);
5192 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5194 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5195 tg3_setup_flow_control(tp, 0, 0);
5196 current_link_up = 1;
5201 /* Want auto-negotiation. */
5202 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5204 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5205 if (flowctrl & ADVERTISE_1000XPAUSE)
5206 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5207 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5208 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5210 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5211 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5212 tp->serdes_counter &&
5213 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5214 MAC_STATUS_RCVD_CFG)) ==
5215 MAC_STATUS_PCS_SYNCED)) {
5216 tp->serdes_counter--;
5217 current_link_up = 1;
5222 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5223 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5225 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5227 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5228 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5229 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5230 MAC_STATUS_SIGNAL_DET)) {
5231 sg_dig_status = tr32(SG_DIG_STATUS);
5232 mac_status = tr32(MAC_STATUS);
5234 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5235 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5236 u32 local_adv = 0, remote_adv = 0;
5238 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5239 local_adv |= ADVERTISE_1000XPAUSE;
5240 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5241 local_adv |= ADVERTISE_1000XPSE_ASYM;
5243 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5244 remote_adv |= LPA_1000XPAUSE;
5245 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5246 remote_adv |= LPA_1000XPAUSE_ASYM;
5248 tp->link_config.rmt_adv =
5249 mii_adv_to_ethtool_adv_x(remote_adv);
5251 tg3_setup_flow_control(tp, local_adv, remote_adv);
5252 current_link_up = 1;
5253 tp->serdes_counter = 0;
5254 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5255 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5256 if (tp->serdes_counter)
5257 tp->serdes_counter--;
5260 u32 val = serdes_cfg;
5267 tw32_f(MAC_SERDES_CFG, val);
5270 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
/* Link parallel detection: the link is up only if we
 * have PCS_SYNC and are not receiving config code words.
 */
5276 mac_status = tr32(MAC_STATUS);
5277 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5278 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5279 tg3_setup_flow_control(tp, 0, 0);
5280 current_link_up = 1;
5282 TG3_PHYFLG_PARALLEL_DETECT;
5283 tp->serdes_counter =
5284 SERDES_PARALLEL_DET_TIMEOUT;
5286 goto restart_autoneg;
5290 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5291 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5295 return current_link_up;
5298 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5300 int current_link_up = 0;
5302 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5305 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5306 u32 txflags, rxflags;
5309 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5310 u32 local_adv = 0, remote_adv = 0;
5312 if (txflags & ANEG_CFG_PS1)
5313 local_adv |= ADVERTISE_1000XPAUSE;
5314 if (txflags & ANEG_CFG_PS2)
5315 local_adv |= ADVERTISE_1000XPSE_ASYM;
5317 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5318 remote_adv |= LPA_1000XPAUSE;
5319 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5320 remote_adv |= LPA_1000XPAUSE_ASYM;
5322 tp->link_config.rmt_adv =
5323 mii_adv_to_ethtool_adv_x(remote_adv);
5325 tg3_setup_flow_control(tp, local_adv, remote_adv);
5327 current_link_up = 1;
5329 for (i = 0; i < 30; i++) {
5332 (MAC_STATUS_SYNC_CHANGED |
5333 MAC_STATUS_CFG_CHANGED));
5335 if ((tr32(MAC_STATUS) &
5336 (MAC_STATUS_SYNC_CHANGED |
5337 MAC_STATUS_CFG_CHANGED)) == 0)
5341 mac_status = tr32(MAC_STATUS);
5342 if (current_link_up == 0 &&
5343 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5344 !(mac_status & MAC_STATUS_RCVD_CFG))
5345 current_link_up = 1;
5347 tg3_setup_flow_control(tp, 0, 0);
5349 /* Forcing 1000FD link up. */
5350 current_link_up = 1;
5352 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5355 tw32_f(MAC_MODE, tp->mac_mode);
5360 return current_link_up;
5363 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5366 u16 orig_active_speed;
5367 u8 orig_active_duplex;
5369 int current_link_up;
5372 orig_pause_cfg = tp->link_config.active_flowctrl;
5373 orig_active_speed = tp->link_config.active_speed;
5374 orig_active_duplex = tp->link_config.active_duplex;
5376 if (!tg3_flag(tp, HW_AUTONEG) &&
5378 tg3_flag(tp, INIT_COMPLETE)) {
5379 mac_status = tr32(MAC_STATUS);
5380 mac_status &= (MAC_STATUS_PCS_SYNCED |
5381 MAC_STATUS_SIGNAL_DET |
5382 MAC_STATUS_CFG_CHANGED |
5383 MAC_STATUS_RCVD_CFG);
5384 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5385 MAC_STATUS_SIGNAL_DET)) {
5386 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5387 MAC_STATUS_CFG_CHANGED));
5392 tw32_f(MAC_TX_AUTO_NEG, 0);
5394 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5395 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5396 tw32_f(MAC_MODE, tp->mac_mode);
5399 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5400 tg3_init_bcm8002(tp);
5402 /* Enable link change event even when serdes polling. */
5403 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5406 current_link_up = 0;
5407 tp->link_config.rmt_adv = 0;
5408 mac_status = tr32(MAC_STATUS);
5410 if (tg3_flag(tp, HW_AUTONEG))
5411 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5413 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5415 tp->napi[0].hw_status->status =
5416 (SD_STATUS_UPDATED |
5417 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5419 for (i = 0; i < 100; i++) {
5420 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5421 MAC_STATUS_CFG_CHANGED));
5423 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5424 MAC_STATUS_CFG_CHANGED |
5425 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5429 mac_status = tr32(MAC_STATUS);
5430 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5431 current_link_up = 0;
5432 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5433 tp->serdes_counter == 0) {
5434 tw32_f(MAC_MODE, (tp->mac_mode |
5435 MAC_MODE_SEND_CONFIGS));
5437 tw32_f(MAC_MODE, tp->mac_mode);
5441 if (current_link_up == 1) {
5442 tp->link_config.active_speed = SPEED_1000;
5443 tp->link_config.active_duplex = DUPLEX_FULL;
5444 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5445 LED_CTRL_LNKLED_OVERRIDE |
5446 LED_CTRL_1000MBPS_ON));
5448 tp->link_config.active_speed = SPEED_UNKNOWN;
5449 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5450 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5451 LED_CTRL_LNKLED_OVERRIDE |
5452 LED_CTRL_TRAFFIC_OVERRIDE));
5455 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5456 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5457 if (orig_pause_cfg != now_pause_cfg ||
5458 orig_active_speed != tp->link_config.active_speed ||
5459 orig_active_duplex != tp->link_config.active_duplex)
5460 tg3_link_report(tp);
5466 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5468 int current_link_up = 0, err = 0;
5470 u16 current_speed = SPEED_UNKNOWN;
5471 u8 current_duplex = DUPLEX_UNKNOWN;
5472 u32 local_adv, remote_adv, sgsr;
5474 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5475 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5476 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5477 (sgsr & SERDES_TG3_SGMII_MODE)) {
5482 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5484 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5485 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5487 current_link_up = 1;
5488 if (sgsr & SERDES_TG3_SPEED_1000) {
5489 current_speed = SPEED_1000;
5490 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5491 } else if (sgsr & SERDES_TG3_SPEED_100) {
5492 current_speed = SPEED_100;
5493 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5495 current_speed = SPEED_10;
5496 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5499 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5500 current_duplex = DUPLEX_FULL;
5502 current_duplex = DUPLEX_HALF;
5505 tw32_f(MAC_MODE, tp->mac_mode);
5508 tg3_clear_mac_status(tp);
5510 goto fiber_setup_done;
5513 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5514 tw32_f(MAC_MODE, tp->mac_mode);
5517 tg3_clear_mac_status(tp);
5522 tp->link_config.rmt_adv = 0;
5524 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5525 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5526 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5527 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5528 bmsr |= BMSR_LSTATUS;
5530 bmsr &= ~BMSR_LSTATUS;
5533 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5535 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5536 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5537 /* do nothing, just check for link up at the end */
5538 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5541 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5542 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5543 ADVERTISE_1000XPAUSE |
5544 ADVERTISE_1000XPSE_ASYM |
5547 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5548 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5550 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5551 tg3_writephy(tp, MII_ADVERTISE, newadv);
5552 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5553 tg3_writephy(tp, MII_BMCR, bmcr);
5555 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5556 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5557 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5564 bmcr &= ~BMCR_SPEED1000;
5565 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5567 if (tp->link_config.duplex == DUPLEX_FULL)
5568 new_bmcr |= BMCR_FULLDPLX;
5570 if (new_bmcr != bmcr) {
5571 /* BMCR_SPEED1000 is a reserved bit that needs
5572 * to be set on write.
5574 new_bmcr |= BMCR_SPEED1000;
5576 /* Force a linkdown */
5580 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5581 adv &= ~(ADVERTISE_1000XFULL |
5582 ADVERTISE_1000XHALF |
5584 tg3_writephy(tp, MII_ADVERTISE, adv);
5585 tg3_writephy(tp, MII_BMCR, bmcr |
5589 tg3_carrier_off(tp);
5591 tg3_writephy(tp, MII_BMCR, new_bmcr);
5593 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5594 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5595 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5596 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5597 bmsr |= BMSR_LSTATUS;
5599 bmsr &= ~BMSR_LSTATUS;
5601 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5605 if (bmsr & BMSR_LSTATUS) {
5606 current_speed = SPEED_1000;
5607 current_link_up = 1;
5608 if (bmcr & BMCR_FULLDPLX)
5609 current_duplex = DUPLEX_FULL;
5611 current_duplex = DUPLEX_HALF;
5616 if (bmcr & BMCR_ANENABLE) {
5619 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5620 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5621 common = local_adv & remote_adv;
5622 if (common & (ADVERTISE_1000XHALF |
5623 ADVERTISE_1000XFULL)) {
5624 if (common & ADVERTISE_1000XFULL)
5625 current_duplex = DUPLEX_FULL;
5627 current_duplex = DUPLEX_HALF;
5629 tp->link_config.rmt_adv =
5630 mii_adv_to_ethtool_adv_x(remote_adv);
5631 } else if (!tg3_flag(tp, 5780_CLASS)) {
5632 /* Link is up via parallel detect */
5634 current_link_up = 0;
5640 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5641 tg3_setup_flow_control(tp, local_adv, remote_adv);
5643 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5644 if (tp->link_config.active_duplex == DUPLEX_HALF)
5645 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5647 tw32_f(MAC_MODE, tp->mac_mode);
5650 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5652 tp->link_config.active_speed = current_speed;
5653 tp->link_config.active_duplex = current_duplex;
5655 tg3_test_and_report_link_chg(tp, current_link_up);
5659 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5661 if (tp->serdes_counter) {
5662 /* Give autoneg time to complete. */
5663 tp->serdes_counter--;
5668 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5671 tg3_readphy(tp, MII_BMCR, &bmcr);
5672 if (bmcr & BMCR_ANENABLE) {
5675 /* Select shadow register 0x1f */
5676 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5677 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5679 /* Select expansion interrupt status register */
5680 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5681 MII_TG3_DSP_EXP1_INT_STAT);
5682 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5683 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5685 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
/* We have signal detect and are not receiving
 * config code words, so the link is up by parallel
 * detection.
 */
5691 bmcr &= ~BMCR_ANENABLE;
5692 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5693 tg3_writephy(tp, MII_BMCR, bmcr);
5694 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5697 } else if (tp->link_up &&
5698 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5699 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5702 /* Select expansion interrupt status register */
5703 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5704 MII_TG3_DSP_EXP1_INT_STAT);
5705 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5709 /* Config code words received, turn on autoneg. */
5710 tg3_readphy(tp, MII_BMCR, &bmcr);
5711 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5713 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5719 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5724 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5725 err = tg3_setup_fiber_phy(tp, force_reset);
5726 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5727 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5729 err = tg3_setup_copper_phy(tp, force_reset);
5731 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
5734 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5735 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5737 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5742 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5743 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5744 tw32(GRC_MISC_CFG, val);
5747 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5748 (6 << TX_LENGTHS_IPG_SHIFT);
5749 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
5750 tg3_asic_rev(tp) == ASIC_REV_5762)
5751 val |= tr32(MAC_TX_LENGTHS) &
5752 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5753 TX_LENGTHS_CNT_DWN_VAL_MSK);
5755 if (tp->link_config.active_speed == SPEED_1000 &&
5756 tp->link_config.active_duplex == DUPLEX_HALF)
5757 tw32(MAC_TX_LENGTHS, val |
5758 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5760 tw32(MAC_TX_LENGTHS, val |
5761 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5763 if (!tg3_flag(tp, 5705_PLUS)) {
5765 tw32(HOSTCC_STAT_COAL_TICKS,
5766 tp->coal.stats_block_coalesce_usecs);
5768 tw32(HOSTCC_STAT_COAL_TICKS, 0);
5772 if (tg3_flag(tp, ASPM_WORKAROUND)) {
5773 val = tr32(PCIE_PWR_MGMT_THRESH);
5775 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5778 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5779 tw32(PCIE_PWR_MGMT_THRESH, val);
5785 /* tp->lock must be held */
5786 static u64 tg3_refclk_read(struct tg3 *tp)
5788 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
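/* Read order matters here: the low word is read first, which
 * presumably latches a coherent high word for the read below;
 * otherwise a rollover between the two reads could tear the
 * 64-bit value.
 */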
5789 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
5792 /* tp->lock must be held */
5793 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
5795 tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
5796 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
5797 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
5798 tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
5801 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
5802 static inline void tg3_full_unlock(struct tg3 *tp);
5803 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
5805 struct tg3 *tp = netdev_priv(dev);
5807 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5808 SOF_TIMESTAMPING_RX_SOFTWARE |
5809 SOF_TIMESTAMPING_SOFTWARE |
5810 SOF_TIMESTAMPING_TX_HARDWARE |
5811 SOF_TIMESTAMPING_RX_HARDWARE |
5812 SOF_TIMESTAMPING_RAW_HARDWARE;
5815 info->phc_index = ptp_clock_index(tp->ptp_clock);
5817 info->phc_index = -1;
5819 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
5821 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
5822 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
5823 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
5824 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
5828 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
5830 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5831 bool neg_adj = false;
5839 /* Frequency adjustment is performed using hardware with a 24 bit
5840 * accumulator and a programmable correction value. On each clk, the
5841 * correction value gets added to the accumulator and when it
5842 * overflows, the time counter is incremented/decremented.
5844 * So conversion from ppb to correction value is
5845 * ppb * (1 << 24) / 1000000000
5847 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
5848 TG3_EAV_REF_CLK_CORRECT_MASK;
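
/* Worked example of the conversion above: for ppb = 1000,
 * correction = 1000 * 2^24 / 10^9 = 16 after integer division,
 * i.e. 16 / 2^24 ~= 954 ppb actually applied. The truncation
 * error is bounded by 1/2^24 ~= 60 ppb per adjustment.
 */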
5850 tg3_full_lock(tp, 0);
5853 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
5854 TG3_EAV_REF_CLK_CORRECT_EN |
5855 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
5857 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
5859 tg3_full_unlock(tp);
5864 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
5866 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5868 tg3_full_lock(tp, 0);
5869 tp->ptp_adjust += delta;
5870 tg3_full_unlock(tp);
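
/* Note that adjtime only moves a software offset (tp->ptp_adjust)
 * that tg3_ptp_gettime() adds back in; the hardware reference
 * clock itself is left free-running.
 */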
5875 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
5879 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5881 tg3_full_lock(tp, 0);
5882 ns = tg3_refclk_read(tp);
5883 ns += tp->ptp_adjust;
5884 tg3_full_unlock(tp);
5886 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
5887 ts->tv_nsec = remainder;
5892 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
5893 const struct timespec *ts)
5896 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5898 ns = timespec_to_ns(ts);
5900 tg3_full_lock(tp, 0);
5901 tg3_refclk_write(tp, ns);
5903 tg3_full_unlock(tp);
5908 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
5909 struct ptp_clock_request *rq, int on)
5914 static const struct ptp_clock_info tg3_ptp_caps = {
5915 .owner = THIS_MODULE,
5916 .name = "tg3 clock",
5917 .max_adj = 250000000,
5922 .adjfreq = tg3_ptp_adjfreq,
5923 .adjtime = tg3_ptp_adjtime,
5924 .gettime = tg3_ptp_gettime,
5925 .settime = tg3_ptp_settime,
5926 .enable = tg3_ptp_enable,
5929 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
5930 struct skb_shared_hwtstamps *timestamp)
5932 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
5933 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
5937 /* tp->lock must be held */
5938 static void tg3_ptp_init(struct tg3 *tp)
5940 if (!tg3_flag(tp, PTP_CAPABLE))
5943 /* Initialize the hardware clock to the system time. */
5944 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
5946 tp->ptp_info = tg3_ptp_caps;
5949 /* tp->lock must be held */
5950 static void tg3_ptp_resume(struct tg3 *tp)
5952 if (!tg3_flag(tp, PTP_CAPABLE))
5955 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
5959 static void tg3_ptp_fini(struct tg3 *tp)
5961 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
5964 ptp_clock_unregister(tp->ptp_clock);
5965 tp->ptp_clock = NULL;
5969 static inline int tg3_irq_sync(struct tg3 *tp)
5971 return tp->irq_sync;
5974 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5978 dst = (u32 *)((u8 *)dst + off);
5979 for (i = 0; i < len; i += sizeof(u32))
5980 *dst++ = tr32(off + i);
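
/* tg3_rd32_loop() first advances dst by 'off' bytes so that each
 * register value lands at its own register offset inside the dump
 * buffer, keeping buffer offsets and register offsets in step.
 */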
5983 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5985 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5986 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5987 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5988 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5989 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5990 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5991 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5992 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5993 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5994 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5995 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5996 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5997 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5998 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5999 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6000 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6001 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6002 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6003 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6005 if (tg3_flag(tp, SUPPORT_MSIX))
6006 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6008 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6009 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6010 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6011 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6012 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6013 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6014 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6015 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6017 if (!tg3_flag(tp, 5705_PLUS)) {
6018 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6019 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6020 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6023 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6024 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6025 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6026 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6027 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6029 if (tg3_flag(tp, NVRAM))
6030 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6033 static void tg3_dump_state(struct tg3 *tp)
6038 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6042 if (tg3_flag(tp, PCI_EXPRESS)) {
6043 /* Read up to but not including private PCI registers */
6044 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6045 regs[i / sizeof(u32)] = tr32(i);
6047 tg3_dump_legacy_regs(tp, regs);
6049 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6050 if (!regs[i + 0] && !regs[i + 1] &&
6051 !regs[i + 2] && !regs[i + 3])
6054 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6056 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6061 for (i = 0; i < tp->irq_cnt; i++) {
6062 struct tg3_napi *tnapi = &tp->napi[i];
6064 /* SW status block */
6066 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6068 tnapi->hw_status->status,
6069 tnapi->hw_status->status_tag,
6070 tnapi->hw_status->rx_jumbo_consumer,
6071 tnapi->hw_status->rx_consumer,
6072 tnapi->hw_status->rx_mini_consumer,
6073 tnapi->hw_status->idx[0].rx_producer,
6074 tnapi->hw_status->idx[0].tx_consumer);
6077 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6079 tnapi->last_tag, tnapi->last_irq_tag,
6080 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6082 tnapi->prodring.rx_std_prod_idx,
6083 tnapi->prodring.rx_std_cons_idx,
6084 tnapi->prodring.rx_jmb_prod_idx,
6085 tnapi->prodring.rx_jmb_cons_idx);
6089 /* This is called whenever we suspect that the system chipset is re-
6090 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6091 * is bogus tx completions. We try to recover by setting the
6092 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6095 static void tg3_tx_recover(struct tg3 *tp)
6097 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6098 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6100 netdev_warn(tp->dev,
6101 "The system may be re-ordering memory-mapped I/O "
6102 "cycles to the network device, attempting to recover. "
6103 "Please report the problem to the driver maintainer "
6104 "and include system chipset information.\n");
6106 spin_lock(&tp->lock);
6107 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6108 spin_unlock(&tp->lock);
6111 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6113 /* Tell compiler to fetch tx indices from memory. */
6115 return tnapi->tx_pending -
6116 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
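
/* Example of the ring arithmetic above: with a 512-entry ring,
 * tx_prod = 10 and tx_cons = 500 give (10 - 500) & 511 = 22
 * descriptors in flight, so tx_pending - 22 slots remain usable.
 */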
6119 /* Tigon3 never reports partial packet sends. So we do not
6120 * need special logic to handle SKBs that have not had all
6121 * of their frags sent yet, like SunGEM does.
6123 static void tg3_tx(struct tg3_napi *tnapi)
6125 struct tg3 *tp = tnapi->tp;
6126 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6127 u32 sw_idx = tnapi->tx_cons;
6128 struct netdev_queue *txq;
6129 int index = tnapi - tp->napi;
6130 unsigned int pkts_compl = 0, bytes_compl = 0;
6132 if (tg3_flag(tp, ENABLE_TSS))
6135 txq = netdev_get_tx_queue(tp->dev, index);
6137 while (sw_idx != hw_idx) {
6138 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6139 struct sk_buff *skb = ri->skb;
6142 if (unlikely(skb == NULL)) {
6147 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6148 struct skb_shared_hwtstamps timestamp;
6149 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6150 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);

skb_tstamp_tx(skb, &timestamp);
6157 pci_unmap_single(tp->pdev,
6158 dma_unmap_addr(ri, mapping),
6164 while (ri->fragmented) {
6165 ri->fragmented = false;
6166 sw_idx = NEXT_TX(sw_idx);
6167 ri = &tnapi->tx_buffers[sw_idx];
6170 sw_idx = NEXT_TX(sw_idx);
6172 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6173 ri = &tnapi->tx_buffers[sw_idx];
6174 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6177 pci_unmap_page(tp->pdev,
6178 dma_unmap_addr(ri, mapping),
6179 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6182 while (ri->fragmented) {
6183 ri->fragmented = false;
6184 sw_idx = NEXT_TX(sw_idx);
6185 ri = &tnapi->tx_buffers[sw_idx];
6188 sw_idx = NEXT_TX(sw_idx);
6192 bytes_compl += skb->len;
6196 if (unlikely(tx_bug)) {
6202 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6204 tnapi->tx_cons = sw_idx;
6206 /* Need to make the tx_cons update visible to tg3_start_xmit()
6207 * before checking for netif_queue_stopped(). Without the
6208 * memory barrier, there is a small possibility that tg3_start_xmit()
6209 * will miss it and cause the queue to be stopped forever.
6213 if (unlikely(netif_tx_queue_stopped(txq) &&
6214 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6215 __netif_tx_lock(txq, smp_processor_id());
6216 if (netif_tx_queue_stopped(txq) &&
6217 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6218 netif_tx_wake_queue(txq);
6219 __netif_tx_unlock(txq);
6223 static void tg3_frag_free(bool is_frag, void *data)
6226 put_page(virt_to_head_page(data));
6231 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6233 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6234 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6239 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6240 map_sz, PCI_DMA_FROMDEVICE);
6241 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
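
/* The skb_size recomputation above mirrors the allocation-side
 * formula in tg3_alloc_rx_data(), so the page-frag vs. kmalloc
 * decision made at alloc time is reproduced exactly at free time.
 */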
6246 /* Returns size of skb allocated or < 0 on error.
6248 * We only need to fill in the address because the other members
6249 * of the RX descriptor are invariant, see tg3_init_rings.
 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6252 * posting buffers we only dirty the first cache line of the RX
6253 * descriptor (containing the address). Whereas for the RX status
6254 * buffers the cpu only reads the last cacheline of the RX descriptor
6255 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6257 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6258 u32 opaque_key, u32 dest_idx_unmasked,
6259 unsigned int *frag_size)
6261 struct tg3_rx_buffer_desc *desc;
6262 struct ring_info *map;
6265 int skb_size, data_size, dest_idx;
6267 switch (opaque_key) {
6268 case RXD_OPAQUE_RING_STD:
6269 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6270 desc = &tpr->rx_std[dest_idx];
6271 map = &tpr->rx_std_buffers[dest_idx];
6272 data_size = tp->rx_pkt_map_sz;
6275 case RXD_OPAQUE_RING_JUMBO:
6276 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6277 desc = &tpr->rx_jmb[dest_idx].std;
6278 map = &tpr->rx_jmb_buffers[dest_idx];
6279 data_size = TG3_RX_JMB_MAP_SZ;
6286 /* Do not overwrite any of the map or rp information
6287 * until we are sure we can commit to a new buffer.
6289 * Callers depend upon this behavior and assume that
6290 * we leave everything unchanged if we fail.
6292 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6293 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
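
/* Resulting buffer layout for the build_skb() path (a sketch):
 *
 *   [ TG3_RX_OFFSET headroom | packet data .............. ]
 *   [ aligned skb_shared_info                             ]
 *
 * DMA is mapped starting at data + TG3_RX_OFFSET(tp), so the
 * headroom and the shared info area are never touched by the
 * device.
 */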
6294 if (skb_size <= PAGE_SIZE) {
6295 data = netdev_alloc_frag(skb_size);
6296 *frag_size = skb_size;
6298 data = kmalloc(skb_size, GFP_ATOMIC);
6304 mapping = pci_map_single(tp->pdev,
6305 data + TG3_RX_OFFSET(tp),
6307 PCI_DMA_FROMDEVICE);
6308 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6309 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6314 dma_unmap_addr_set(map, mapping, mapping);
6316 desc->addr_hi = ((u64)mapping >> 32);
6317 desc->addr_lo = ((u64)mapping & 0xffffffff);
/* We only need to move the address over because the other
6323 * members of the RX descriptor are invariant. See notes above
6324 * tg3_alloc_rx_data for full details.
6326 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6327 struct tg3_rx_prodring_set *dpr,
6328 u32 opaque_key, int src_idx,
6329 u32 dest_idx_unmasked)
6331 struct tg3 *tp = tnapi->tp;
6332 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6333 struct ring_info *src_map, *dest_map;
6334 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6337 switch (opaque_key) {
6338 case RXD_OPAQUE_RING_STD:
6339 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6340 dest_desc = &dpr->rx_std[dest_idx];
6341 dest_map = &dpr->rx_std_buffers[dest_idx];
6342 src_desc = &spr->rx_std[src_idx];
6343 src_map = &spr->rx_std_buffers[src_idx];
6346 case RXD_OPAQUE_RING_JUMBO:
6347 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6348 dest_desc = &dpr->rx_jmb[dest_idx].std;
6349 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6350 src_desc = &spr->rx_jmb[src_idx].std;
6351 src_map = &spr->rx_jmb_buffers[src_idx];
6358 dest_map->data = src_map->data;
6359 dma_unmap_addr_set(dest_map, mapping,
6360 dma_unmap_addr(src_map, mapping));
6361 dest_desc->addr_hi = src_desc->addr_hi;
6362 dest_desc->addr_lo = src_desc->addr_lo;
6364 /* Ensure that the update to the skb happens after the physical
6365 * addresses have been transferred to the new BD location.
6369 src_map->data = NULL;
6372 /* The RX ring scheme is composed of multiple rings which post fresh
6373 * buffers to the chip, and one special ring the chip uses to report
6374 * status back to the host.
6376 * The special ring reports the status of received packets to the
6377 * host. The chip does not write into the original descriptor the
6378 * RX buffer was obtained from. The chip simply takes the original
6379 * descriptor as provided by the host, updates the status and length
6380 * field, then writes this into the next status ring entry.
6382 * Each ring the host uses to post buffers to the chip is described
6383 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
6384 * it is first placed into the on-chip ram. When the packet's length
6385 * is known, it walks down the TG3_BDINFO entries to select the ring.
6386 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6387 * which is within the range of the new packet's length is chosen.
6389 * The "separate ring for rx status" scheme may sound queer, but it makes
6390 * sense from a cache coherency perspective. If only the host writes
6391 * to the buffer post rings, and only the chip writes to the rx status
6392 * rings, then cache lines never move beyond shared-modified state.
6393 * If both the host and chip were to write into the same ring, cache line
6394 * eviction could occur since both entities want it in an exclusive state.
6396 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6398 struct tg3 *tp = tnapi->tp;
6399 u32 work_mask, rx_std_posted = 0;
6400 u32 std_prod_idx, jmb_prod_idx;
6401 u32 sw_idx = tnapi->rx_rcb_ptr;
6404 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6406 hw_idx = *(tnapi->rx_rcb_prod_idx);
6408 * We need to order the read of hw_idx and the read of
6409 * the opaque cookie.
6414 std_prod_idx = tpr->rx_std_prod_idx;
6415 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6416 while (sw_idx != hw_idx && budget > 0) {
6417 struct ring_info *ri;
6418 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6420 struct sk_buff *skb;
6421 dma_addr_t dma_addr;
6422 u32 opaque_key, desc_idx, *post_ptr;
6426 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6427 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6428 if (opaque_key == RXD_OPAQUE_RING_STD) {
6429 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6430 dma_addr = dma_unmap_addr(ri, mapping);
6432 post_ptr = &std_prod_idx;
6434 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6435 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6436 dma_addr = dma_unmap_addr(ri, mapping);
6438 post_ptr = &jmb_prod_idx;
6440 goto next_pkt_nopost;
6442 work_mask |= opaque_key;
6444 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6445 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6447 tg3_recycle_rx(tnapi, tpr, opaque_key,
6448 desc_idx, *post_ptr);
6450 /* Other statistics kept track of by card. */
6455 prefetch(data + TG3_RX_OFFSET(tp));
6456 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6459 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6460 RXD_FLAG_PTPSTAT_PTPV1 ||
6461 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6462 RXD_FLAG_PTPSTAT_PTPV2) {
6463 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6464 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6467 if (len > TG3_RX_COPY_THRESH(tp)) {
6469 unsigned int frag_size;
6471 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6472 *post_ptr, &frag_size);
6476 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6477 PCI_DMA_FROMDEVICE);
6479 skb = build_skb(data, frag_size);
6481 tg3_frag_free(frag_size != 0, data);
6482 goto drop_it_no_recycle;
6484 skb_reserve(skb, TG3_RX_OFFSET(tp));
6485 /* Ensure that the update to the data happens
6486 * after the usage of the old DMA mapping.
6493 tg3_recycle_rx(tnapi, tpr, opaque_key,
6494 desc_idx, *post_ptr);
6496 skb = netdev_alloc_skb(tp->dev,
6497 len + TG3_RAW_IP_ALIGN);
6499 goto drop_it_no_recycle;
6501 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6502 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6504 data + TG3_RX_OFFSET(tp),
6506 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6511 tg3_hwclock_to_timestamp(tp, tstamp,
6512 skb_hwtstamps(skb));
6514 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6515 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6516 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6517 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6518 skb->ip_summed = CHECKSUM_UNNECESSARY;
6520 skb_checksum_none_assert(skb);
6522 skb->protocol = eth_type_trans(skb, tp->dev);
6524 if (len > (tp->dev->mtu + ETH_HLEN) &&
6525 skb->protocol != htons(ETH_P_8021Q)) {
6527 goto drop_it_no_recycle;
6530 if (desc->type_flags & RXD_FLAG_VLAN &&
6531 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6532 __vlan_hwaccel_put_tag(skb,
6533 desc->err_vlan & RXD_VLAN_MASK);
6535 napi_gro_receive(&tnapi->napi, skb);
6543 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6544 tpr->rx_std_prod_idx = std_prod_idx &
6545 tp->rx_std_ring_mask;
6546 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6547 tpr->rx_std_prod_idx);
6548 work_mask &= ~RXD_OPAQUE_RING_STD;
6553 sw_idx &= tp->rx_ret_ring_mask;
6555 /* Refresh hw_idx to see if there is new work */
6556 if (sw_idx == hw_idx) {
6557 hw_idx = *(tnapi->rx_rcb_prod_idx);
6562 /* ACK the status ring. */
6563 tnapi->rx_rcb_ptr = sw_idx;
6564 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6566 /* Refill RX ring(s). */
6567 if (!tg3_flag(tp, ENABLE_RSS)) {
6568 /* Sync BD data before updating mailbox */
6571 if (work_mask & RXD_OPAQUE_RING_STD) {
6572 tpr->rx_std_prod_idx = std_prod_idx &
6573 tp->rx_std_ring_mask;
6574 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6575 tpr->rx_std_prod_idx);
6577 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6578 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6579 tp->rx_jmb_ring_mask;
6580 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6581 tpr->rx_jmb_prod_idx);
6584 } else if (work_mask) {
6585 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6586 * updated before the producer indices can be updated.
6590 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6591 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6593 if (tnapi != &tp->napi[1]) {
6594 tp->rx_refill = true;
6595 napi_schedule(&tp->napi[1].napi);
6602 static void tg3_poll_link(struct tg3 *tp)
6604 /* handle link change and other phy events */
6605 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6606 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6608 if (sblk->status & SD_STATUS_LINK_CHG) {
6609 sblk->status = SD_STATUS_UPDATED |
6610 (sblk->status & ~SD_STATUS_LINK_CHG);
6611 spin_lock(&tp->lock);
6612 if (tg3_flag(tp, USE_PHYLIB)) {
6614 (MAC_STATUS_SYNC_CHANGED |
6615 MAC_STATUS_CFG_CHANGED |
6616 MAC_STATUS_MI_COMPLETION |
6617 MAC_STATUS_LNKSTATE_CHANGED));
6620 tg3_setup_phy(tp, 0);
6621 spin_unlock(&tp->lock);
6626 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6627 struct tg3_rx_prodring_set *dpr,
6628 struct tg3_rx_prodring_set *spr)
6630 u32 si, di, cpycnt, src_prod_idx;
6634 src_prod_idx = spr->rx_std_prod_idx;
6636 /* Make sure updates to the rx_std_buffers[] entries and the
6637 * standard producer index are seen in the correct order.
6641 if (spr->rx_std_cons_idx == src_prod_idx)
6644 if (spr->rx_std_cons_idx < src_prod_idx)
6645 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6647 cpycnt = tp->rx_std_ring_mask + 1 -
6648 spr->rx_std_cons_idx;
6650 cpycnt = min(cpycnt,
6651 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
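
/* cpycnt is clamped twice: first to the contiguous run available
 * from the source consumer index (up to the ring wrap), then to
 * the contiguous space ahead of the destination producer index,
 * so the memcpy() below never has to wrap either ring.
 */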
6653 si = spr->rx_std_cons_idx;
6654 di = dpr->rx_std_prod_idx;
6656 for (i = di; i < di + cpycnt; i++) {
6657 if (dpr->rx_std_buffers[i].data) {
6667 /* Ensure that updates to the rx_std_buffers ring and the
6668 * shadowed hardware producer ring from tg3_recycle_skb() are
6669 * ordered correctly WRT the skb check above.
6673 memcpy(&dpr->rx_std_buffers[di],
6674 &spr->rx_std_buffers[si],
6675 cpycnt * sizeof(struct ring_info));
6677 for (i = 0; i < cpycnt; i++, di++, si++) {
6678 struct tg3_rx_buffer_desc *sbd, *dbd;
6679 sbd = &spr->rx_std[si];
6680 dbd = &dpr->rx_std[di];
6681 dbd->addr_hi = sbd->addr_hi;
6682 dbd->addr_lo = sbd->addr_lo;
6685 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6686 tp->rx_std_ring_mask;
6687 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6688 tp->rx_std_ring_mask;
6692 src_prod_idx = spr->rx_jmb_prod_idx;
6694 /* Make sure updates to the rx_jmb_buffers[] entries and
6695 * the jumbo producer index are seen in the correct order.
6699 if (spr->rx_jmb_cons_idx == src_prod_idx)
6702 if (spr->rx_jmb_cons_idx < src_prod_idx)
6703 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6705 cpycnt = tp->rx_jmb_ring_mask + 1 -
6706 spr->rx_jmb_cons_idx;
6708 cpycnt = min(cpycnt,
6709 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6711 si = spr->rx_jmb_cons_idx;
6712 di = dpr->rx_jmb_prod_idx;
6714 for (i = di; i < di + cpycnt; i++) {
6715 if (dpr->rx_jmb_buffers[i].data) {
6725 /* Ensure that updates to the rx_jmb_buffers ring and the
6726 * shadowed hardware producer ring from tg3_recycle_skb() are
6727 * ordered correctly WRT the skb check above.
6731 memcpy(&dpr->rx_jmb_buffers[di],
6732 &spr->rx_jmb_buffers[si],
6733 cpycnt * sizeof(struct ring_info));
6735 for (i = 0; i < cpycnt; i++, di++, si++) {
6736 struct tg3_rx_buffer_desc *sbd, *dbd;
6737 sbd = &spr->rx_jmb[si].std;
6738 dbd = &dpr->rx_jmb[di].std;
6739 dbd->addr_hi = sbd->addr_hi;
6740 dbd->addr_lo = sbd->addr_lo;
6743 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6744 tp->rx_jmb_ring_mask;
6745 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6746 tp->rx_jmb_ring_mask;
6752 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6754 struct tg3 *tp = tnapi->tp;
6756 /* run TX completion thread */
6757 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6759 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6763 if (!tnapi->rx_rcb_prod_idx)
6766 /* run RX thread, within the bounds set by NAPI.
6767 * All RX "locking" is done by ensuring outside
6768 * code synchronizes with tg3->napi.poll()
6770 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6771 work_done += tg3_rx(tnapi, budget - work_done);
6773 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6774 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6776 u32 std_prod_idx = dpr->rx_std_prod_idx;
6777 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6779 tp->rx_refill = false;
6780 for (i = 1; i <= tp->rxq_cnt; i++)
6781 err |= tg3_rx_prodring_xfer(tp, dpr,
6782 &tp->napi[i].prodring);
6786 if (std_prod_idx != dpr->rx_std_prod_idx)
6787 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6788 dpr->rx_std_prod_idx);
6790 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6791 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6792 dpr->rx_jmb_prod_idx);
6797 tw32_f(HOSTCC_MODE, tp->coal_now);
6803 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6805 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6806 schedule_work(&tp->reset_task);
6809 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6811 cancel_work_sync(&tp->reset_task);
6812 tg3_flag_clear(tp, RESET_TASK_PENDING);
6813 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6816 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6818 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6819 struct tg3 *tp = tnapi->tp;
6821 struct tg3_hw_status *sblk = tnapi->hw_status;
6824 work_done = tg3_poll_work(tnapi, work_done, budget);
6826 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6829 if (unlikely(work_done >= budget))
6832 /* tp->last_tag is used in tg3_int_reenable() below
6833 * to tell the hw how much work has been processed,
6834 * so we must read it before checking for more work.
6836 tnapi->last_tag = sblk->status_tag;
6837 tnapi->last_irq_tag = tnapi->last_tag;
6840 /* check for RX/TX work to do */
6841 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6842 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6844 /* This test here is not race free, but will reduce
6845 * the number of interrupts by looping again.
6847 if (tnapi == &tp->napi[1] && tp->rx_refill)
6850 napi_complete(napi);
6851 /* Reenable interrupts. */
6852 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6854 /* This test here is synchronized by napi_schedule()
6855 * and napi_complete() to close the race condition.
6857 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6858 tw32(HOSTCC_MODE, tp->coalesce_mode |
6859 HOSTCC_MODE_ENABLE |
6870 /* work_done is guaranteed to be less than budget. */
6871 napi_complete(napi);
6872 tg3_reset_task_schedule(tp);
6876 static void tg3_process_error(struct tg3 *tp)
6879 bool real_error = false;
6881 if (tg3_flag(tp, ERROR_PROCESSED))
6884 /* Check Flow Attention register */
6885 val = tr32(HOSTCC_FLOW_ATTN);
6886 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6887 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
6891 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6892 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
6896 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6897 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
6906 tg3_flag_set(tp, ERROR_PROCESSED);
6907 tg3_reset_task_schedule(tp);
6910 static int tg3_poll(struct napi_struct *napi, int budget)
6912 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6913 struct tg3 *tp = tnapi->tp;
6915 struct tg3_hw_status *sblk = tnapi->hw_status;
6918 if (sblk->status & SD_STATUS_ERROR)
6919 tg3_process_error(tp);
6923 work_done = tg3_poll_work(tnapi, work_done, budget);
6925 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6928 if (unlikely(work_done >= budget))
6931 if (tg3_flag(tp, TAGGED_STATUS)) {
6932 /* tp->last_tag is used in tg3_int_reenable() below
6933 * to tell the hw how much work has been processed,
6934 * so we must read it before checking for more work.
6936 tnapi->last_tag = sblk->status_tag;
6937 tnapi->last_irq_tag = tnapi->last_tag;
6940 sblk->status &= ~SD_STATUS_UPDATED;
6942 if (likely(!tg3_has_work(tnapi))) {
6943 napi_complete(napi);
6944 tg3_int_reenable(tnapi);
6952 /* work_done is guaranteed to be less than budget. */
6953 napi_complete(napi);
6954 tg3_reset_task_schedule(tp);
6958 static void tg3_napi_disable(struct tg3 *tp)
6962 for (i = tp->irq_cnt - 1; i >= 0; i--)
6963 napi_disable(&tp->napi[i].napi);
6966 static void tg3_napi_enable(struct tg3 *tp)
6970 for (i = 0; i < tp->irq_cnt; i++)
6971 napi_enable(&tp->napi[i].napi);
6974 static void tg3_napi_init(struct tg3 *tp)
6978 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6979 for (i = 1; i < tp->irq_cnt; i++)
6980 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6983 static void tg3_napi_fini(struct tg3 *tp)
6987 for (i = 0; i < tp->irq_cnt; i++)
6988 netif_napi_del(&tp->napi[i].napi);
6991 static inline void tg3_netif_stop(struct tg3 *tp)
6993 tp->dev->trans_start = jiffies; /* prevent tx timeout */
6994 tg3_napi_disable(tp);
6995 netif_carrier_off(tp->dev);
6996 netif_tx_disable(tp->dev);
6999 /* tp->lock must be held */
7000 static inline void tg3_netif_start(struct tg3 *tp)
7004 /* NOTE: unconditional netif_tx_wake_all_queues is only
7005 * appropriate so long as all callers are assured to
7006 * have free tx slots (such as after tg3_init_hw)
7008 netif_tx_wake_all_queues(tp->dev);
7011 netif_carrier_on(tp->dev);
7013 tg3_napi_enable(tp);
7014 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7015 tg3_enable_ints(tp);
7018 static void tg3_irq_quiesce(struct tg3 *tp)
7022 BUG_ON(tp->irq_sync);
7027 for (i = 0; i < tp->irq_cnt; i++)
7028 synchronize_irq(tp->napi[i].irq_vec);
/* Fully shut down all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, the IRQ handlers must be quiesced as well.
 * Most of the time this is not necessary, except when shutting down
 * the device.
 */
7036 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7038 spin_lock_bh(&tp->lock);
7040 tg3_irq_quiesce(tp);
7043 static inline void tg3_full_unlock(struct tg3 *tp)
7045 spin_unlock_bh(&tp->lock);
7048 /* One-shot MSI handler - Chip automatically disables interrupt
7049 * after sending MSI so driver doesn't have to do it.
7051 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7053 struct tg3_napi *tnapi = dev_id;
7054 struct tg3 *tp = tnapi->tp;
7056 prefetch(tnapi->hw_status);
7058 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7060 if (likely(!tg3_irq_sync(tp)))
7061 napi_schedule(&tnapi->napi);
7066 /* MSI ISR - No need to check for interrupt sharing and no need to
7067 * flush status block and interrupt mailbox. PCI ordering rules
7068 * guarantee that MSI will arrive after the status block.
7070 static irqreturn_t tg3_msi(int irq, void *dev_id)
7072 struct tg3_napi *tnapi = dev_id;
7073 struct tg3 *tp = tnapi->tp;
7075 prefetch(tnapi->hw_status);
7077 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7079 * Writing any value to intr-mbox-0 clears PCI INTA# and
7080 * chip-internal interrupt pending events.
 * Writing non-zero to intr-mbox-0 additionally tells the
7082 * NIC to stop sending us irqs, engaging "in-intr-handler"
7085 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7086 if (likely(!tg3_irq_sync(tp)))
7087 napi_schedule(&tnapi->napi);
7089 return IRQ_RETVAL(1);
7092 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7094 struct tg3_napi *tnapi = dev_id;
7095 struct tg3 *tp = tnapi->tp;
7096 struct tg3_hw_status *sblk = tnapi->hw_status;
7097 unsigned int handled = 1;
7099 /* In INTx mode, it is possible for the interrupt to arrive at
7100 * the CPU before the status block write that preceded it has
7101 * landed in memory. Reading the PCI State register will confirm
7102 * whether the interrupt is ours and will flush the status block.
7104 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7105 if (tg3_flag(tp, CHIP_RESETTING) ||
7106 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7113 * Writing any value to intr-mbox-0 clears PCI INTA# and
7114 * chip-internal interrupt pending events.
7115 * Writing non-zero to intr-mbox-0 additionally tells the
7116 * NIC to stop sending us irqs, engaging "in-intr-handler"
7119 * Flush the mailbox to de-assert the IRQ immediately to prevent
7120 * spurious interrupts. The flush impacts performance but
7121 * excessive spurious interrupts can be worse in some cases.
7123 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7124 if (tg3_irq_sync(tp))
7126 sblk->status &= ~SD_STATUS_UPDATED;
7127 if (likely(tg3_has_work(tnapi))) {
7128 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7129 napi_schedule(&tnapi->napi);
7131 /* No work, shared interrupt perhaps? Re-enable
7132 * interrupts, and flush that PCI write
7134 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7138 return IRQ_RETVAL(handled);
7141 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7143 struct tg3_napi *tnapi = dev_id;
7144 struct tg3 *tp = tnapi->tp;
7145 struct tg3_hw_status *sblk = tnapi->hw_status;
7146 unsigned int handled = 1;
7148 /* In INTx mode, it is possible for the interrupt to arrive at
7149 * the CPU before the status block write that preceded it has
7150 * landed in memory. Reading the PCI State register will confirm
7151 * whether the interrupt is ours and will flush the status block.
7153 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7154 if (tg3_flag(tp, CHIP_RESETTING) ||
7155 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7162 * Writing any value to intr-mbox-0 clears PCI INTA# and
7163 * chip-internal interrupt pending events.
7164 * Writing non-zero to intr-mbox-0 additionally tells the
7165 * NIC to stop sending us irqs, engaging "in-intr-handler"
7168 * Flush the mailbox to de-assert the IRQ immediately to prevent
7169 * spurious interrupts. The flush impacts performance but
7170 * excessive spurious interrupts can be worse in some cases.
7172 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7175 * In a shared interrupt configuration, sometimes other devices'
7176 * interrupts will scream. We record the current status tag here
7177 * so that the above check can report that the screaming interrupts
7178 * are unhandled. Eventually they will be silenced.
7180 tnapi->last_irq_tag = sblk->status_tag;
7182 if (tg3_irq_sync(tp))
7185 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7187 napi_schedule(&tnapi->napi);
7190 return IRQ_RETVAL(handled);
7193 /* ISR for interrupt test */
7194 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7196 struct tg3_napi *tnapi = dev_id;
7197 struct tg3 *tp = tnapi->tp;
7198 struct tg3_hw_status *sblk = tnapi->hw_status;
7200 if ((sblk->status & SD_STATUS_UPDATED) ||
7201 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7202 tg3_disable_ints(tp);
7203 return IRQ_RETVAL(1);
7205 return IRQ_RETVAL(0);
7208 #ifdef CONFIG_NET_POLL_CONTROLLER
7209 static void tg3_poll_controller(struct net_device *dev)
7212 struct tg3 *tp = netdev_priv(dev);
7214 if (tg3_irq_sync(tp))
7217 for (i = 0; i < tp->irq_cnt; i++)
7218 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7222 static void tg3_tx_timeout(struct net_device *dev)
7224 struct tg3 *tp = netdev_priv(dev);
7226 if (netif_msg_tx_err(tp)) {
7227 netdev_err(dev, "transmit timed out, resetting\n");
7231 tg3_reset_task_schedule(tp);
7234 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7235 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7237 u32 base = (u32) mapping & 0xffffffff;
7239 return (base > 0xffffdcc0) && (base + len + 8 < base);
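/* Worked example (illustrative sketch, not driver code): a 4 KB buffer
 * mapped at 0xfffff000 straddles the 4 GB line, so the 32-bit sum
 * base + len + 8 wraps below base and the test above fires.
 */
static inline int tg3_example_4g_wrap(void)
{
	u32 base = 0xfffff000;
	u32 len = 4096;

	return (base > 0xffffdcc0) && (base + len + 8 < base); /* true */
}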
7242 /* Test for DMA addresses > 40-bit */
7243 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7246 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7247 if (tg3_flag(tp, 40BIT_DMA_BUG))
7248 return ((u64) mapping + len) > DMA_BIT_MASK(40);
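/* Worked example (illustrative): DMA_BIT_MASK(40) is 0xff_ffff_ffff.
 * A 2 KB buffer mapped at 0xff_ffff_fc00 ends at 0x100_0000_0400,
 * past the 40-bit limit, so the check above reports an overflow on
 * parts with the 40BIT_DMA_BUG flag set.
 */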
7255 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7256 dma_addr_t mapping, u32 len, u32 flags,
7259 txbd->addr_hi = ((u64) mapping >> 32);
7260 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7261 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7262 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
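/* Worked example (illustrative): for mapping = 0x123456789ULL, len = 64,
 * the descriptor above is filled as addr_hi = 0x00000001 and
 * addr_lo = 0x23456789, with the length landing in the upper half of
 * len_flags (len << TXD_LEN_SHIFT); mss and vlan are packed side by
 * side into vlan_tag the same way.
 */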
7265 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7266 dma_addr_t map, u32 len, u32 flags,
7269 struct tg3 *tp = tnapi->tp;
7272 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7275 if (tg3_4g_overflow_test(map, len))
7278 if (tg3_40bit_overflow_test(tp, map, len))
7281 if (tp->dma_limit) {
7282 u32 prvidx = *entry;
7283 u32 tmp_flag = flags & ~TXD_FLAG_END;
7284 while (len > tp->dma_limit && *budget) {
7285 u32 frag_len = tp->dma_limit;
7286 len -= tp->dma_limit;
7288 /* Avoid the 8-byte DMA problem */
7290 len += tp->dma_limit / 2;
7291 frag_len = tp->dma_limit / 2;
7294 tnapi->tx_buffers[*entry].fragmented = true;
7296 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7297 frag_len, tmp_flag, mss, vlan);
7300 *entry = NEXT_TX(*entry);
7307 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7308 len, flags, mss, vlan);
7310 *entry = NEXT_TX(*entry);
7313 tnapi->tx_buffers[prvidx].fragmented = false;
7317 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7318 len, flags, mss, vlan);
7319 *entry = NEXT_TX(*entry);
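/* Worked example (illustrative): with tp->dma_limit = 4096, a 10240
 * byte fragment is emitted as 4096 + 4096 + 2048 byte BDs, with
 * TXD_FLAG_END only on the last one. A 4100 byte fragment would leave
 * a 4-byte tail inside the short-DMA erratum's range, so the loop
 * above instead halves the chunk (2048 + 2052) to keep every BD clear
 * of it.
 */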
7325 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7328 struct sk_buff *skb;
7329 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7334 pci_unmap_single(tnapi->tp->pdev,
7335 dma_unmap_addr(txb, mapping),
7339 while (txb->fragmented) {
7340 txb->fragmented = false;
7341 entry = NEXT_TX(entry);
7342 txb = &tnapi->tx_buffers[entry];
7345 for (i = 0; i <= last; i++) {
7346 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7348 entry = NEXT_TX(entry);
7349 txb = &tnapi->tx_buffers[entry];
7351 pci_unmap_page(tnapi->tp->pdev,
7352 dma_unmap_addr(txb, mapping),
7353 skb_frag_size(frag), PCI_DMA_TODEVICE);
7355 while (txb->fragmented) {
7356 txb->fragmented = false;
7357 entry = NEXT_TX(entry);
7358 txb = &tnapi->tx_buffers[entry];
7363 /* Work around 4GB and 40-bit hardware DMA bugs. */
7364 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7365 struct sk_buff **pskb,
7366 u32 *entry, u32 *budget,
7367 u32 base_flags, u32 mss, u32 vlan)
7369 struct tg3 *tp = tnapi->tp;
7370 struct sk_buff *new_skb, *skb = *pskb;
7371 dma_addr_t new_addr = 0;
7374 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7375 new_skb = skb_copy(skb, GFP_ATOMIC);
7377 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7379 new_skb = skb_copy_expand(skb,
7380 skb_headroom(skb) + more_headroom,
7381 skb_tailroom(skb), GFP_ATOMIC);
7387 /* New SKB is guaranteed to be linear. */
7388 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7390 /* Make sure the mapping succeeded */
7391 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7392 dev_kfree_skb(new_skb);
7395 u32 save_entry = *entry;
7397 base_flags |= TXD_FLAG_END;
7399 tnapi->tx_buffers[*entry].skb = new_skb;
7400 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7403 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7404 new_skb->len, base_flags,
7406 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7407 dev_kfree_skb(new_skb);
7418 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7420 /* Use GSO to work around a rare TSO bug that may be triggered when the
7421 * TSO header is greater than 80 bytes.
7423 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7425 struct sk_buff *segs, *nskb;
7426 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7428 /* Estimate the number of fragments in the worst case */
7429 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7430 netif_stop_queue(tp->dev);
7432 /* netif_tx_stop_queue() must be done before checking
7433 * tx index in tg3_tx_avail() below, because in
7434 * tg3_tx(), we update tx index before checking for
7435 * netif_tx_queue_stopped().
7438 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7439 return NETDEV_TX_BUSY;
7441 netif_wake_queue(tp->dev);
7444 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7446 goto tg3_tso_bug_end;
7452 tg3_start_xmit(nskb, tp->dev);
7458 return NETDEV_TX_OK;
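/* Illustrative flow (sketch): the workaround above strips NETIF_F_TSO
 * from the feature mask passed to skb_gso_segment(), so the stack
 * performs the segmentation in software, and each resulting MTU-sized
 * skb is resubmitted through tg3_start_xmit() as a plain transmit.
 */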
7461 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7462 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7464 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7466 struct tg3 *tp = netdev_priv(dev);
7467 u32 len, entry, base_flags, mss, vlan = 0;
7469 int i = -1, would_hit_hwbug;
7471 struct tg3_napi *tnapi;
7472 struct netdev_queue *txq;
7475 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7476 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7477 if (tg3_flag(tp, ENABLE_TSS))
7480 budget = tg3_tx_avail(tnapi);
7482 /* We are running in a BH-disabled context with netif_tx_lock
7483 * and TX reclaim runs via tp->napi.poll inside of a software
7484 * interrupt. Furthermore, IRQ processing runs lockless so we have
7485 * no IRQ context deadlocks to worry about either. Rejoice!
7487 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7488 if (!netif_tx_queue_stopped(txq)) {
7489 netif_tx_stop_queue(txq);
7491 /* This is a hard error, log it. */
7493 "BUG! Tx Ring full when queue awake!\n");
7495 return NETDEV_TX_BUSY;
7498 entry = tnapi->tx_prod;
7500 if (skb->ip_summed == CHECKSUM_PARTIAL)
7501 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7503 mss = skb_shinfo(skb)->gso_size;
7506 u32 tcp_opt_len, hdr_len;
7508 if (skb_header_cloned(skb) &&
7509 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7513 tcp_opt_len = tcp_optlen(skb);
7515 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7517 if (!skb_is_gso_v6(skb)) {
7519 iph->tot_len = htons(mss + hdr_len);
7522 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7523 tg3_flag(tp, TSO_BUG))
7524 return tg3_tso_bug(tp, skb);
7526 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7527 TXD_FLAG_CPU_POST_DMA);
7529 if (tg3_flag(tp, HW_TSO_1) ||
7530 tg3_flag(tp, HW_TSO_2) ||
7531 tg3_flag(tp, HW_TSO_3)) {
7532 tcp_hdr(skb)->check = 0;
7533 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7535 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7540 if (tg3_flag(tp, HW_TSO_3)) {
7541 mss |= (hdr_len & 0xc) << 12;
7543 base_flags |= 0x00000010;
7544 base_flags |= (hdr_len & 0x3e0) << 5;
7545 } else if (tg3_flag(tp, HW_TSO_2))
7546 mss |= hdr_len << 9;
7547 else if (tg3_flag(tp, HW_TSO_1) ||
7548 tg3_asic_rev(tp) == ASIC_REV_5705) {
7549 if (tcp_opt_len || iph->ihl > 5) {
7552 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7553 mss |= (tsflags << 11);
7556 if (tcp_opt_len || iph->ihl > 5) {
7559 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7560 base_flags |= tsflags << 12;
7565 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7566 !mss && skb->len > VLAN_ETH_FRAME_LEN)
7567 base_flags |= TXD_FLAG_JMB_PKT;
7569 if (vlan_tx_tag_present(skb)) {
7570 base_flags |= TXD_FLAG_VLAN;
7571 vlan = vlan_tx_tag_get(skb);
7574 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7575 tg3_flag(tp, TX_TSTAMP_EN)) {
7576 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7577 base_flags |= TXD_FLAG_HWTSTAMP;
7580 len = skb_headlen(skb);
7582 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7583 if (pci_dma_mapping_error(tp->pdev, mapping))
7587 tnapi->tx_buffers[entry].skb = skb;
7588 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7590 would_hit_hwbug = 0;
7592 if (tg3_flag(tp, 5701_DMA_BUG))
7593 would_hit_hwbug = 1;
7595 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7596 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7598 would_hit_hwbug = 1;
7599 } else if (skb_shinfo(skb)->nr_frags > 0) {
7602 if (!tg3_flag(tp, HW_TSO_1) &&
7603 !tg3_flag(tp, HW_TSO_2) &&
7604 !tg3_flag(tp, HW_TSO_3))
7607 /* Now loop through additional data
7608 * fragments, and queue them.
7610 last = skb_shinfo(skb)->nr_frags - 1;
7611 for (i = 0; i <= last; i++) {
7612 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7614 len = skb_frag_size(frag);
7615 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7616 len, DMA_TO_DEVICE);
7618 tnapi->tx_buffers[entry].skb = NULL;
7619 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7621 if (dma_mapping_error(&tp->pdev->dev, mapping))
7625 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7627 ((i == last) ? TXD_FLAG_END : 0),
7629 would_hit_hwbug = 1;
7635 if (would_hit_hwbug) {
7636 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7638 /* If the workaround fails due to memory/mapping
7639 * failure, silently drop this packet.
7641 entry = tnapi->tx_prod;
7642 budget = tg3_tx_avail(tnapi);
7643 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7644 base_flags, mss, vlan))
7648 skb_tx_timestamp(skb);
7649 netdev_tx_sent_queue(txq, skb->len);
7651 /* Sync BD data before updating mailbox */
7654 /* Packets are ready, update Tx producer idx local and on card. */
7655 tw32_tx_mbox(tnapi->prodmbox, entry);
7657 tnapi->tx_prod = entry;
7658 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7659 netif_tx_stop_queue(txq);
7661 /* netif_tx_stop_queue() must be done before checking
7662 * tx index in tg3_tx_avail() below, because in
7663 * tg3_tx(), we update tx index before checking for
7664 * netif_tx_queue_stopped().
7667 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7668 netif_tx_wake_queue(txq);
7672 return NETDEV_TX_OK;
7675 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7676 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7681 return NETDEV_TX_OK;
7684 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7687 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7688 MAC_MODE_PORT_MODE_MASK);
7690 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7692 if (!tg3_flag(tp, 5705_PLUS))
7693 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7695 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7696 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7698 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7700 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7702 if (tg3_flag(tp, 5705_PLUS) ||
7703 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7704 tg3_asic_rev(tp) == ASIC_REV_5700)
7705 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7708 tw32(MAC_MODE, tp->mac_mode);
7712 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7714 u32 val, bmcr, mac_mode, ptest = 0;
7716 tg3_phy_toggle_apd(tp, false);
7717 tg3_phy_toggle_automdix(tp, 0);
7719 if (extlpbk && tg3_phy_set_extloopbk(tp))
7722 bmcr = BMCR_FULLDPLX;
7727 bmcr |= BMCR_SPEED100;
7731 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7733 bmcr |= BMCR_SPEED100;
7736 bmcr |= BMCR_SPEED1000;
7741 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7742 tg3_readphy(tp, MII_CTRL1000, &val);
7743 val |= CTL1000_AS_MASTER |
7744 CTL1000_ENABLE_MASTER;
7745 tg3_writephy(tp, MII_CTRL1000, val);
7747 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7748 MII_TG3_FET_PTEST_TRIM_2;
7749 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7752 bmcr |= BMCR_LOOPBACK;
7754 tg3_writephy(tp, MII_BMCR, bmcr);
7756 /* The write needs to be flushed for the FETs */
7757 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7758 tg3_readphy(tp, MII_BMCR, &bmcr);
7762 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7763 tg3_asic_rev(tp) == ASIC_REV_5785) {
7764 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7765 MII_TG3_FET_PTEST_FRC_TX_LINK |
7766 MII_TG3_FET_PTEST_FRC_TX_LOCK);
7768 /* The write needs to be flushed for the AC131 */
7769 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7772 /* Reset to prevent losing 1st rx packet intermittently */
7773 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7774 tg3_flag(tp, 5780_CLASS)) {
7775 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7777 tw32_f(MAC_RX_MODE, tp->rx_mode);
7780 mac_mode = tp->mac_mode &
7781 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7782 if (speed == SPEED_1000)
7783 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7785 mac_mode |= MAC_MODE_PORT_MODE_MII;
7787 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
7788 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7790 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7791 mac_mode &= ~MAC_MODE_LINK_POLARITY;
7792 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7793 mac_mode |= MAC_MODE_LINK_POLARITY;
7795 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7796 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7799 tw32(MAC_MODE, mac_mode);
7805 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7807 struct tg3 *tp = netdev_priv(dev);
7809 if (features & NETIF_F_LOOPBACK) {
7810 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7813 spin_lock_bh(&tp->lock);
7814 tg3_mac_loopback(tp, true);
7815 netif_carrier_on(tp->dev);
7816 spin_unlock_bh(&tp->lock);
7817 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7819 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7822 spin_lock_bh(&tp->lock);
7823 tg3_mac_loopback(tp, false);
7824 /* Force link status check */
7825 tg3_setup_phy(tp, 1);
7826 spin_unlock_bh(&tp->lock);
7827 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7831 static netdev_features_t tg3_fix_features(struct net_device *dev,
7832 netdev_features_t features)
7834 struct tg3 *tp = netdev_priv(dev);
7836 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7837 features &= ~NETIF_F_ALL_TSO;
7842 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7844 netdev_features_t changed = dev->features ^ features;
7846 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7847 tg3_set_loopback(dev, features);
7852 static void tg3_rx_prodring_free(struct tg3 *tp,
7853 struct tg3_rx_prodring_set *tpr)
7857 if (tpr != &tp->napi[0].prodring) {
7858 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7859 i = (i + 1) & tp->rx_std_ring_mask)
7860 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7863 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7864 for (i = tpr->rx_jmb_cons_idx;
7865 i != tpr->rx_jmb_prod_idx;
7866 i = (i + 1) & tp->rx_jmb_ring_mask) {
7867 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7875 for (i = 0; i <= tp->rx_std_ring_mask; i++)
7876 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7879 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7880 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7881 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7886 /* Initialize rx rings for packet processing.
7888 * The chip has been shut down and the driver detached from
7889 * the networking stack, so no interrupts or new tx packets will
7890 * end up in the driver. tp->{tx,}lock are held and thus
7893 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7894 struct tg3_rx_prodring_set *tpr)
7896 u32 i, rx_pkt_dma_sz;
7898 tpr->rx_std_cons_idx = 0;
7899 tpr->rx_std_prod_idx = 0;
7900 tpr->rx_jmb_cons_idx = 0;
7901 tpr->rx_jmb_prod_idx = 0;
7903 if (tpr != &tp->napi[0].prodring) {
7904 memset(&tpr->rx_std_buffers[0], 0,
7905 TG3_RX_STD_BUFF_RING_SIZE(tp));
7906 if (tpr->rx_jmb_buffers)
7907 memset(&tpr->rx_jmb_buffers[0], 0,
7908 TG3_RX_JMB_BUFF_RING_SIZE(tp));
7912 /* Zero out all descriptors. */
7913 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7915 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7916 if (tg3_flag(tp, 5780_CLASS) &&
7917 tp->dev->mtu > ETH_DATA_LEN)
7918 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7919 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7921 /* Initialize invariants of the rings; we only set this
7922 * stuff once. This works because the card does not
7923 * write into the rx buffer posting rings.
7925 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7926 struct tg3_rx_buffer_desc *rxd;
7928 rxd = &tpr->rx_std[i];
7929 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7930 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7931 rxd->opaque = (RXD_OPAQUE_RING_STD |
7932 (i << RXD_OPAQUE_INDEX_SHIFT));
7935 /* Now allocate fresh SKBs for each rx ring. */
7936 for (i = 0; i < tp->rx_pending; i++) {
7937 unsigned int frag_size;
7939 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
7941 netdev_warn(tp->dev,
7942 "Using a smaller RX standard ring. Only "
7943 "%d out of %d buffers were allocated "
7944 "successfully\n", i, tp->rx_pending);
7952 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7955 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7957 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7960 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7961 struct tg3_rx_buffer_desc *rxd;
7963 rxd = &tpr->rx_jmb[i].std;
7964 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7965 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7967 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7968 (i << RXD_OPAQUE_INDEX_SHIFT));
7971 for (i = 0; i < tp->rx_jumbo_pending; i++) {
7972 unsigned int frag_size;
7974 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
7976 netdev_warn(tp->dev,
7977 "Using a smaller RX jumbo ring. Only %d "
7978 "out of %d buffers were allocated "
7979 "successfully\n", i, tp->rx_jumbo_pending);
7982 tp->rx_jumbo_pending = i;
7991 tg3_rx_prodring_free(tp, tpr);
7995 static void tg3_rx_prodring_fini(struct tg3 *tp,
7996 struct tg3_rx_prodring_set *tpr)
7998 kfree(tpr->rx_std_buffers);
7999 tpr->rx_std_buffers = NULL;
8000 kfree(tpr->rx_jmb_buffers);
8001 tpr->rx_jmb_buffers = NULL;
8003 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8004 tpr->rx_std, tpr->rx_std_mapping);
8008 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8009 tpr->rx_jmb, tpr->rx_jmb_mapping);
8014 static int tg3_rx_prodring_init(struct tg3 *tp,
8015 struct tg3_rx_prodring_set *tpr)
8017 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8019 if (!tpr->rx_std_buffers)
8022 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8023 TG3_RX_STD_RING_BYTES(tp),
8024 &tpr->rx_std_mapping,
8029 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8030 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8032 if (!tpr->rx_jmb_buffers)
8035 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8036 TG3_RX_JMB_RING_BYTES(tp),
8037 &tpr->rx_jmb_mapping,
8046 tg3_rx_prodring_fini(tp, tpr);
8050 /* Free up pending packets in all rx/tx rings.
8052 * The chip has been shut down and the driver detached from
8053 * the networking stack, so no interrupts or new tx packets will
8054 * end up in the driver. tp->{tx,}lock is not held and we are not
8055 * in an interrupt context and thus may sleep.
8057 static void tg3_free_rings(struct tg3 *tp)
8061 for (j = 0; j < tp->irq_cnt; j++) {
8062 struct tg3_napi *tnapi = &tp->napi[j];
8064 tg3_rx_prodring_free(tp, &tnapi->prodring);
8066 if (!tnapi->tx_buffers)
8069 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8070 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8075 tg3_tx_skb_unmap(tnapi, i,
8076 skb_shinfo(skb)->nr_frags - 1);
8078 dev_kfree_skb_any(skb);
8080 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8084 /* Initialize tx/rx rings for packet processing.
8086 * The chip has been shut down and the driver detached from
8087 * the networking stack, so no interrupts or new tx packets will
8088 * end up in the driver. tp->{tx,}lock are held and thus
8091 static int tg3_init_rings(struct tg3 *tp)
8095 /* Free up all the SKBs. */
8098 for (i = 0; i < tp->irq_cnt; i++) {
8099 struct tg3_napi *tnapi = &tp->napi[i];
8101 tnapi->last_tag = 0;
8102 tnapi->last_irq_tag = 0;
8103 tnapi->hw_status->status = 0;
8104 tnapi->hw_status->status_tag = 0;
8105 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8110 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8112 tnapi->rx_rcb_ptr = 0;
8114 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8116 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8125 static void tg3_mem_tx_release(struct tg3 *tp)
8129 for (i = 0; i < tp->irq_max; i++) {
8130 struct tg3_napi *tnapi = &tp->napi[i];
8132 if (tnapi->tx_ring) {
8133 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8134 tnapi->tx_ring, tnapi->tx_desc_mapping);
8135 tnapi->tx_ring = NULL;
8138 kfree(tnapi->tx_buffers);
8139 tnapi->tx_buffers = NULL;
8143 static int tg3_mem_tx_acquire(struct tg3 *tp)
8146 struct tg3_napi *tnapi = &tp->napi[0];
8148 /* If multivector TSS is enabled, vector 0 does not handle
8149 * tx interrupts. Don't allocate any resources for it.
8151 if (tg3_flag(tp, ENABLE_TSS))
8154 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8155 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8156 TG3_TX_RING_SIZE, GFP_KERNEL);
8157 if (!tnapi->tx_buffers)
8160 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8162 &tnapi->tx_desc_mapping,
8164 if (!tnapi->tx_ring)
8171 tg3_mem_tx_release(tp);
8175 static void tg3_mem_rx_release(struct tg3 *tp)
8179 for (i = 0; i < tp->irq_max; i++) {
8180 struct tg3_napi *tnapi = &tp->napi[i];
8182 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8187 dma_free_coherent(&tp->pdev->dev,
8188 TG3_RX_RCB_RING_BYTES(tp),
8190 tnapi->rx_rcb_mapping);
8191 tnapi->rx_rcb = NULL;
8195 static int tg3_mem_rx_acquire(struct tg3 *tp)
8197 unsigned int i, limit;
8199 limit = tp->rxq_cnt;
8201 /* If RSS is enabled, we need a (dummy) producer ring
8202 * set on vector zero. This is the true hw prodring.
8204 if (tg3_flag(tp, ENABLE_RSS))
8207 for (i = 0; i < limit; i++) {
8208 struct tg3_napi *tnapi = &tp->napi[i];
8210 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8213 /* If multivector RSS is enabled, vector 0
8214 * does not handle rx or tx interrupts.
8215 * Don't allocate any resources for it.
8217 if (!i && tg3_flag(tp, ENABLE_RSS))
8220 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8221 TG3_RX_RCB_RING_BYTES(tp),
8222 &tnapi->rx_rcb_mapping,
8223 GFP_KERNEL | __GFP_ZERO);
8231 tg3_mem_rx_release(tp);
8236 * Must not be invoked with interrupt sources disabled and
8237 * the hardware shut down.
8239 static void tg3_free_consistent(struct tg3 *tp)
8243 for (i = 0; i < tp->irq_cnt; i++) {
8244 struct tg3_napi *tnapi = &tp->napi[i];
8246 if (tnapi->hw_status) {
8247 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8249 tnapi->status_mapping);
8250 tnapi->hw_status = NULL;
8254 tg3_mem_rx_release(tp);
8255 tg3_mem_tx_release(tp);
8258 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8259 tp->hw_stats, tp->stats_mapping);
8260 tp->hw_stats = NULL;
8265 * Must not be invoked with interrupt sources disabled and
8266 * the hardware shut down. Can sleep.
8268 static int tg3_alloc_consistent(struct tg3 *tp)
8272 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8273 sizeof(struct tg3_hw_stats),
8275 GFP_KERNEL | __GFP_ZERO);
8279 for (i = 0; i < tp->irq_cnt; i++) {
8280 struct tg3_napi *tnapi = &tp->napi[i];
8281 struct tg3_hw_status *sblk;
8283 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8285 &tnapi->status_mapping,
8286 GFP_KERNEL | __GFP_ZERO);
8287 if (!tnapi->hw_status)
8290 sblk = tnapi->hw_status;
8292 if (tg3_flag(tp, ENABLE_RSS)) {
8293 u16 *prodptr = NULL;
8296 * When RSS is enabled, the status block format changes
8297 * slightly. The "rx_jumbo_consumer", "reserved",
8298 * and "rx_mini_consumer" members get mapped to the
8299 * other three rx return ring producer indexes.
8303 prodptr = &sblk->idx[0].rx_producer;
8306 prodptr = &sblk->rx_jumbo_consumer;
8309 prodptr = &sblk->reserved;
8312 prodptr = &sblk->rx_mini_consumer;
8315 tnapi->rx_rcb_prod_idx = prodptr;
8317 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
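/* Illustrative summary (per the comment above): the elided switch picks
 * the producer pointer by vector index - idx[0].rx_producer serves the
 * first return ring, while rx_jumbo_consumer, reserved and
 * rx_mini_consumer stand in for the remaining return rings when RSS
 * is enabled.
 */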
8321 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8327 tg3_free_consistent(tp);
8331 #define MAX_WAIT_CNT 1000
8333 /* To stop a block, clear the enable bit and poll till it
8334 * clears. tp->lock is held.
8336 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
8341 if (tg3_flag(tp, 5705_PLUS)) {
8348 /* We can't enable/disable these bits of the
8349 * 5705/5750, just say success.
8362 for (i = 0; i < MAX_WAIT_CNT; i++) {
8365 if ((val & enable_bit) == 0)
8369 if (i == MAX_WAIT_CNT && !silent) {
8370 dev_err(&tp->pdev->dev,
8371 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8379 /* tp->lock is held. */
8380 static int tg3_abort_hw(struct tg3 *tp, int silent)
8384 tg3_disable_ints(tp);
8386 tp->rx_mode &= ~RX_MODE_ENABLE;
8387 tw32_f(MAC_RX_MODE, tp->rx_mode);
8390 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8391 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8392 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8393 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8394 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8395 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8397 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8398 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8399 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8400 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8401 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8402 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8403 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8405 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8406 tw32_f(MAC_MODE, tp->mac_mode);
8409 tp->tx_mode &= ~TX_MODE_ENABLE;
8410 tw32_f(MAC_TX_MODE, tp->tx_mode);
8412 for (i = 0; i < MAX_WAIT_CNT; i++) {
8414 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8417 if (i >= MAX_WAIT_CNT) {
8418 dev_err(&tp->pdev->dev,
8419 "%s timed out, TX_MODE_ENABLE will not clear "
8420 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8424 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8425 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8426 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8428 tw32(FTQ_RESET, 0xffffffff);
8429 tw32(FTQ_RESET, 0x00000000);
8431 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8432 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8434 for (i = 0; i < tp->irq_cnt; i++) {
8435 struct tg3_napi *tnapi = &tp->napi[i];
8436 if (tnapi->hw_status)
8437 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8443 /* Save PCI command register before chip reset */
8444 static void tg3_save_pci_state(struct tg3 *tp)
8446 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8449 /* Restore PCI state after chip reset */
8450 static void tg3_restore_pci_state(struct tg3 *tp)
8454 /* Re-enable indirect register accesses. */
8455 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8456 tp->misc_host_ctrl);
8458 /* Set MAX PCI retry to zero. */
8459 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8460 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8461 tg3_flag(tp, PCIX_MODE))
8462 val |= PCISTATE_RETRY_SAME_DMA;
8463 /* Allow reads and writes to the APE register and memory space. */
8464 if (tg3_flag(tp, ENABLE_APE))
8465 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8466 PCISTATE_ALLOW_APE_SHMEM_WR |
8467 PCISTATE_ALLOW_APE_PSPACE_WR;
8468 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8470 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8472 if (!tg3_flag(tp, PCI_EXPRESS)) {
8473 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8474 tp->pci_cacheline_sz);
8475 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8479 /* Make sure PCI-X relaxed ordering bit is clear. */
8480 if (tg3_flag(tp, PCIX_MODE)) {
8483 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8485 pcix_cmd &= ~PCI_X_CMD_ERO;
8486 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8490 if (tg3_flag(tp, 5780_CLASS)) {
8492 /* Chip reset on 5780 will reset MSI enable bit,
8493 * so we need to restore it.
8495 if (tg3_flag(tp, USING_MSI)) {
8498 pci_read_config_word(tp->pdev,
8499 tp->msi_cap + PCI_MSI_FLAGS,
8501 pci_write_config_word(tp->pdev,
8502 tp->msi_cap + PCI_MSI_FLAGS,
8503 ctrl | PCI_MSI_FLAGS_ENABLE);
8504 val = tr32(MSGINT_MODE);
8505 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8510 /* tp->lock is held. */
8511 static int tg3_chip_reset(struct tg3 *tp)
8514 void (*write_op)(struct tg3 *, u32, u32);
8519 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8521 /* No matching tg3_nvram_unlock() after this because
8522 * chip reset below will undo the nvram lock.
8524 tp->nvram_lock_cnt = 0;
8526 /* GRC_MISC_CFG core clock reset will clear the memory
8527 * enable bit in PCI register 4 and the MSI enable bit
8528 * on some chips, so we save relevant registers here.
8530 tg3_save_pci_state(tp);
8532 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8533 tg3_flag(tp, 5755_PLUS))
8534 tw32(GRC_FASTBOOT_PC, 0);
8537 * We must avoid the readl() that normally takes place.
8538 * It locks machines, causes machine checks, and other
8539 * fun things. So, temporarily disable the 5701
8540 * hardware workaround, while we do the reset.
8542 write_op = tp->write32;
8543 if (write_op == tg3_write_flush_reg32)
8544 tp->write32 = tg3_write32;
8546 /* Prevent the irq handler from reading or writing PCI registers
8547 * during chip reset when the memory enable bit in the PCI command
8548 * register may be cleared. The chip does not generate interrupts
8549 * at this time, but the irq handler may still be called due to irq
8550 * sharing or irqpoll.
8552 tg3_flag_set(tp, CHIP_RESETTING);
8553 for (i = 0; i < tp->irq_cnt; i++) {
8554 struct tg3_napi *tnapi = &tp->napi[i];
8555 if (tnapi->hw_status) {
8556 tnapi->hw_status->status = 0;
8557 tnapi->hw_status->status_tag = 0;
8559 tnapi->last_tag = 0;
8560 tnapi->last_irq_tag = 0;
8564 for (i = 0; i < tp->irq_cnt; i++)
8565 synchronize_irq(tp->napi[i].irq_vec);
8567 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8568 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8569 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8573 val = GRC_MISC_CFG_CORECLK_RESET;
8575 if (tg3_flag(tp, PCI_EXPRESS)) {
8576 /* Force PCIe 1.0a mode */
8577 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8578 !tg3_flag(tp, 57765_PLUS) &&
8579 tr32(TG3_PCIE_PHY_TSTCTL) ==
8580 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8581 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8583 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
8584 tw32(GRC_MISC_CFG, (1 << 29));
8589 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
8590 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8591 tw32(GRC_VCPU_EXT_CTRL,
8592 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8595 /* Manage gphy power for all CPMU absent PCIe devices. */
8596 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8597 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8599 tw32(GRC_MISC_CFG, val);
8601 /* restore 5701 hardware bug workaround write method */
8602 tp->write32 = write_op;
8604 /* Unfortunately, we have to delay before the PCI read back.
8605 * Some 575X chips will not even respond to a PCI cfg access
8606 * when the reset command is given to the chip.
8608 * How do these hardware designers expect things to work
8609 * properly if the PCI write is posted for a long period
8610 * of time? It is always necessary to have some method by
8611 * which a register read-back can occur to push out the
8612 * write that performs the reset.
8614 * For most tg3 variants the trick below has worked.
8619 /* Flush PCI posted writes. The normal MMIO registers
8620 * are inaccessible at this time so this is the only
8622 * way to do this reliably (actually, this is no longer
8622 * the case, see above). I tried to use indirect
8623 * register read/write but this upset some 5701 variants.
8625 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8629 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8632 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
8636 /* Wait for link training to complete. */
8637 for (j = 0; j < 5000; j++)
8640 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8641 pci_write_config_dword(tp->pdev, 0xc4,
8642 cfg_val | (1 << 15));
8645 /* Clear the "no snoop" and "relaxed ordering" bits. */
8646 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8648 * Older PCIe devices only support the 128-byte
8649 * MPS setting. Enforce the restriction.
8651 if (!tg3_flag(tp, CPMU_PRESENT))
8652 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8653 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8655 /* Clear error status */
8656 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8657 PCI_EXP_DEVSTA_CED |
8658 PCI_EXP_DEVSTA_NFED |
8659 PCI_EXP_DEVSTA_FED |
8660 PCI_EXP_DEVSTA_URD);
8663 tg3_restore_pci_state(tp);
8665 tg3_flag_clear(tp, CHIP_RESETTING);
8666 tg3_flag_clear(tp, ERROR_PROCESSED);
8669 if (tg3_flag(tp, 5780_CLASS))
8670 val = tr32(MEMARB_MODE);
8671 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8673 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
8675 tw32(0x5000, 0x400);
8678 if (tg3_flag(tp, IS_SSB_CORE)) {
8680 * BCM4785: In order to avoid repercussions from using
8681 * potentially defective internal ROM, stop the Rx RISC CPU,
8682 * which is not required.
8685 tg3_halt_cpu(tp, RX_CPU_BASE);
8688 tw32(GRC_MODE, tp->grc_mode);
8690 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
8693 tw32(0xc4, val | (1 << 15));
8696 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8697 tg3_asic_rev(tp) == ASIC_REV_5705) {
8698 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8699 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
8700 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8701 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8704 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8705 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8707 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8708 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8713 tw32_f(MAC_MODE, val);
8716 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8718 err = tg3_poll_fw(tp);
8724 if (tg3_flag(tp, PCI_EXPRESS) &&
8725 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
8726 tg3_asic_rev(tp) != ASIC_REV_5785 &&
8727 !tg3_flag(tp, 57765_PLUS)) {
8730 tw32(0x7c00, val | (1 << 25));
8733 if (tg3_asic_rev(tp) == ASIC_REV_5720) {
8734 val = tr32(TG3_CPMU_CLCK_ORIDE);
8735 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8738 /* Reprobe ASF enable state. */
8739 tg3_flag_clear(tp, ENABLE_ASF);
8740 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8741 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8742 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8745 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8746 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8747 tg3_flag_set(tp, ENABLE_ASF);
8748 tp->last_event_jiffies = jiffies;
8749 if (tg3_flag(tp, 5750_PLUS))
8750 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8757 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8758 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8760 /* tp->lock is held. */
8761 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8767 tg3_write_sig_pre_reset(tp, kind);
8769 tg3_abort_hw(tp, silent);
8770 err = tg3_chip_reset(tp);
8772 __tg3_set_mac_addr(tp, 0);
8774 tg3_write_sig_legacy(tp, kind);
8775 tg3_write_sig_post_reset(tp, kind);
8778 /* Save the stats across chip resets... */
8779 tg3_get_nstats(tp, &tp->net_stats_prev);
8780 tg3_get_estats(tp, &tp->estats_prev);
8782 /* And make sure the next sample is new data */
8783 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8792 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8794 struct tg3 *tp = netdev_priv(dev);
8795 struct sockaddr *addr = p;
8796 int err = 0, skip_mac_1 = 0;
8798 if (!is_valid_ether_addr(addr->sa_data))
8799 return -EADDRNOTAVAIL;
8801 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8803 if (!netif_running(dev))
8806 if (tg3_flag(tp, ENABLE_ASF)) {
8807 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8809 addr0_high = tr32(MAC_ADDR_0_HIGH);
8810 addr0_low = tr32(MAC_ADDR_0_LOW);
8811 addr1_high = tr32(MAC_ADDR_1_HIGH);
8812 addr1_low = tr32(MAC_ADDR_1_LOW);
8814 /* Skip MAC addr 1 if ASF is using it. */
8815 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8816 !(addr1_high == 0 && addr1_low == 0))
8819 spin_lock_bh(&tp->lock);
8820 __tg3_set_mac_addr(tp, skip_mac_1);
8821 spin_unlock_bh(&tp->lock);
8826 /* tp->lock is held. */
8827 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8828 dma_addr_t mapping, u32 maxlen_flags,
8832 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8833 ((u64) mapping >> 32));
8835 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8836 ((u64) mapping & 0xffffffff));
8838 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8841 if (!tg3_flag(tp, 5705_PLUS))
8843 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8848 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8852 if (!tg3_flag(tp, ENABLE_TSS)) {
8853 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8854 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8855 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8857 tw32(HOSTCC_TXCOL_TICKS, 0);
8858 tw32(HOSTCC_TXMAX_FRAMES, 0);
8859 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8861 for (; i < tp->txq_cnt; i++) {
8864 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8865 tw32(reg, ec->tx_coalesce_usecs);
8866 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8867 tw32(reg, ec->tx_max_coalesced_frames);
8868 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8869 tw32(reg, ec->tx_max_coalesced_frames_irq);
8873 for (; i < tp->irq_max - 1; i++) {
8874 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8875 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8876 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8880 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8883 u32 limit = tp->rxq_cnt;
8885 if (!tg3_flag(tp, ENABLE_RSS)) {
8886 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8887 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8888 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8891 tw32(HOSTCC_RXCOL_TICKS, 0);
8892 tw32(HOSTCC_RXMAX_FRAMES, 0);
8893 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8896 for (; i < limit; i++) {
8899 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8900 tw32(reg, ec->rx_coalesce_usecs);
8901 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8902 tw32(reg, ec->rx_max_coalesced_frames);
8903 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8904 tw32(reg, ec->rx_max_coalesced_frames_irq);
8907 for (; i < tp->irq_max - 1; i++) {
8908 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8909 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8910 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8914 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8916 tg3_coal_tx_init(tp, ec);
8917 tg3_coal_rx_init(tp, ec);
8919 if (!tg3_flag(tp, 5705_PLUS)) {
8920 u32 val = ec->stats_block_coalesce_usecs;
8922 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8923 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8928 tw32(HOSTCC_STAT_COAL_TICKS, val);
8932 /* tp->lock is held. */
8933 static void tg3_rings_reset(struct tg3 *tp)
8936 u32 stblk, txrcb, rxrcb, limit;
8937 struct tg3_napi *tnapi = &tp->napi[0];
8939 /* Disable all transmit rings but the first. */
8940 if (!tg3_flag(tp, 5705_PLUS))
8941 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8942 else if (tg3_flag(tp, 5717_PLUS))
8943 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8944 else if (tg3_flag(tp, 57765_CLASS) ||
8945 tg3_asic_rev(tp) == ASIC_REV_5762)
8946 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8948 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8950 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8951 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8952 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8953 BDINFO_FLAGS_DISABLED);
8956 /* Disable all receive return rings but the first. */
8957 if (tg3_flag(tp, 5717_PLUS))
8958 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8959 else if (!tg3_flag(tp, 5705_PLUS))
8960 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8961 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
8962 tg3_asic_rev(tp) == ASIC_REV_5762 ||
8963 tg3_flag(tp, 57765_CLASS))
8964 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8966 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8968 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8969 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8970 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8971 BDINFO_FLAGS_DISABLED);
8973 /* Disable interrupts */
8974 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8975 tp->napi[0].chk_msi_cnt = 0;
8976 tp->napi[0].last_rx_cons = 0;
8977 tp->napi[0].last_tx_cons = 0;
8979 /* Zero mailbox registers. */
8980 if (tg3_flag(tp, SUPPORT_MSIX)) {
8981 for (i = 1; i < tp->irq_max; i++) {
8982 tp->napi[i].tx_prod = 0;
8983 tp->napi[i].tx_cons = 0;
8984 if (tg3_flag(tp, ENABLE_TSS))
8985 tw32_mailbox(tp->napi[i].prodmbox, 0);
8986 tw32_rx_mbox(tp->napi[i].consmbox, 0);
8987 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8988 tp->napi[i].chk_msi_cnt = 0;
8989 tp->napi[i].last_rx_cons = 0;
8990 tp->napi[i].last_tx_cons = 0;
8992 if (!tg3_flag(tp, ENABLE_TSS))
8993 tw32_mailbox(tp->napi[0].prodmbox, 0);
8995 tp->napi[0].tx_prod = 0;
8996 tp->napi[0].tx_cons = 0;
8997 tw32_mailbox(tp->napi[0].prodmbox, 0);
8998 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9001 /* Make sure the NIC-based send BD rings are disabled. */
9002 if (!tg3_flag(tp, 5705_PLUS)) {
9003 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9004 for (i = 0; i < 16; i++)
9005 tw32_tx_mbox(mbox + i * 8, 0);
9008 txrcb = NIC_SRAM_SEND_RCB;
9009 rxrcb = NIC_SRAM_RCV_RET_RCB;
9011 /* Clear status block in ram. */
9012 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9014 /* Set status block DMA address */
9015 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9016 ((u64) tnapi->status_mapping >> 32));
9017 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9018 ((u64) tnapi->status_mapping & 0xffffffff));
9020 if (tnapi->tx_ring) {
9021 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9022 (TG3_TX_RING_SIZE <<
9023 BDINFO_FLAGS_MAXLEN_SHIFT),
9024 NIC_SRAM_TX_BUFFER_DESC);
9025 txrcb += TG3_BDINFO_SIZE;
9028 if (tnapi->rx_rcb) {
9029 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9030 (tp->rx_ret_ring_mask + 1) <<
9031 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9032 rxrcb += TG3_BDINFO_SIZE;
9035 stblk = HOSTCC_STATBLCK_RING1;
9037 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9038 u64 mapping = (u64)tnapi->status_mapping;
9039 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9040 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9042 /* Clear status block in ram. */
9043 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9045 if (tnapi->tx_ring) {
9046 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9047 (TG3_TX_RING_SIZE <<
9048 BDINFO_FLAGS_MAXLEN_SHIFT),
9049 NIC_SRAM_TX_BUFFER_DESC);
9050 txrcb += TG3_BDINFO_SIZE;
9053 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9054 ((tp->rx_ret_ring_mask + 1) <<
9055 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
9058 rxrcb += TG3_BDINFO_SIZE;
9062 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9064 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9066 if (!tg3_flag(tp, 5750_PLUS) ||
9067 tg3_flag(tp, 5780_CLASS) ||
9068 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9069 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9070 tg3_flag(tp, 57765_PLUS))
9071 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9072 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9073 tg3_asic_rev(tp) == ASIC_REV_5787)
9074 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9076 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9078 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9079 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9081 val = min(nic_rep_thresh, host_rep_thresh);
9082 tw32(RCVBDI_STD_THRESH, val);
9084 if (tg3_flag(tp, 57765_PLUS))
9085 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9087 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9090 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9092 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9094 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9095 tw32(RCVBDI_JUMBO_THRESH, val);
9097 if (tg3_flag(tp, 57765_PLUS))
9098 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
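/* Worked example (illustrative): with tp->rx_pending = 200 the host
 * replenish threshold is max(200 / 8, 1) = 25 descriptors; the value
 * actually programmed into RCVBDI_STD_THRESH is the smaller of that
 * and the NIC-side threshold (half the chip's BD cache, capped at
 * tp->rx_std_max_post).
 */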
9101 static inline u32 calc_crc(unsigned char *buf, int len)
9109 for (j = 0; j < len; j++) {
9112 for (k = 0; k < 8; k++) {
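/* The elided loop body above implements the standard bit-serial CRC-32
 * (reflected Ethernet polynomial 0xedb88320) used for the multicast
 * hash. A self-contained sketch of the same computation:
 */
static u32 example_ether_crc(const unsigned char *buf, int len)
{
	u32 reg = 0xffffffff;
	int j, k;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];
		for (k = 0; k < 8; k++) {
			/* Shift one bit out; fold in the polynomial
			 * whenever the dropped bit was set.
			 */
			if (reg & 1)
				reg = (reg >> 1) ^ 0xedb88320;
			else
				reg >>= 1;
		}
	}
	return ~reg;
}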
9125 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9127 /* accept or reject all multicast frames */
9128 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9129 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9130 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9131 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9134 static void __tg3_set_rx_mode(struct net_device *dev)
9136 struct tg3 *tp = netdev_priv(dev);
9139 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9140 RX_MODE_KEEP_VLAN_TAG);
9142 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9143 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9146 if (!tg3_flag(tp, ENABLE_ASF))
9147 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9150 if (dev->flags & IFF_PROMISC) {
9151 /* Promiscuous mode. */
9152 rx_mode |= RX_MODE_PROMISC;
9153 } else if (dev->flags & IFF_ALLMULTI) {
9154 /* Accept all multicast. */
9155 tg3_set_multi(tp, 1);
9156 } else if (netdev_mc_empty(dev)) {
9157 /* Reject all multicast. */
9158 tg3_set_multi(tp, 0);
9160 /* Accept one or more multicast(s). */
9161 struct netdev_hw_addr *ha;
9162 u32 mc_filter[4] = { 0, };
9167 netdev_for_each_mc_addr(ha, dev) {
9168 crc = calc_crc(ha->addr, ETH_ALEN);
9170 regidx = (bit & 0x60) >> 5;
9172 mc_filter[regidx] |= (1 << bit);
9175 tw32(MAC_HASH_REG_0, mc_filter[0]);
9176 tw32(MAC_HASH_REG_1, mc_filter[1]);
9177 tw32(MAC_HASH_REG_2, mc_filter[2]);
9178 tw32(MAC_HASH_REG_3, mc_filter[3]);
9181 if (rx_mode != tp->rx_mode) {
9182 tp->rx_mode = rx_mode;
9183 tw32_f(MAC_RX_MODE, rx_mode);
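/* Worked example (illustrative): the multicast filter above treats the
 * CRC as a 7-bit hash. A hash value of 0x6a selects register
 * (0x6a & 0x60) >> 5 = 3 and bit 0x6a & 0x1f = 10, so that address
 * sets bit 10 of MAC_HASH_REG_3; frames whose destination hashes to a
 * set bit pass the filter.
 */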
9188 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9192 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9193 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
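/* Illustrative: ethtool_rxfh_indir_default(i, qcnt) is i % qcnt, so
 * with qcnt = 4 the default table spreads flows round-robin:
 * 0, 1, 2, 3, 0, 1, 2, 3, ... across all TG3_RSS_INDIR_TBL_SIZE slots.
 */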
9196 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9200 if (!tg3_flag(tp, SUPPORT_MSIX))
9203 if (tp->rxq_cnt == 1) {
9204 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9208 /* Validate table against current IRQ count */
9209 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9210 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9214 if (i != TG3_RSS_INDIR_TBL_SIZE)
9215 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9218 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9221 u32 reg = MAC_RSS_INDIR_TBL_0;
9223 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9224 u32 val = tp->rss_ind_tbl[i];
9226 for (; i % 8; i++) {
9228 val |= tp->rss_ind_tbl[i];
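/* Worked example (illustrative): the loop above packs eight 4-bit
 * entries per register, most-significant nibble first, so a table of
 * 0, 1, 2, 3, 0, 1, 2, 3 is written as 0x01230123 before reg advances
 * 4 bytes to the next MAC_RSS_INDIR_TBL register.
 */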
9235 /* tp->lock is held. */
9236 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
9238 u32 val, rdmac_mode;
9240 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9242 tg3_disable_ints(tp);
9246 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9248 if (tg3_flag(tp, INIT_COMPLETE))
9249 tg3_abort_hw(tp, 1);
9251 /* Enable MAC control of LPI */
9252 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
9253 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
9254 TG3_CPMU_EEE_LNKIDL_UART_IDL;
9255 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9256 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
9258 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
9260 tw32_f(TG3_CPMU_EEE_CTRL,
9261 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9263 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9264 TG3_CPMU_EEEMD_LPI_IN_TX |
9265 TG3_CPMU_EEEMD_LPI_IN_RX |
9266 TG3_CPMU_EEEMD_EEE_ENABLE;
9268 if (tg3_asic_rev(tp) != ASIC_REV_5717)
9269 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9271 if (tg3_flag(tp, ENABLE_APE))
9272 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9274 tw32_f(TG3_CPMU_EEE_MODE, val);
9276 tw32_f(TG3_CPMU_EEE_DBTMR1,
9277 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9278 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9280 tw32_f(TG3_CPMU_EEE_DBTMR2,
9281 TG3_CPMU_DBTMR2_APE_TX_2047US |
9282 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9288 err = tg3_chip_reset(tp);
9292 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9294 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9295 val = tr32(TG3_CPMU_CTRL);
9296 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9297 tw32(TG3_CPMU_CTRL, val);
9299 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9300 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9301 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9302 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9304 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9305 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9306 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9307 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9309 val = tr32(TG3_CPMU_HST_ACC);
9310 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9311 val |= CPMU_HST_ACC_MACCLK_6_25;
9312 tw32(TG3_CPMU_HST_ACC, val);
9315 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9316 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9317 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9318 PCIE_PWR_MGMT_L1_THRESH_4MS;
9319 tw32(PCIE_PWR_MGMT_THRESH, val);
9321 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9322 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9324 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9326 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9327 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9330 if (tg3_flag(tp, L1PLLPD_EN)) {
9331 u32 grc_mode = tr32(GRC_MODE);
9333 /* Access the lower 1K of PL PCIE block registers. */
9334 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9335 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9337 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9338 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9339 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9341 tw32(GRC_MODE, grc_mode);
9344 if (tg3_flag(tp, 57765_CLASS)) {
9345 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9346 u32 grc_mode = tr32(GRC_MODE);
9348 /* Access the lower 1K of PL PCIE block registers. */
9349 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9350 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9352 val = tr32(TG3_PCIE_TLDLPL_PORT +
9353 TG3_PCIE_PL_LO_PHYCTL5);
9354 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9355 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9357 tw32(GRC_MODE, grc_mode);
9360 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9363 /* Fix transmit hangs */
9364 val = tr32(TG3_CPMU_PADRNG_CTL);
9365 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9366 tw32(TG3_CPMU_PADRNG_CTL, val);
9368 grc_mode = tr32(GRC_MODE);
9370 /* Access the lower 1K of DL PCIE block registers. */
9371 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9372 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9374 val = tr32(TG3_PCIE_TLDLPL_PORT +
9375 TG3_PCIE_DL_LO_FTSMAX);
9376 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9377 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9378 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9380 tw32(GRC_MODE, grc_mode);
9383 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9384 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9385 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9386 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9389 /* This works around an issue with Athlon chipsets on
9390 * B3 tigon3 silicon. This bit has no effect on any
9391 * other revision. But do not set this on PCI Express
9392 * chips and don't even touch the clocks if the CPMU is present.
9394 if (!tg3_flag(tp, CPMU_PRESENT)) {
9395 if (!tg3_flag(tp, PCI_EXPRESS))
9396 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9397 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9400 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9401 tg3_flag(tp, PCIX_MODE)) {
9402 val = tr32(TG3PCI_PCISTATE);
9403 val |= PCISTATE_RETRY_SAME_DMA;
9404 tw32(TG3PCI_PCISTATE, val);
9407 if (tg3_flag(tp, ENABLE_APE)) {
9408 /* Allow reads and writes to the
9409 * APE register and memory space.
9411 val = tr32(TG3PCI_PCISTATE);
9412 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9413 PCISTATE_ALLOW_APE_SHMEM_WR |
9414 PCISTATE_ALLOW_APE_PSPACE_WR;
9415 tw32(TG3PCI_PCISTATE, val);
9418 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9419 /* Enable some hw fixes. */
9420 val = tr32(TG3PCI_MSI_DATA);
9421 val |= (1 << 26) | (1 << 28) | (1 << 29);
9422 tw32(TG3PCI_MSI_DATA, val);
9425 /* Descriptor ring init may make accesses to the
9426 * NIC SRAM area to set up the TX descriptors, so we
9427 * can only do this after the hardware has been
9428 * successfully reset.
9430 err = tg3_init_rings(tp);
9434 if (tg3_flag(tp, 57765_PLUS)) {
9435 val = tr32(TG3PCI_DMA_RW_CTRL) &
9436 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9437 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9438 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9439 if (!tg3_flag(tp, 57765_CLASS) &&
9440 tg3_asic_rev(tp) != ASIC_REV_5717 &&
9441 tg3_asic_rev(tp) != ASIC_REV_5762)
9442 val |= DMA_RWCTRL_TAGGED_STAT_WA;
9443 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9444 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9445 tg3_asic_rev(tp) != ASIC_REV_5761) {
9446 /* This value is determined during the probe-time DMA
9447 * engine test, tg3_test_dma.
9449 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9452 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9453 GRC_MODE_4X_NIC_SEND_RINGS |
9454 GRC_MODE_NO_TX_PHDR_CSUM |
9455 GRC_MODE_NO_RX_PHDR_CSUM);
9456 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9458 /* Pseudo-header checksum is done by hardware logic and not
9459 * the offload processers, so make the chip do the pseudo-
9460 * header checksums on receive. For transmit it is more
9461 * convenient to do the pseudo-header checksum in software
9462 * as Linux does that on transmit for us in all cases.
9464 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9466 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9468 tw32(TG3_RX_PTP_CTL,
9469 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9471 if (tg3_flag(tp, PTP_CAPABLE))
9472 val |= GRC_MODE_TIME_SYNC_ENABLE;
9474 tw32(GRC_MODE, tp->grc_mode | val);
9476 /* Setup the timer prescalar register. Clock is always 66Mhz. */
9477 val = tr32(GRC_MISC_CFG);
9479 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9480 tw32(GRC_MISC_CFG, val);
	/* Initialize MBUF/DESC pool. */
	if (tg3_flag(tp, 5750_PLUS)) {
		/* Do nothing. */
	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (tg3_asic_rev(tp) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	} else if (tg3_flag(tp, TSO_CAPABLE)) {
		int fw_len;

		fw_len = tp->fw_len;
		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
		tw32(BUFMGR_MB_POOL_ADDR,
		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
		tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
	}

	if (tp->dev->mtu <= ETH_DATA_LEN) {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water);
	} else {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water_jumbo);
	}
	tw32(BUFMGR_DMA_LOW_WATER,
	     tp->bufmgr_config.dma_low_water);
	tw32(BUFMGR_DMA_HIGH_WATER,
	     tp->bufmgr_config.dma_high_water);

	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
	tw32(BUFMGR_MODE, val);
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
		return -ENODEV;
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
	tg3_setup_rxbd_thresholds(tp);

	/* Initialize TG3_BDINFO's at:
	 *  RCVDBDI_STD_BD:	standard eth size rx ring
	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
	 *
	 * like so:
	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
	 *				ring attribute flags
	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
	 *
	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 *
	 * The size of each ring is fixed in the firmware, but the location is
	 * configurable.
	 */
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tpr->rx_std_mapping >> 32));
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tpr->rx_std_mapping & 0xffffffff));
	if (!tg3_flag(tp, 5717_PLUS))
		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
		     NIC_SRAM_RX_BUFFER_DESC);

	/* Disable the mini ring */
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     BDINFO_FLAGS_DISABLED);

	/* Program the jumbo buffer descriptor ring control
	 * blocks on those devices that have them.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {

		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
			     ((u64) tpr->rx_jmb_mapping >> 32));
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
			val = TG3_RX_JMB_RING_SIZE(tp) <<
			      BDINFO_FLAGS_MAXLEN_SHIFT;
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     val | BDINFO_FLAGS_USE_EXT_RECV);
			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
			    tg3_flag(tp, 57765_CLASS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
		} else {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     BDINFO_FLAGS_DISABLED);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			val = TG3_RX_STD_RING_SIZE(tp);
			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
			val |= (TG3_RX_STD_DMA_SZ << 2);
		} else
			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
	} else
		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;

	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);

	tpr->rx_std_prod_idx = tp->rx_pending;
	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);

	tpr->rx_jmb_prod_idx =
		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
	tg3_rings_reset(tp);

	/* Initialize MAC address and backoff seed. */
	__tg3_set_mac_addr(tp, 0);

	/* MTU + ethernet header + FCS + optional VLAN tag */
	tw32(MAC_RX_MTU_SIZE,
	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	/* The slot time is changed by tg3_setup_phy if we
	 * run at gigabit with half duplex.
	 */
	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT) |
	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	tw32(MAC_TX_LENGTHS, val);

	/* Receive rules. */
	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
	tw32(RCVLPC_CONFIG, 0x0181);
	/* Calculate RDMAC_MODE setting early, we need it to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
		      RDMAC_MODE_LNGREAD_ENAB);

	if (tg3_asic_rev(tp) == ASIC_REV_5717)
		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;

	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_asic_rev(tp) == ASIC_REV_5705) {
			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
		}
	}

	if (tg3_flag(tp, PCI_EXPRESS))
		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;

	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		tp->dma_limit = 0;
		if (tp->dev->mtu <= ETH_DATA_LEN) {
			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
		}
	}

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;

	if (tg3_flag(tp, 57765_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;

	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS)) {
		u32 tgtreg;

		if (tg3_asic_rev(tp) == ASIC_REV_5762)
			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
		else
			tgtreg = TG3_RDMA_RSRVCTRL_REG;

		val = tr32(tgtreg);
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
		    tg3_asic_rev(tp) == ASIC_REV_5762) {
			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
		}
		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 tgtreg;

		if (tg3_asic_rev(tp) == ASIC_REV_5762)
			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
		else
			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;

		val = tr32(tgtreg);
		tw32(tgtreg, val |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
	}
	/* Receive/send statistics. */
	if (tg3_flag(tp, 5750_PLUS)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_DACK_FIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
		   tg3_flag(tp, TSO_CAPABLE)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else {
		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
	}
	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
	tw32(SNDDATAI_STATSENAB, 0xffffff);
	tw32(SNDDATAI_STATSCTRL,
	     (SNDDATAI_SCTRL_ENABLE |
	      SNDDATAI_SCTRL_FASTUPD));

	/* Setup host coalescing engine. */
	tw32(HOSTCC_MODE, 0);
	for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
			break;
		udelay(10);
	}

	__tg3_set_coalesce(tp, &tp->coal);

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Status/statistics block address.  See tg3_timer,
		 * the tg3_periodic_fetch_stats call there, and
		 * tg3_get_stats to see how this works for 5705/5750 chips.
		 */
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
		     ((u64) tp->stats_mapping >> 32));
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
		     ((u64) tp->stats_mapping & 0xffffffff));
		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);

		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);

		/* Clear statistics and status block memory areas */
		for (i = NIC_SRAM_STATS_BLK;
		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
		     i += sizeof(u32)) {
			tg3_write_mem(tp, i, 0);
			udelay(40);
		}
	}

	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);

	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		/* reset to prevent losing 1st rx packet intermittently */
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}

	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
			MAC_MODE_FHDE_ENABLE;
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	if (!tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    tg3_asic_rev(tp) != ASIC_REV_5700)
		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
	udelay(40);

	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
	 * If TG3_FLAG_IS_NIC is zero, we should read the
	 * register to preserve the GPIO settings for LOMs. The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * reset.
	 */
	if (!tg3_flag(tp, IS_NIC)) {
		u32 gpio_mask;

		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
				     GRC_LCLCTRL_GPIO_OUTPUT3;

		if (tg3_asic_rev(tp) == ASIC_REV_5755)
			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;

		tp->grc_local_ctrl &= ~gpio_mask;
		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
	}

	/* GPIO1 must be driven high for eeprom write protect */
	if (tg3_flag(tp, EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);

	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	udelay(100);

	if (tg3_flag(tp, USING_MSIX)) {
		val = tr32(MSGINT_MODE);
		val |= MSGINT_MODE_ENABLE;
		if (tp->irq_cnt > 1)
			val |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}
	if (!tg3_flag(tp, 5705_PLUS)) {
		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
		udelay(40);
	}

	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
	       WDMAC_MODE_LNGREAD_ENAB);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
			/* nothing */
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			val |= WDMAC_MODE_RX_ACCEL;
		}
	}

	/* Enable host coalescing bug fix */
	if (tg3_flag(tp, 5755_PLUS))
		val |= WDMAC_MODE_STATUS_TAG_FIX;

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		val |= WDMAC_MODE_BURST_ALL_DATA;

	tw32_f(WDMAC_MODE, val);
	udelay(40);

	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
			pcix_cmd |= PCI_X_CMD_READ_2K;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
			pcix_cmd |= PCI_X_CMD_READ_2K;
		}
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	tw32_f(RDMAC_MODE, rdmac_mode);
	udelay(40);
	if (tg3_asic_rev(tp) == ASIC_REV_5719) {
		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
				break;
		}
		if (i < TG3_NUM_RDMA_CHANNELS) {
			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
			val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
			tg3_flag_set(tp, 5719_RDMA_BUG);
		}
	}

	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		tw32(SNDDATAC_MODE,
		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
	else
		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);

	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		val |= RCVDBDI_MODE_LRG_RING_SZ;
	tw32(RCVDBDI_MODE, val);
	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
	if (tg3_flag(tp, ENABLE_TSS))
		val |= SNDBDI_MODE_MULTI_TXQ_EN;
	tw32(SNDBDI_MODE, val);
	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
		err = tg3_load_5701_a0_firmware_fix(tp);
		if (err)
			return err;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		/* Ignore any errors for the firmware download. If download
		 * fails, the device will operate with EEE disabled.
		 */
		tg3_load_57766_firmware(tp);
	}

	if (tg3_flag(tp, TSO_CAPABLE)) {
		err = tg3_load_tso_firmware(tp);
		if (err)
			return err;
	}

	tp->tx_mode = TX_MODE_ENABLE;

	if (tg3_flag(tp, 5755_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5906)
		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762) {
		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
		tp->tx_mode &= ~val;
		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
	}

	tw32_f(MAC_TX_MODE, tp->tx_mode);
	udelay(100);

	if (tg3_flag(tp, ENABLE_RSS)) {
		tg3_rss_write_indir_tbl(tp);

		/* Setup the "secret" hash key. */
		tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
		tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
		tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
		tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
		tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
		tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
		tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
		tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
		tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
		tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
	}
	tp->rx_mode = RX_MODE_ENABLE;
	if (tg3_flag(tp, 5755_PLUS))
		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;

	if (tg3_flag(tp, ENABLE_RSS))
		tp->rx_mode |= RX_MODE_RSS_ENABLE |
			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
			       RX_MODE_RSS_IPV6_HASH_EN |
			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
			       RX_MODE_RSS_IPV4_HASH_EN |
			       RX_MODE_RSS_TCP_IPV4_HASH_EN;

	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	tw32(MAC_LED_CTRL, tp->led_ctrl);

	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
			/* Set drive transmission level to 1.2V */
			/* only if the signal pre-emphasis bit is not set */
			val = tr32(MAC_SERDES_CFG);
			val &= 0xfffff000;
			val |= 0x880;
			tw32(MAC_SERDES_CFG, val);
		}
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
			tw32(MAC_SERDES_CFG, 0x616000);
	}
	/* Prevent chip from dropping frames when flow control
	 * is enabled.
	 */
	if (tg3_flag(tp, 57765_CLASS))
		val = 1;
	else
		val = 2;
	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);

	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		/* Use hardware link auto-negotiation */
		tg3_flag_set(tp, HW_AUTONEG);
	}

	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_asic_rev(tp) == ASIC_REV_5714) {
		u32 tmp;

		tmp = tr32(SERDES_RX_CTRL);
		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	}

	if (!tg3_flag(tp, USE_PHYLIB)) {
		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;

		err = tg3_setup_phy(tp, 0);
		if (err)
			return err;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			u32 tmp;

			/* Clear CRC stats. */
			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
				tg3_writephy(tp, MII_TG3_TEST1,
					     tmp | MII_TG3_TEST1_CRC_EN);
				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
			}
		}
	}
	__tg3_set_rx_mode(tp->dev);

	/* Initialize receive rules. */
	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);

	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
		limit = 8;
	else
		limit = 16;
	if (tg3_flag(tp, ENABLE_ASF))
		limit -= 4;
	switch (limit) {
	case 16:
		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15, 0);
	case 15:
		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14, 0);
	case 14:
		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13, 0);
	case 13:
		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12, 0);
	case 12:
		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11, 0);
	case 11:
		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10, 0);
	case 10:
		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9, 0);
	case 9:
		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8, 0);
	case 8:
		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7, 0);
	case 7:
		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6, 0);
	case 6:
		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5, 0);
	case 5:
		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4, 0);
	case 4:
		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3, 0); */
	case 3:
		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2, 0); */
	case 2:
	case 1:

	default:
		break;
	}

	if (tg3_flag(tp, ENABLE_APE))
		/* Write our heartbeat update interval to APE. */
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
				APE_HOST_HEARTBEAT_INT_DISABLE);

	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);

	return 0;
}
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	tg3_switch_clocks(tp);

	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
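/* Read the on-chip sensor data records out of the APE scratchpad and
 * zero any record whose signature is missing or that is not marked
 * active, so callers only ever see valid entries.
 */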
static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
{
	int i;

	for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
		u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;

		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);

		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
			memset(ocir, 0, TG3_OCIR_LEN);
	}
}

/* sysfs attributes for hwmon */
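/* Read one temperature value from the APE scratchpad; attr->index
 * selects which sensor offset (input, critical, or max) is fetched.
 */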
static ssize_t tg3_show_temp(struct device *dev,
			     struct device_attribute *devattr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	u32 temperature;

	spin_lock_bh(&tp->lock);
	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
				sizeof(temperature));
	spin_unlock_bh(&tp->lock);
	return sprintf(buf, "%u\n", temperature);
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_MAX_OFFSET);

static struct attribute *tg3_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	NULL
};

static const struct attribute_group tg3_group = {
	.attrs = tg3_attributes,
};
static void tg3_hwmon_close(struct tg3 *tp)
{
	if (tp->hwmon_dev) {
		hwmon_device_unregister(tp->hwmon_dev);
		tp->hwmon_dev = NULL;
		sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
	}
}

static void tg3_hwmon_open(struct tg3 *tp)
{
	int i, err;
	u32 size = 0;
	struct pci_dev *pdev = tp->pdev;
	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];

	tg3_sd_scan_scratchpad(tp, ocirs);

	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
		if (!ocirs[i].src_data_length)
			continue;

		size += ocirs[i].src_hdr_length;
		size += ocirs[i].src_data_length;
	}

	if (!size)
		return;

	/* Register hwmon sysfs hooks */
	err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
	if (err) {
		dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
		return;
	}

	tp->hwmon_dev = hwmon_device_register(&pdev->dev);
	if (IS_ERR(tp->hwmon_dev)) {
		tp->hwmon_dev = NULL;
		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
		sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
	}
}
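/* Fold a 32-bit hardware counter into a 64-bit driver counter.  The
 * hardware registers wrap, so a carry into the high word is detected
 * when the new low word is smaller than the value just added to it.
 */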
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!tp->link_up)
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
	if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
		u32 val;

		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
		tg3_flag_clear(tp, 5719_RDMA_BUG);
	}

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
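/* Some chips can occasionally drop an MSI.  If a ring reports pending
 * work but its consumer indices have not moved since the last check,
 * re-run the handler by hand to unwedge it.
 */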
static void tg3_chk_missed_msi(struct tg3 *tp)
{
	u32 i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				if (tnapi->chk_msi_cnt < 1) {
					tnapi->chk_msi_cnt++;
					return;
				}
				tg3_msi(0, tnapi);
			}
		}
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}
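/* Driver heartbeat timer.  Runs every tp->timer_offset jiffies with
 * tp->lock held: it polls link state on chips that cannot interrupt
 * on link changes, fetches statistics once per second, and sends the
 * periodic ASF heartbeat event to the management firmware.
 */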
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
		goto restart_timer;

	spin_lock(&tp->lock);

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		/* BCM4785: Flush posted writes from GbE to host memory. */
		tr32(HOSTCC_MODE);
	}

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (tp->link_up &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!tp->link_up &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					       (tp->mac_mode &
						~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * the same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
static void tg3_timer_init(struct tg3 *tp)
{
	if (tg3_flag(tp, TAGGED_STATUS) &&
	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    !tg3_flag(tp, 57765_CLASS))
		tp->timer_offset = HZ;
	else
		tp->timer_offset = HZ / 10;

	BUG_ON(tp->timer_offset > HZ);

	tp->timer_multiplier = (HZ / tp->timer_offset);
	tp->asf_multiplier = (HZ / tp->timer_offset) *
			     TG3_FW_UPDATE_FREQ_SEC;

	init_timer(&tp->timer);
	tp->timer.data = (unsigned long) tp;
	tp->timer.function = tg3_timer;
}

static void tg3_timer_start(struct tg3 *tp)
{
	tp->asf_counter   = tp->asf_multiplier;
	tp->timer_counter = tp->timer_multiplier;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}

static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
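/* Pick the interrupt handler that matches the current interrupt mode
 * (MSI/MSI-X, optionally one-shot, or shared INTx with or without
 * tagged status) and install it for the given vector.
 */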
static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
	irq_handler_t fn;
	unsigned long flags;
	char *name;
	struct tg3_napi *tnapi = &tp->napi[irq_num];

	if (tp->irq_cnt == 1)
		name = tp->dev->name;
	else {
		name = &tnapi->irq_lbl[0];
		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
		name[IFNAMSIZ-1] = 0;
	}

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		fn = tg3_msi;
		if (tg3_flag(tp, 1SHOT_MSI))
			fn = tg3_msi_1shot;
		flags = 0;
	} else {
		fn = tg3_interrupt;
		if (tg3_flag(tp, TAGGED_STATUS))
			fn = tg3_interrupt_tagged;
		flags = IRQF_SHARED;
	}

	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
}
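/* Generate a test interrupt by forcing a coalescing-now cycle, then
 * poll the interrupt mailbox to confirm the interrupt line actually
 * works.  Returns 0 on success, -EIO if no interrupt was observed.
 */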
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	u16 pci_cmd;
	int err;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
static int tg3_request_firmware(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;

	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
			   tp->fw_needed);
		return -ENOENT;
	}

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course).
	 */
	tp->fw_len = be32_to_cpu(fw_hdr->len);	/* includes bss */
	if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
			   tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}

	/* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}
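/* Work out how many interrupt vectors to request: the larger of the
 * RX and TX queue counts, plus one for the link/misc vector when more
 * than one queue is in use, capped at tp->irq_max.
 */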
static u32 tg3_irq_count(struct tg3 *tp)
{
	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);

	if (irq_cnt > 1) {
		/* We want as many rx rings enabled as there are cpus.
		 * In multiqueue MSI-X mode, the first MSI-X vector
		 * only deals with link interrupts, etc, so we add
		 * one to the number of vectors we are requesting.
		 */
		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
	}

	return irq_cnt;
}
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];

	tp->txq_cnt = tp->txq_req;
	tp->rxq_cnt = tp->rxq_req;
	if (!tp->rxq_cnt)
		tp->rxq_cnt = netif_get_num_default_rss_queues();
	if (tp->rxq_cnt > tp->rxq_max)
		tp->rxq_cnt = tp->rxq_max;

	/* Disable multiple TX rings by default.  Simple round-robin hardware
	 * scheduling of the TX rings can cause starvation of rings with
	 * small packets when other rings have TSO or jumbo packets.
	 */
	if (!tp->txq_req)
		tp->txq_cnt = 1;

	tp->irq_cnt = tg3_irq_count(tp);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
		tp->rxq_cnt = max(rc - 1, 1);
		if (tp->txq_cnt)
			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt == 1)
		return true;

	tg3_flag_set(tp, ENABLE_RSS);

	if (tp->txq_cnt > 1)
		tg3_flag_set(tp, ENABLE_TSS);

	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);

	return true;
}
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
	}

	if (tp->irq_cnt == 1) {
		tp->txq_cnt = 1;
		tp->rxq_cnt = 1;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
static void tg3_ints_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, USING_MSIX))
		pci_disable_msix(tp->pdev);
	else if (tg3_flag(tp, USING_MSI))
		pci_disable_msi(tp->pdev);
	tg3_flag_clear(tp, USING_MSI);
	tg3_flag_clear(tp, USING_MSIX);
	tg3_flag_clear(tp, ENABLE_RSS);
	tg3_flag_clear(tp, ENABLE_TSS);
}
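/* Bring the device fully up: configure interrupt vectors, allocate
 * rings and NAPI contexts, program the hardware, then start the timer
 * and enable interrupts.  tg3_stop() is the inverse.
 */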
static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
		     bool init)
{
	struct net_device *dev = tp->dev;
	int i, err;

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto err_out1;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			for (i--; i >= 0; i--) {
				tnapi = &tp->napi[i];
				free_irq(tnapi->irq_vec, tnapi);
			}
			goto err_out2;
		}
	}

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	}

	tg3_full_unlock(tp);

	if (err)
		goto err_out3;

	if (test_irq && tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto err_out2;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_hwmon_open(tp);

	tg3_full_lock(tp, 0);

	tg3_timer_start(tp);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	if (init)
		tg3_ptp_init(tp);
	else
		tg3_ptp_resume(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

err_out3:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

err_out2:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

err_out1:
	tg3_ints_fini(tp);

	return err;
}
static void tg3_stop(struct tg3 *tp)
{
	int i;

	tg3_reset_task_cancel(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_hwmon_close(tp);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);
}
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
			if (err) {
				netdev_warn(tp->dev, "EEE capability disabled\n");
				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
				netdev_warn(tp->dev, "EEE capability restored\n");
				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
			}
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
			if (err)
				return err;
		} else if (err) {
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	tg3_carrier_off(tp);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	err = tg3_start(tp, true, true, true);
	if (err) {
		tg3_frob_aux_power(tp, false);
		pci_set_power_state(tp->pdev, PCI_D3hot);
	}

	if (tg3_flag(tp, PTP_CAPABLE)) {
		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
						   &tp->pdev->dev);
		if (IS_ERR(tp->ptp_clock))
			tp->ptp_clock = NULL;
	}

	return err;
}
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_ptp_fini(tp);

	tg3_stop(tp);

	/* Clear stats across close / open calls */
	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));

	tg3_power_down(tp);

	tg3_carrier_off(tp);

	return 0;
}
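/* Combine the high and low halves of a tg3_stat64_t counter into a u64. */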
static inline u64 get_stat64(tg3_stat64_t *val)
{
	return ((u64)val->high << 32) | ((u64)val->low);
}
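/* On 5700/5701 with a copper PHY the CRC error count lives in a PHY
 * test register rather than the MAC statistics block; the counter is
 * cleared as part of the read-and-re-enable sequence, so a running
 * total is kept in tp->phy_crc_errors.
 */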
static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
		u32 val;

		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
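/* Ethtool statistics survive chip resets: each counter reported is the
 * snapshot saved before the last reset plus the live hardware value.
 */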
#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
			 get_stat64(&hw_stats->member)

static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
{
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);
}
static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	stats->rx_crc_errors = old_stats->rx_crc_errors +
		tg3_calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	stats->rx_dropped = tp->rx_dropped;
	stats->tx_dropped = tp->tx_dropped;
}
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}

static void tg3_get_regs(struct net_device *dev,
			 struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}

static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__be32 val;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start, end;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				   SUPPORTED_100baseT_Full |
				   SUPPORTED_10baseT_Half |
				   SUPPORTED_10baseT_Full |
				   SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				cmd->advertising |= ADVERTISED_Pause;
			} else {
				cmd->advertising |= ADVERTISED_Pause |
						    ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			cmd->advertising |= ADVERTISED_Asym_Pause;
		}
	}
	if (netif_running(dev) && tp->link_up) {
		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
		cmd->duplex = tp->link_config.active_duplex;
		cmd->lp_advertising = tp->link_config.rmt_adv;
		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
				cmd->eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->eth_tp_mdix = ETH_TP_MDI;
		}
	} else {
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		cmd->duplex = DUPLEX_UNKNOWN;
		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
	}
	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_sset(phydev, cmd);
	}

	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		if (cmd->advertising & ~mask)
			return -EINVAL;

		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_UNKNOWN;
		tp->link_config.duplex = DUPLEX_UNKNOWN;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->duplex;
	}

	tg3_warn_mgmt_link_flap(tp);

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
11620 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11622 struct tg3 *tp = netdev_priv(dev);
11624 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11625 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11626 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11627 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
		wol->supported = WAKE_MAGIC;
	else
		wol->supported = 0;
	wol->wolopts = 0;
	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
		wol->wolopts = WAKE_MAGIC;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);
	struct device *dp = &tp->pdev->dev;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
		return -EINVAL;

	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);

	spin_lock_bh(&tp->lock);
	if (device_may_wakeup(dp))
		tg3_flag_set(tp, WOL_ENABLE);
	else
		tg3_flag_clear(tp, WOL_ENABLE);
	spin_unlock_bh(&tp->lock);

	return 0;
}
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}

static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	tg3_warn_mgmt_link_flap(tp);

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* Throwaway read; the second read below returns fresh
		 * BMCR contents on PHYs that latch the first access.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);

	ering->rx_max_pending = tp->rx_std_ring_mask;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
	else
		ering->rx_jumbo_max_pending = 0;

	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;

	ering->rx_pending = tp->rx_pending;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
	else
		ering->rx_jumbo_pending = 0;

	ering->tx_pending = tp->napi[0].tx_pending;
}
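/* Added commentary (not from the original source): the tx_pending lower
 * bound in tg3_set_ringparam() exists because a single skb can occupy up
 * to MAX_SKB_FRAGS + 1 descriptors (3x that margin on TSO_BUG parts,
 * which re-fragment large TSO sends in the driver), so a smaller ring
 * could deadlock on one maximally-fragmented packet.
 */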
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;

	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 0);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);

	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);

	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
		epause->rx_pause = 1;
	else
		epause->rx_pause = 0;

	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
		epause->tx_pause = 1;
	else
		epause->tx_pause = 0;
}
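/* Added commentary (not from the original source): 802.3 encodes the
 * rx/tx pause pair as two advertisement bits, and the phylib branch of
 * tg3_set_pauseparam() computes exactly that mapping in newadv:
 * rx+tx -> Pause, rx-only -> Pause|Asym_Pause, tx-only -> Asym_Pause.
 */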
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE)
		tg3_warn_mgmt_link_flap(tp);

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			tp->link_config.advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 0);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
static int tg3_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return TG3_NUM_TEST;
	case ETH_SS_STATS:
		return TG3_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}
static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
			 u32 *rules __always_unused)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return -EOPNOTSUPP;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		if (netif_running(tp->dev))
			info->data = tp->rxq_cnt;
		else {
			info->data = num_online_cpus();
			if (info->data > TG3_RSS_MAX_NUM_QS)
				info->data = TG3_RSS_MAX_NUM_QS;
		}

		/* The first interrupt vector only
		 * handles link interrupts.
		 */
		info->data -= 1;
		return 0;

	default:
		return -EOPNOTSUPP;
	}
}
static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
{
	u32 size = 0;
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, SUPPORT_MSIX))
		size = TG3_RSS_INDIR_TBL_SIZE;

	return size;
}
static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		indir[i] = tp->rss_ind_tbl[i];

	return 0;
}

static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
{
	struct tg3 *tp = netdev_priv(dev);
	size_t i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = indir[i];

	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
		return 0;

	/* It is legal to write the indirection
	 * table while the device is running.
	 */
	tg3_full_lock(tp, 0);
	tg3_rss_write_indir_tbl(tp);
	tg3_full_unlock(tp);

	return 0;
}
static void tg3_get_channels(struct net_device *dev,
			     struct ethtool_channels *channel)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 deflt_qs = netif_get_num_default_rss_queues();

	channel->max_rx = tp->rxq_max;
	channel->max_tx = tp->txq_max;

	if (netif_running(dev)) {
		channel->rx_count = tp->rxq_cnt;
		channel->tx_count = tp->txq_cnt;
	} else {
		if (tp->rxq_req)
			channel->rx_count = tp->rxq_req;
		else
			channel->rx_count = min(deflt_qs, tp->rxq_max);

		if (tp->txq_req)
			channel->tx_count = tp->txq_req;
		else
			channel->tx_count = min(deflt_qs, tp->txq_max);
	}
}
static int tg3_set_channels(struct net_device *dev,
			    struct ethtool_channels *channel)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return -EOPNOTSUPP;

	if (channel->rx_count > tp->rxq_max ||
	    channel->tx_count > tp->txq_max)
		return -EINVAL;

	tp->rxq_req = channel->rx_count;
	tp->txq_req = channel->tx_count;

	if (!netif_running(dev))
		return 0;

	tg3_stop(tp);

	tg3_carrier_off(tp);

	tg3_start(tp, true, false, false);

	return 0;
}
static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	case ETH_SS_TEST:
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
		break;
	default:
		WARN_ON(1);	/* we need a WARN() */
		break;
	}
}
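/* Added commentary (not from the original source): returning 1 from the
 * ETHTOOL_ID_ACTIVE case below asks the ethtool core to drive the blink
 * loop itself, calling back with ETHTOOL_ID_ON / ETHTOOL_ID_OFF once per
 * second until ETHTOOL_ID_INACTIVE restores the saved LED state.
 */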
static int tg3_set_phys_id(struct net_device *dev,
			    enum ethtool_phys_id_state state)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(tp->dev))
		return -EAGAIN;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_1000MBPS_ON |
		     LED_CTRL_100MBPS_ON |
		     LED_CTRL_10MBPS_ON |
		     LED_CTRL_TRAFFIC_OVERRIDE |
		     LED_CTRL_TRAFFIC_BLINK |
		     LED_CTRL_TRAFFIC_LED);
		break;

	case ETHTOOL_ID_OFF:
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_TRAFFIC_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		tw32(MAC_LED_CTRL, tp->led_ctrl);
		break;
	}

	return 0;
}
static void tg3_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->hw_stats)
		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
	else
		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
}
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	if (!offset || !len) {
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
#define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
#define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
#define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
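/* Added commentary (not from the original source): tg3_test_nvram()
 * picks the image size from the magic number, reads the image with the
 * big-endian accessors, and then applies whichever integrity scheme
 * matches the format: a simple byte checksum for selfboot firmware
 * images, per-byte parity for hardware selfboot images, or CRCs over the
 * bootstrap and manufacturing blocks plus the VPD checksum keyword for
 * full legacy images.
 */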
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				j += 2;
			} else if (i == 16) {
				int l;
				u8 msk;

				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				j += 1;
			} else {
				int l;
				u8 msk;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
			}
			data[j++] = buf8[i];
		}

		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}
#define TG3_SERDES_TIMEOUT_SEC	2
#define TG3_COPPER_TIMEOUT_SEC	6

static int tg3_test_link(struct tg3 *tp)
{
	int i, max;

	if (!netif_running(tp->dev))
		return -ENODEV;

	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
		max = TG3_SERDES_TIMEOUT_SEC;
	else
		max = TG3_COPPER_TIMEOUT_SEC;

	for (i = 0; i < max; i++) {
		if (tp->link_up)
			return 0;

		if (msleep_interruptible(1000))
			break;
	}

	return -EIO;
}
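/* Added commentary (not from the original source): each reg_tbl[] entry
 * below pairs a register offset and ASIC-selection flags with a
 * read-only mask and a read/write mask.  The test writes all-zeros and
 * then all-ones through those masks, checking that the read-only bits
 * never change while the read/write bits take both values, and restores
 * the saved register contents afterwards.
 */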
/* Only test the commonly used registers */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};
	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
	int i;
	u32 j;

	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
		for (j = 0; j < len; j += 4) {
			u32 val;

			tg3_write_mem(tp, offset + j, test_pattern[i]);
			tg3_read_mem(tp, offset + j, &val);
			if (val != test_pattern[i])
				return -EIO;
		}
	}

	return 0;
}
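/* Added commentary (not from the original source): tg3_do_mem_test()
 * above writes each test pattern through the memory window and reads it
 * straight back, word by word; tg3_test_memory() below simply walks a
 * per-ASIC table of on-chip SRAM regions (offset, length) and runs that
 * pattern test over each one.
 */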
static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	if (tg3_flag(tp, 5717_PLUS))
		mem_tbl = mem_tbl_5717;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		mem_tbl = mem_tbl_57765;
	else if (tg3_flag(tp, 5755_PLUS))
		mem_tbl = mem_tbl_5755;
	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tg3_flag(tp, 5705_PLUS))
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}
#define TG3_TSO_MSS		500

#define TG3_TSO_IP_HDR_LEN	20
#define TG3_TSO_TCP_HDR_LEN	20
#define TG3_TSO_TCP_OPT_LEN	12

static const u8 tg3_tso_header[] = {
0x08, 0x00,
0x45, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};
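/* Added commentary (not from the original source): tg3_run_loopback()
 * builds a single test frame (or a TSO burst seeded from the canned
 * IP/TCP header above), queues it with one buffer descriptor, then polls
 * the status block until the tx consumer and rx producer indices show
 * the frame has come back, and finally compares the received payload
 * byte-for-byte against the 0x00..0xff ramp written on the tx side.
 */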
static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
{
	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
	u32 budget;
	struct sk_buff *skb;
	u8 *tx_data, *rx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;
	struct tg3_napi *tnapi, *rnapi;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tnapi = &tp->napi[0];
	rnapi = &tp->napi[0];
	if (tp->irq_cnt > 1) {
		if (tg3_flag(tp, ENABLE_RSS))
			rnapi = &tp->napi[1];
		if (tg3_flag(tp, ENABLE_TSS))
			tnapi = &tp->napi[1];
	}
	coal_now = tnapi->coal_now | rnapi->coal_now;

	err = -EIO;

	tx_len = pktsz;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

	if (tso_loopback) {
		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
			      TG3_TSO_TCP_OPT_LEN;

		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
		       sizeof(tg3_tso_header));
		mss = TG3_TSO_MSS;

		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

		/* Set the total length field in the IP header */
		iph->tot_len = htons((u16)(mss + hdr_len));

		base_flags = (TXD_FLAG_CPU_PRE_DMA |
			      TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			struct tcphdr *th;
			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
			th = (struct tcphdr *)&tx_data[val];
			th->check = 0;
		} else
			base_flags |= TXD_FLAG_TCPUDP_CSUM;

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 tg3_asic_rev(tp) == ASIC_REV_5705) {
			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
		} else {
			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
		}

		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
	} else {
		num_pkts = 1;
		data_off = ETH_HLEN;

		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
		    tx_len > VLAN_ETH_FRAME_LEN)
			base_flags |= TXD_FLAG_JMB_PKT;
	}

	for (i = data_off; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	val = tnapi->tx_prod;
	tnapi->tx_buffers[val].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       rnapi->coal_now);

	udelay(10);

	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

	budget = tg3_tx_avail(tnapi);
	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
			    base_flags | TXD_FLAG_END, mss, 0)) {
		tnapi->tx_buffers[val].skb = NULL;
		dev_kfree_skb(skb);
		return -EIO;
	}

	tnapi->tx_prod++;

	/* Sync BD data before updating mailbox */
	wmb();

	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
	tr32_mailbox(tnapi->prodmbox);

	udelay(10);

	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 35; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       coal_now);

		udelay(10);

		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
		rx_idx = rnapi->hw_status->idx[0].rx_producer;
		if ((tx_idx == tnapi->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
	dev_kfree_skb(skb);

	if (tx_idx != tnapi->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	val = data_off;
	while (rx_idx != rx_start_idx) {
		desc = &rnapi->rx_rcb[rx_start_idx++];
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
			goto out;

		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
			 - ETH_FCS_LEN;

		if (!tso_loopback) {
			if (rx_len != tx_len)
				goto out;

			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
				if (opaque_key != RXD_OPAQUE_RING_STD)
					goto out;
			} else {
				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
					goto out;
			}
		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
			goto out;
		}

		if (opaque_key == RXD_OPAQUE_RING_STD) {
			rx_data = tpr->rx_std_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
					     mapping);
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
					     mapping);
		} else
			goto out;

		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
					    PCI_DMA_FROMDEVICE);

		rx_data += TG3_RX_OFFSET(tp);
		for (i = data_off; i < rx_len; i++, val++) {
			if (*(rx_data + i) != (u8) (val & 0xff))
				goto out;
		}
	}

	err = 0;

	/* tg3_free_rings will unmap and free the rx_data */
out:
	return err;
}
#define TG3_STD_LOOPBACK_FAILED		1
#define TG3_JMB_LOOPBACK_FAILED		2
#define TG3_TSO_LOOPBACK_FAILED		4
#define TG3_LOOPBACK_FAILED \
	(TG3_STD_LOOPBACK_FAILED | \
	 TG3_JMB_LOOPBACK_FAILED | \
	 TG3_TSO_LOOPBACK_FAILED)
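/* Added commentary (not from the original source): each u64 in the
 * results array packs the three TG3_*_LOOPBACK_FAILED bits above, and
 * tg3_test_loopback() fills one slot per mode: MAC loopback (legacy
 * parts only), internal PHY loopback, and optionally external PHY
 * loopback, each exercised with standard, TSO, and jumbo frames where
 * the hardware supports them.
 */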
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;
	u32 jmb_pkt_sz = 9000;

	if (tp->dma_limit)
		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;

	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	if (!netif_running(tp->dev)) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	err = tg3_reset_hw(tp, 1);
	if (err) {
		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;

		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec.  Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
				data[TG3_EXT_LOOPB_TEST] |=
							TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;

done:
	tp->phy_flags |= eee_cap;

	return err;
}
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
	    tg3_power_up(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
		return;
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_NVRAM_TEST] = 1;
	}
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[TG3_LINK_TEST] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_REGISTER_TEST] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_MEMORY_TEST] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		if (tg3_test_loopback(tp, data, doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[TG3_INTERRUPT_TEST] = 1;
		}

		tg3_full_lock(tp, 0);

		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);
}
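/* Added commentary (not from the original source): tg3_hwtstamp_ioctl()
 * maps each hwtstamp_config rx_filter the hardware can honor onto
 * TG3_RX_PTP_CTL_* bits (PTP v1/v2, L2/L4, event/sync/delay-req);
 * anything else is refused with -ERANGE so user space can fall back to
 * software timestamping.
 */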
static int tg3_hwtstamp_ioctl(struct net_device *dev,
			      struct ifreq *ifr, int cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	struct hwtstamp_config stmpconf;

	if (!tg3_flag(tp, PTP_CAPABLE))
		return -EINVAL;

	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
		return -EFAULT;

	if (stmpconf.flags)
		return -EINVAL;

	switch (stmpconf.tx_type) {
	case HWTSTAMP_TX_ON:
		tg3_flag_set(tp, TX_TSTAMP_EN);
		break;
	case HWTSTAMP_TX_OFF:
		tg3_flag_clear(tp, TX_TSTAMP_EN);
		break;
	default:
		return -ERANGE;
	}

	switch (stmpconf.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tp->rxptpctl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
		break;
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_SYNC_EVNT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
			       TG3_RX_PTP_CTL_DELAY_REQ;
		break;
	default:
		return -ERANGE;
	}

	if (netif_running(dev) && tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
		-EFAULT : 0;
}
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = __tg3_readphy(tp, data->phy_id & 0x1f,
				    data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = __tg3_writephy(tp, data->phy_id & 0x1f,
				     data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	case SIOCSHWTSTAMP:
		return tg3_hwtstamp_ioctl(dev, ifr, cmd);

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}

static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	if (!tg3_flag(tp, 5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}

	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* No rx interrupts will be generated if both are zero */
	if ((ec->rx_coalesce_usecs == 0) &&
	    (ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	/* No tx interrupts will be generated if both are zero */
	if ((ec->tx_coalesce_usecs == 0) &&
	    (ec->tx_max_coalesced_frames == 0))
		return -EINVAL;

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}
	return 0;
}
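/* Added commentary on tg3_set_coalesce() above (not from the original
 * source): on 5705+ parts the irq-event and statistics coalescing limits
 * stay at zero, i.e. those parameters are not supported there, and the
 * explicit (0, 0) checks exist because a zero usecs/frames pair would
 * disable that interrupt source entirely.
 */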
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size	= tg3_get_rxfh_indir_size,
	.get_rxfh_indir		= tg3_get_rxfh_indir,
	.set_rxfh_indir		= tg3_set_rxfh_indir,
	.get_channels		= tg3_get_channels,
	.set_channels		= tg3_set_channels,
	.get_ts_info		= tg3_get_ts_info,
};
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);

	spin_lock_bh(&tp->lock);
	if (!tp->hw_stats) {
		spin_unlock_bh(&tp->lock);
		return &tp->net_stats_prev;
	}

	tg3_get_nstats(tp, stats);
	spin_unlock_bh(&tp->lock);

	return stats;
}
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}

static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err, reset_phy = 0;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_57766)
		reset_phy = 1;

	err = tg3_restart_hw(tp, reset_phy);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
static void tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return;

	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
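/* Added commentary (not from the original source): the probe above
 * relies on address wrap-around.  On a 16 KiB part, for example, reads
 * at 0x10, 0x20, ... return ordinary data until the offset reaches
 * 0x4000, which aliases back to offset 0 and returns the magic
 * signature, so cursize ends up equal to the chip size.
 */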
static void tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing.  We want to operate on the
			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses.  This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU.  The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
static void tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}
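/* Added commentary (not from the original source): the per-generation
 * tg3_get_*_nvram_info() helpers below decode the strapping/vendor bits
 * of NVRAM_CFG1 into JEDEC vendor, buffering, flash-vs-EEPROM, page
 * size, and, where the pinstrap encodes it, the total NVRAM size.
 */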
static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
static void tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}

static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}

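/* 5720 and 5762 share this decoder; the 5762 reports its pinstraps in a
 * different encoding, so those values are first remapped onto the
 * closest 5720 equivalents before the common switch below runs.
 */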
static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
			tg3_flag_set(tp, NO_NVRAM);
			return;
		}

		switch (nvmpinstrp) {
		case FLASH_5762_EEPROM_HD:
			nvmpinstrp = FLASH_5720_EEPROM_HD;
			break;
		case FLASH_5762_EEPROM_LD:
			nvmpinstrp = FLASH_5720_EEPROM_LD;
			break;
		case FLASH_5720VENDOR_M_ST_M45PE20:
			/* This pinstrap supports multiple sizes, so force it
			 * to read the actual size from location 0xf0.
			 */
			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
			break;
		}
	}

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 val;

		if (tg3_nvram_read(tp, 0, &val))
			return;

		if (val != TG3_EEPROM_MAGIC &&
		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
			tg3_flag_set(tp, NO_NVRAM);
	}
}

/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void tg3_nvram_init(struct tg3 *tp)
{
	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tw32_f(GRC_EEPROM_ADDR,
	       (EEPROM_ADDR_FSM_RESET |
		(EEPROM_DEFAULT_CLOCK_PERIOD <<
		 EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	       tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
			 tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			 tg3_asic_rev(tp) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
			 tg3_asic_rev(tp) == ASIC_REV_5762)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}

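/* Table mapping the subsystem vendor/device IDs of known boards to the
 * PHY fitted on them.  Used as a last-resort fallback when neither the
 * PHY ID registers nor the EEPROM yield a usable PHY ID.
 */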
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};

static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}

	return NULL;
}

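/* Read the per-board configuration the bootcode leaves in NIC SRAM:
 * PHY ID, LED mode, EEPROM write protect, and the WOL/ASF/APE enables.
 */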
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default. */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
					  SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;
		}

		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by
		 * bootcode if bit 18 is set
		 */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS) &&
		    tg3_asic_rev(tp) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tg3_flag_set(tp, ASPM_WORKAROUND);
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	}
done:
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}

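/* Read one word from the APE OTP region: kick off a read command and
 * poll the status register (up to roughly 1 ms) for command completion.
 */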
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;

	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
	udelay(100);

	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}

static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}

/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}

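/* Seed link_config with every mode this PHY variant can advertise;
 * autonegotiation is enabled by default.
 */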
static void tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		adv |= ADVERTISED_1000baseT_Half |
		       ADVERTISED_1000baseT_Full;

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_UNKNOWN;
	tp->link_config.duplex = DUPLEX_UNKNOWN;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_UNKNOWN;
	tp->link_config.active_duplex = DUPLEX_UNKNOWN;

	tp->old_link = -1;
}

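/* Identify the PHY.  Prefer the PHYSID registers; when ASF/APE firmware
 * owns the PHY, fall back to the ID cached from the EEPROM or, failing
 * that, the hardcoded subsystem-ID table above.
 */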
static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID or, failing
		 * that, the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (p) {
				tp->phy_id = p->phy_id;
			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* For now we saw the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to a BCM4785 and there are
				 * probably more.  Just assume that the phy is
				 * supported when it is connected to a SSB core
				 * for now.
				 */
				return -ENODEV;
			}

			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}

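/* Pull the board part number (and, on Dell boards, the firmware string)
 * out of the read-only section of the PCI VPD; fall back to hardcoded
 * names keyed off the PCI device ID when no VPD is present.
 */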
static void tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		if (len >= sizeof(tp->fw_ver))
			len = sizeof(tp->fw_ver) - 1;
		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
			 &vpd_data[j]);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}

static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    (val & 0xe0000000) != 0)
		return 0;

	return 1;
}

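/* Append the NVRAM bootcode version to tp->fw_ver.  Newer images embed
 * a 16-byte version string; older ones only carry major/minor numbers.
 */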
static void tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}

static void tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}

static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;

	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}

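/* Walk the NVRAM directory looking for the ASF initialization image
 * and append its version string to tp->fw_ver.
 */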
static void tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}

static void tg3_probe_ncsi(struct tg3 *tp)
{
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
		tg3_flag_set(tp, APE_HAS_NCSI);
}

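/* Decode the APE firmware version word and tag it with the firmware
 * flavor detected above.
 */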
static void tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_flag(tp, APE_HAS_NCSI))
		fwtype = "NCSI";
	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
		fwtype = "SMASH";
	else
		fwtype = "DASH";

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}

static void tg3_read_otp_ver(struct tg3 *tp)
{
	u32 val, val2;

	if (tg3_asic_rev(tp) != ASIC_REV_5762)
		return;

	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
	    TG3_OTP_MAGIC0_VALID(val)) {
		u64 val64 = (u64) val << 32 | val2;
		u32 ver = 0;
		int i, vlen;

		for (i = 0; i < 7; i++) {
			if ((val64 & 0xff) == 0)
				break;
			ver = val64 & 0xff;
			val64 >>= 8;
		}
		vlen = strlen(tp->fw_ver);
		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
	}
}

static void tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		tg3_read_otp_ver(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);

	if (tg3_flag(tp, ENABLE_ASF)) {
		if (tg3_flag(tp, ENABLE_APE)) {
			tg3_probe_ncsi(tp);
			if (!vpd_vers)
				tg3_read_dash_ver(tp);
		} else if (!vpd_vers) {
			tg3_read_mgmtfw_ver(tp);
		}
	}

	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}

static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}

static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};

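/* 5704 and 5714 are dual-port devices; locate the other PCI function
 * sharing this slot.
 */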
static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 * half.
	 */
	pci_dev_put(peer);

	return peer;
}

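/* Work out the ASIC revision.  Newer chips report it through a
 * product-ID register rather than MISC_HOST_CTRL; the family flags
 * (5705_PLUS, 5750_PLUS, ...) used throughout the driver are derived
 * here as well.
 */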
static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
	    tg3_asic_rev(tp) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}

static bool tg3_10_100_only_device(struct tg3 *tp,
				   const struct pci_device_id *ent)
{
	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;

	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		return true;

	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
				return true;
		} else {
			return true;
		}
	}

	return false;
}

static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
{
	u32 misc_ctrl_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err;

	/* Force memory write invalidate off.  If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size.  The Broadcom driver has this
	 * workaround but turns MWI off all the time so never uses
	 * it.  This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* Important! -- Make sure register accesses are byteswapped
	 * correctly.  Also, for those chips that require it, make
	 * sure that indirect register accesses are enabled before
	 * the first operation.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	tg3_detect_asic_rev(tp, misc_ctrl_reg);

	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
	 * we need to disable memory and use config. cycles
	 * only to access all registers.  The 5702/03 chips
	 * can mistakenly decode the special cycles from the
	 * ICH chipsets as memory write cycles, causing corruption
	 * of register and memory space.  Only certain ICH bridges
	 * will drive special cycles with non-zero data during the
	 * address phase which can fall within the 5703's address
	 * range.  This is not an ICH bug as the PCI spec allows
	 * non-zero address during special cycles.  However, only
	 * these ICH bridges are known to drive non-zero addresses
	 * during special cycles.
	 *
	 * Since special cycles do not cross PCI bridges, we only
	 * enable this workaround if the 5703 is on the secondary
	 * bus of these ICH bridges.
	 */
	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
			u32	rev;
		} ich_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
			  0xa },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
			  PCI_ANY_ID },
			{ },
		};
		struct tg3_dev_id *pci_id = &ich_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor, pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (pci_id->rev != PCI_ANY_ID) {
				if (bridge->revision > pci_id->rev)
					continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number ==
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, ICH_WORKAROUND);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
		} bridge_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
			{ },
		};
		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor,
						pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->busn_res.end >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 5701_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
	 * DMA addresses > 40-bit.  This bridge may have additional
	 * 57xx devices behind it in some 4-port NIC designs for example.
	 * Any tg3 device found behind the bridge will also need the 40-bit
	 * DMA workaround.
	 */
	if (tg3_flag(tp, 5780_CLASS)) {
		tg3_flag_set(tp, 40BIT_DMA_BUG);
		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
	} else {
		struct pci_dev *bridge = NULL;

		do {
			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
						PCI_DEVICE_ID_SERVERWORKS_EPB,
						bridge);
			if (bridge && bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->busn_res.end >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 40BIT_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		} while (bridge);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tp->pdev_peer = tg3_find_peer(tp);

	/* Determine TSO capabilities */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
		; /* Do nothing.  HW bug. */
	else if (tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, HW_TSO_3);
	else if (tg3_flag(tp, 5755_PLUS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5906)
		tg3_flag_set(tp, HW_TSO_2);
	else if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, HW_TSO_1);
		tg3_flag_set(tp, TSO_BUG);
		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
			tg3_flag_clear(tp, TSO_BUG);
	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		tg3_flag_set(tp, FW_TSO);
		tg3_flag_set(tp, TSO_BUG);
		if (tg3_asic_rev(tp) == ASIC_REV_5705)
			tp->fw_needed = FIRMWARE_TG3TSO5;
		else
			tp->fw_needed = FIRMWARE_TG3TSO;
	}

	/* Selectively allow TSO based on operating conditions */
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3) ||
	    tg3_flag(tp, FW_TSO)) {
		/* For firmware TSO, assume ASF is disabled.
		 * We'll disable TSO later if we discover ASF
		 * is enabled in tg3_get_eeprom_hw_cfg().
		 */
		tg3_flag_set(tp, TSO_CAPABLE);
	} else {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
		tp->fw_needed = FIRMWARE_TG3;

	if (tg3_asic_rev(tp) == ASIC_REV_57766)
		tp->fw_needed = FIRMWARE_TG357766;

	tp->irq_max = 1;

	if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, SUPPORT_MSI);
		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
		     tp->pdev_peer == tp->pdev))
			tg3_flag_clear(tp, SUPPORT_MSI);

		if (tg3_flag(tp, 5755_PLUS) ||
		    tg3_asic_rev(tp) == ASIC_REV_5906) {
			tg3_flag_set(tp, 1SHOT_MSI);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			tg3_flag_set(tp, SUPPORT_MSIX);
			tp->irq_max = TG3_IRQ_MAX_VECS;
		}
	}

	tp->txq_max = 1;
	tp->rxq_max = 1;
	if (tp->irq_max > 1) {
		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);

		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			tp->txq_max = tp->irq_max - 1;
	}

	if (tg3_flag(tp, 5755_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5906)
		tg3_flag_set(tp, SHORT_DMA_BUG);

	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, LRG_PROD_RING_CAP);

	if (tg3_flag(tp, 57765_PLUS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
		tg3_flag_set(tp, USE_JUMBO_BDFLAG);

	if (!tg3_flag(tp, 5705_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_flag(tp, USE_JUMBO_BDFLAG))
		tg3_flag_set(tp, JUMBO_CAPABLE);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);

	if (pci_is_pcie(tp->pdev)) {
		u16 lnkctl;

		tg3_flag_set(tp, PCI_EXPRESS);

		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
				tg3_flag_clear(tp, HW_TSO_2);
				tg3_flag_clear(tp, TSO_CAPABLE);
			}
			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
				tg3_flag_set(tp, CLKREQ_BUG);
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
			tg3_flag_set(tp, L1PLLPD_EN);
		}
	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		/* BCM5785 devices are effectively PCIe devices, and should
		 * follow PCIe codepaths, but do not have a PCIe capabilities
		 * section.
		 */
		tg3_flag_set(tp, PCI_EXPRESS);
	} else if (!tg3_flag(tp, 5705_PLUS) ||
		   tg3_flag(tp, 5780_CLASS)) {
		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
		if (!tp->pcix_cap) {
			dev_err(&tp->pdev->dev,
				"Cannot find PCI-X capability, aborting\n");
			return -EIO;
		}

		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
			tg3_flag_set(tp, PCIX_MODE);
	}

	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles.  We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(tg3_write_reorder_chipsets) &&
	    !tg3_flag(tp, PCI_EXPRESS))
		tg3_flag_set(tp, MBOX_WRITE_REORDER);

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
			     &tp->pci_cacheline_sz);
	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
			     &tp->pci_lat_timer);
	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Important! -- It is critical that the PCI-X hw workaround
	 * situation is decided before the first MMIO register access.
	 */
	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to workaround a bug.
		 */
		tg3_flag_set(tp, TXD_MBOX_HWBUG);

		/* If we are in PCI-X mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (tg3_flag(tp, PCIX_MODE)) {
			u32 pm_reg;

			tg3_flag_set(tp, PCIX_TARGET_HWBUG);

			/* The chip can have its power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev,
					      tp->pm_cap + PCI_PM_CTRL,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev,
					       tp->pm_cap + PCI_PM_CTRL,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}

	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tg3_flag_set(tp, PCI_HIGH_SPEED);
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tg3_flag_set(tp, PCI_32BIT);

	/* Chip-specific fixup from Broadcom driver */
	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);
	}

	/* Default fast path register access methods */
	tp->read32 = tg3_read32;
	tp->write32 = tg3_write32;
	tp->read32_mbox = tg3_read32;
	tp->write32_mbox = tg3_write32;
	tp->write32_tx_mbox = tg3_write32;
	tp->write32_rx_mbox = tg3_write32;

	/* Various workaround register access methods */
	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
		tp->write32 = tg3_write_indirect_reg32;
	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
		 (tg3_flag(tp, PCI_EXPRESS) &&
		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
		/*
		 * Back to back register writes can cause problems on these
		 * chips, the workaround is to read back all reg writes
		 * except those to mailbox regs.
		 *
		 * See tg3_write_indirect_reg32().
		 */
		tp->write32 = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		if (tg3_flag(tp, MBOX_WRITE_REORDER))
			tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, ICH_WORKAROUND)) {
		tp->read32 = tg3_read_indirect_reg32;
		tp->write32 = tg3_write_indirect_reg32;
		tp->read32_mbox = tg3_read_indirect_mbox;
		tp->write32_mbox = tg3_write_indirect_mbox;
		tp->write32_tx_mbox = tg3_write_indirect_mbox;
		tp->write32_rx_mbox = tg3_write_indirect_mbox;

		iounmap(tp->regs);
		tp->regs = NULL;

		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd &= ~PCI_COMMAND_MEMORY;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}

	if (tp->write32 == tg3_write_indirect_reg32 ||
	    (tg3_flag(tp, PCIX_MODE) &&
	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	      tg3_asic_rev(tp) == ASIC_REV_5701)))
		tg3_flag_set(tp, SRAM_USE_CONFIG);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tg3_flag(tp, PCIX_MODE)) {
			pci_read_config_dword(tp->pdev,
					      tp->pcix_cap + PCI_X_STATUS,
					      &val);
			tp->pci_fn = val & 0x7;
		}
	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
		   tg3_asic_rev(tp) == ASIC_REV_5720) {
		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
			val = tr32(TG3_CPMU_STATUS);

		if (tg3_asic_rev(tp) == ASIC_REV_5717)
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
		else
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
				     TG3_CPMU_STATUS_FSHFT_5719;
	}

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		tp->write32_tx_mbox = tg3_write_flush_reg32;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLAG_IS_NIC flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);

	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
				 PCISTATE_ALLOW_APE_SHMEM_WR |
				 PCISTATE_ALLOW_APE_PSPACE_WR;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);

		tg3_ape_lock_init(tp);
	}

	/* Set up tp->grc_local_ctrl before calling
	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
	 * will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_flag(tp, EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_CLASS))
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* Turn off the debug UART. */
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
		if (tg3_flag(tp, IS_NIC))
			/* Keep VMain power. */
			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					      GRC_LCLCTRL_GPIO_OUTPUT0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5762)
		tp->grc_local_ctrl |=
			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;

	/* Switch out of Vaux if it is a NIC */
	tg3_pwrsrc_switch_to_vmain(tp);

	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, JUMBO_RING_ENABLE);

	/* Determine WakeOnLan speed to use. */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
		tg3_flag_clear(tp, WOL_SPEED_100MB);
	} else {
		tg3_flag_set(tp, WOL_SPEED_100MB);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906)
		tp->phy_flags |= TG3_PHYFLG_IS_FET;

	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;

	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;

	if (tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
		    tg3_asic_rev(tp) == ASIC_REV_5761) {
			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
		} else
			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		tp->phy_otp = tg3_read_otp_phycfg(tp);
		if (tp->phy_otp == 0)
			tp->phy_otp = TG3_OTP_DEFAULT;
	}

	if (tg3_flag(tp, CPMU_PRESENT))
		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
	else
		tp->mi_mode = MAC_MI_MODE_BASE;

	tp->coalesce_mode = 0;
	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

	/* Set these bits to enable statistics workaround. */
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		tg3_flag_set(tp, USE_PHYLIB);

15948 err = tg3_mdio_init(tp);
15949 if (err)
15950 return err;
15952 /* Initialize data/descriptor byte/word swapping. */
15953 val = tr32(GRC_MODE);
15954 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15955 tg3_asic_rev(tp) == ASIC_REV_5762)
15956 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
15957 GRC_MODE_WORD_SWAP_B2HRX_DATA |
15958 GRC_MODE_B2HRX_ENABLE |
15959 GRC_MODE_HTX2B_ENABLE |
15960 GRC_MODE_HOST_STACKUP);
15961 else
15962 val &= GRC_MODE_HOST_STACKUP;
15964 tw32(GRC_MODE, val | tp->grc_mode);
15966 tg3_switch_clocks(tp);
15968 /* Clear this out for sanity. */
15969 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
15971 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15972 &pci_state_reg);
15973 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
15974 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
15975 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
15976 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
15977 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
15978 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
15979 void __iomem *sram_base;
15981 /* Write some dummy words into the SRAM status block
15982 * area, see if it reads back correctly. If the return
15983 * value is bad, force enable the PCIX workaround.
15984 */
15985 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
15987 writel(0x00000000, sram_base);
15988 writel(0x00000000, sram_base + 4);
15989 writel(0xffffffff, sram_base + 4);
15990 if (readl(sram_base) != 0x00000000)
15991 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15992 }
15993 }
15996 tg3_nvram_init(tp);
15998 /* If the device has an NVRAM, no need to load patch firmware */
15999 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16000 !tg3_flag(tp, NO_NVRAM))
16001 tp->fw_needed = NULL;
16003 grc_misc_cfg = tr32(GRC_MISC_CFG);
16004 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16006 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16007 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16008 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16009 tg3_flag_set(tp, IS_5788);
16011 if (!tg3_flag(tp, IS_5788) &&
16012 tg3_asic_rev(tp) != ASIC_REV_5700)
16013 tg3_flag_set(tp, TAGGED_STATUS);
16014 if (tg3_flag(tp, TAGGED_STATUS)) {
16015 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16016 HOSTCC_MODE_CLRTICK_TXBD);
16018 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16019 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16020 tp->misc_host_ctrl);
16021 }
16023 /* Preserve the APE MAC_MODE bits */
16024 if (tg3_flag(tp, ENABLE_APE))
16025 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16026 else
16027 tp->mac_mode = 0;
16029 if (tg3_10_100_only_device(tp, ent))
16030 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16032 err = tg3_phy_probe(tp);
16033 if (err) {
16034 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16035 /* ... but do not return immediately ... */
16036 tg3_mdio_fini(tp);
16037 }
16040 tg3_read_fw_ver(tp);
16042 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16043 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16044 } else {
16045 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16046 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16047 else
16048 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16049 }
16051 /* 5700 {AX,BX} chips have a broken status block link
16052 * change bit implementation, so we must use the
16053 * status register in those cases.
16054 */
16055 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16056 tg3_flag_set(tp, USE_LINKCHG_REG);
16057 else
16058 tg3_flag_clear(tp, USE_LINKCHG_REG);
16060 /* The led_ctrl is set during tg3_phy_probe, here we might
16061 * have to force the link status polling mechanism based
16062 * upon subsystem IDs.
16063 */
16064 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16065 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16066 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16067 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16068 tg3_flag_set(tp, USE_LINKCHG_REG);
16069 }
16071 /* For all SERDES we poll the MAC status register. */
16072 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16073 tg3_flag_set(tp, POLL_SERDES);
16074 else
16075 tg3_flag_clear(tp, POLL_SERDES);
16077 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16078 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16079 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16080 tg3_flag(tp, PCIX_MODE)) {
16081 tp->rx_offset = NET_SKB_PAD;
16082 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16083 tp->rx_copy_thresh = ~(u16)0;
16084 #endif
16085 }
16087 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16088 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16089 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16091 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16093 /* Increment the rx prod index on the rx std ring by at most
16094 * 8 for these chips to work around hw errata.
16095 */
16096 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16097 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16098 tg3_asic_rev(tp) == ASIC_REV_5755)
16099 tp->rx_std_max_post = 8;
16101 if (tg3_flag(tp, ASPM_WORKAROUND))
16102 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16103 PCIE_PWR_MGMT_L1_THRESH_MSK;
16104 else
16105 tp->pwrmgmt_thresh = TG3_PWRMGMT_THRESH;
16106 return err;
16107 }
16108 #ifdef CONFIG_SPARC
16109 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16110 {
16111 struct net_device *dev = tp->dev;
16112 struct pci_dev *pdev = tp->pdev;
16113 struct device_node *dp = pci_device_to_OF_node(pdev);
16114 const unsigned char *addr;
16115 int len;
16117 addr = of_get_property(dp, "local-mac-address", &len);
16118 if (addr && len == 6) {
16119 memcpy(dev->dev_addr, addr, 6);
16120 return 0;
16121 }
16123 return -ENODEV;
16124 }
16125 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16126 {
16127 struct net_device *dev = tp->dev;
16129 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
16130 return 0;
16131 }
16132 #endif
16134 static int tg3_get_device_address(struct tg3 *tp)
16135 {
16136 struct net_device *dev = tp->dev;
16137 u32 hi, lo, mac_offset;
16138 int addr_ok = 0;
16139 int err;
16141 #ifdef CONFIG_SPARC
16142 if (!tg3_get_macaddr_sparc(tp))
16143 return 0;
16144 #endif
16146 if (tg3_flag(tp, IS_SSB_CORE)) {
16147 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16148 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16149 return 0;
16150 }
16152 mac_offset = 0x7c;
16153 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16154 tg3_flag(tp, 5780_CLASS)) {
16155 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16156 mac_offset = 0xcc;
16157 if (tg3_nvram_lock(tp))
16158 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16159 else
16160 tg3_nvram_unlock(tp);
16161 } else if (tg3_flag(tp, 5717_PLUS)) {
16162 if (tp->pci_fn & 1)
16163 mac_offset = 0xcc;
16164 if (tp->pci_fn > 1)
16165 mac_offset += 0x18c;
16166 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16167 mac_offset = 0x10;
16169 /* First try to get it from MAC address mailbox. */
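/* Editorial note (not in the original source): the 0x484b signature
 * tested below is ASCII "HK" ('H' = 0x48, 'K' = 0x4b); bootcode appears
 * to stamp it into the upper half of the HIGH mailbox word to mark a
 * valid MAC address, with the six address bytes packed big-endian
 * across the HIGH/LOW words, e.g.:
 *
 *   hi = 0x484b0010, lo = 0x18a4b2c3  =>  MAC 00:10:18:a4:b2:c3
 */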
16170 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
16171 if ((hi >> 16) == 0x484b) {
16172 dev->dev_addr[0] = (hi >> 8) & 0xff;
16173 dev->dev_addr[1] = (hi >> 0) & 0xff;
16175 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16176 dev->dev_addr[2] = (lo >> 24) & 0xff;
16177 dev->dev_addr[3] = (lo >> 16) & 0xff;
16178 dev->dev_addr[4] = (lo >> 8) & 0xff;
16179 dev->dev_addr[5] = (lo >> 0) & 0xff;
16181 /* Some old bootcode may report a 0 MAC address in SRAM */
16182 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16183 }
16184 if (!addr_ok) {
16185 /* Next, try NVRAM. */
16186 if (!tg3_flag(tp, NO_NVRAM) &&
16187 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16188 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16189 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16190 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16191 }
16192 /* Finally just fetch it out of the MAC control regs. */
16193 else {
16194 hi = tr32(MAC_ADDR_0_HIGH);
16195 lo = tr32(MAC_ADDR_0_LOW);
16197 dev->dev_addr[5] = lo & 0xff;
16198 dev->dev_addr[4] = (lo >> 8) & 0xff;
16199 dev->dev_addr[3] = (lo >> 16) & 0xff;
16200 dev->dev_addr[2] = (lo >> 24) & 0xff;
16201 dev->dev_addr[1] = hi & 0xff;
16202 dev->dev_addr[0] = (hi >> 8) & 0xff;
16203 }
16204 }
16206 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16207 #ifdef CONFIG_SPARC
16208 if (!tg3_get_default_macaddr_sparc(tp))
16209 return 0;
16210 #endif
16211 return -EINVAL;
16212 }
16213 return 0;
16214 }
16216 #define BOUNDARY_SINGLE_CACHELINE 1
16217 #define BOUNDARY_MULTI_CACHELINE 2
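/* Editorial note (not in the original source): "goal" in the function
 * below selects how DMA bursts relate to host cache lines -- 0 means no
 * constraint, BOUNDARY_SINGLE_CACHELINE confines each burst to one cache
 * line, and BOUNDARY_MULTI_CACHELINE allows bursts spanning a small
 * multiple of one.
 */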
16219 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16220 {
16221 int cacheline_size;
16222 u8 byte;
16223 int goal;
16225 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16226 if (byte == 0)
16227 cacheline_size = 1024;
16228 else
16229 cacheline_size = (int) byte * 4;
16231 /* On 5703 and later chips, the boundary bits have no
16232 * effect.
16233 */
16234 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16235 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16236 !tg3_flag(tp, PCI_EXPRESS))
16237 goto out;
16239 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16240 goal = BOUNDARY_MULTI_CACHELINE;
16241 #else
16242 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16243 goal = BOUNDARY_SINGLE_CACHELINE;
16244 #else
16245 goal = 0;
16246 #endif
16247 #endif
16249 if (tg3_flag(tp, 57765_PLUS)) {
16250 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16251 goto out;
16252 }
16254 if (!goal)
16255 goto out;
16257 /* PCI controllers on most RISC systems tend to disconnect
16258 * when a device tries to burst across a cache-line boundary.
16259 * Therefore, letting tg3 do so just wastes PCI bandwidth.
16261 * Unfortunately, for PCI-E there are only limited
16262 * write-side controls for this, and thus for reads
16263 * we will still get the disconnects. We'll also waste
16264 * these PCI cycles for both read and write for chips
16265 * other than 5700 and 5701 which do not implement the
16266 * boundary bits.
16267 */
16268 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16269 switch (cacheline_size) {
16270 case 16:
16271 case 32:
16272 case 64:
16273 case 128:
16274 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16275 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16276 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16277 } else {
16278 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16279 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16280 }
16281 break;
16283 case 256:
16284 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16285 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16286 break;
16288 default:
16289 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16290 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16291 break;
16292 }
16293 } else if (tg3_flag(tp, PCI_EXPRESS)) {
16294 switch (cacheline_size) {
16295 case 16:
16296 case 32:
16297 case 64:
16298 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16299 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16300 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16301 break;
16302 }
16303 /* fallthrough */
16304 case 128:
16305 default:
16306 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16307 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16308 break;
16309 }
16310 } else {
16311 switch (cacheline_size) {
16312 case 16:
16313 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16314 val |= (DMA_RWCTRL_READ_BNDRY_16 |
16315 DMA_RWCTRL_WRITE_BNDRY_16);
16316 break;
16317 }
16318 /* fallthrough */
16319 case 32:
16320 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16321 val |= (DMA_RWCTRL_READ_BNDRY_32 |
16322 DMA_RWCTRL_WRITE_BNDRY_32);
16323 break;
16324 }
16325 /* fallthrough */
16326 case 64:
16327 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16328 val |= (DMA_RWCTRL_READ_BNDRY_64 |
16329 DMA_RWCTRL_WRITE_BNDRY_64);
16330 break;
16331 }
16332 /* fallthrough */
16333 case 128:
16334 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16335 val |= (DMA_RWCTRL_READ_BNDRY_128 |
16336 DMA_RWCTRL_WRITE_BNDRY_128);
16337 break;
16338 }
16339 /* fallthrough */
16340 case 256:
16341 val |= (DMA_RWCTRL_READ_BNDRY_256 |
16342 DMA_RWCTRL_WRITE_BNDRY_256);
16343 break;
16344 case 512:
16345 val |= (DMA_RWCTRL_READ_BNDRY_512 |
16346 DMA_RWCTRL_WRITE_BNDRY_512);
16347 break;
16348 case 1024:
16349 default:
16350 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16351 DMA_RWCTRL_WRITE_BNDRY_1024);
16352 break;
16353 }
16354 }
16356 out:
16357 return val;
16358 }
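/* Editorial note (not in the original source): a quick worked example of
 * the table above, assuming a plain PCI bus (neither PCIX_MODE nor
 * PCI_EXPRESS) and a cache line size of 16 dwords in config space:
 *
 *   cacheline_size = 16 * 4 = 64
 *   goal == BOUNDARY_SINGLE_CACHELINE
 *     => val |= DMA_RWCTRL_READ_BNDRY_64 | DMA_RWCTRL_WRITE_BNDRY_64
 *
 * i.e. DMA bursts are fenced at 64-byte boundaries so no burst spans
 * two host cache lines.
 */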
16360 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16361 int size, int to_device)
16362 {
16363 struct tg3_internal_buffer_desc test_desc;
16364 u32 sram_dma_descs;
16365 int i, ret;
16367 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16369 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16370 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16371 tw32(RDMAC_STATUS, 0);
16372 tw32(WDMAC_STATUS, 0);
16374 tw32(BUFMGR_MODE, 0);
16375 tw32(FTQ_RESET, 0);
16377 test_desc.addr_hi = ((u64) buf_dma) >> 32;
16378 test_desc.addr_lo = buf_dma & 0xffffffff;
16379 test_desc.nic_mbuf = 0x00002100;
16380 test_desc.len = size;
16382 /*
16383 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
16384 * the *second* time the tg3 driver was getting loaded after an
16385 * initial scan.
16386 *
16387 * Broadcom tells me:
16388 * ...the DMA engine is connected to the GRC block and a DMA
16389 * reset may affect the GRC block in some unpredictable way...
16390 * The behavior of resets to individual blocks has not been tested.
16392 * Broadcom noted the GRC reset will also reset all sub-components.
16393 */
16394 if (to_device) {
16395 test_desc.cqid_sqid = (13 << 8) | 2;
16397 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16398 udelay(40);
16399 } else {
16400 test_desc.cqid_sqid = (16 << 8) | 7;
16402 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16403 udelay(40);
16404 }
16405 test_desc.flags = 0x00000005;
16407 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16408 u32 val;
16410 val = *(((u32 *)&test_desc) + i);
16411 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16412 sram_dma_descs + (i * sizeof(u32)));
16413 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16414 }
16415 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
16417 if (to_device)
16418 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
16419 else
16420 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
16422 ret = -ENODEV;
16423 for (i = 0; i < 40; i++) {
16424 u32 val;
16426 if (to_device)
16427 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16428 else
16429 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16430 if ((val & 0xffff) == sram_dma_descs) {
16431 ret = 0;
16432 break;
16433 }
16435 udelay(100);
16436 }
16438 return ret;
16439 }
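/* Editorial note (not in the original source): tg3_do_test_dma() above
 * never maps NIC SRAM into the CPU address space. It pokes the test
 * descriptor into SRAM through the indirect window in PCI config space:
 * write the SRAM address to TG3PCI_MEM_WIN_BASE_ADDR, then move the word
 * at that address through TG3PCI_MEM_WIN_DATA, e.g. (sketch):
 *
 *   pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
 *   pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, &val);
 *   pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
 */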
16441 #define TEST_BUFFER_SIZE 0x2000
16443 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16444 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16445 { },
16446 };
16448 static int tg3_test_dma(struct tg3 *tp)
16449 {
16450 dma_addr_t buf_dma;
16451 u32 *buf, saved_dma_rwctrl;
16452 int ret = 0;
16454 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16455 &buf_dma, GFP_KERNEL);
16456 if (!buf) {
16457 ret = -ENOMEM;
16458 goto out_nofree;
16459 }
16461 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16462 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16464 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16466 if (tg3_flag(tp, 57765_PLUS))
16467 goto out;
16469 if (tg3_flag(tp, PCI_EXPRESS)) {
16470 /* DMA read watermark not used on PCIE */
16471 tp->dma_rwctrl |= 0x00180000;
16472 } else if (!tg3_flag(tp, PCIX_MODE)) {
16473 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16474 tg3_asic_rev(tp) == ASIC_REV_5750)
16475 tp->dma_rwctrl |= 0x003f0000;
16476 else
16477 tp->dma_rwctrl |= 0x003f000f;
16478 } else {
16479 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16480 tg3_asic_rev(tp) == ASIC_REV_5704) {
16481 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16482 u32 read_water = 0x7;
16484 /* If the 5704 is behind the EPB bridge, we can
16485 * do the less restrictive ONE_DMA workaround for
16486 * better performance.
16487 */
16488 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16489 tg3_asic_rev(tp) == ASIC_REV_5704)
16490 tp->dma_rwctrl |= 0x8000;
16491 else if (ccval == 0x6 || ccval == 0x7)
16492 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16494 if (tg3_asic_rev(tp) == ASIC_REV_5703)
16495 read_water = 4;
16496 /* Set bit 23 to enable PCIX hw bug fix */
16497 tp->dma_rwctrl |=
16498 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16499 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16500 (1 << 23);
16501 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
16502 /* 5780 always in PCIX mode */
16503 tp->dma_rwctrl |= 0x00144000;
16504 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
16505 /* 5714 always in PCIX mode */
16506 tp->dma_rwctrl |= 0x00148000;
16507 } else {
16508 tp->dma_rwctrl |= 0x001b000f;
16509 }
16510 }
16511 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
16512 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16514 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16515 tg3_asic_rev(tp) == ASIC_REV_5704)
16516 tp->dma_rwctrl &= 0xfffffff0;
16518 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16519 tg3_asic_rev(tp) == ASIC_REV_5701) {
16520 /* Remove this if it causes problems for some boards. */
16521 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16523 /* On 5700/5701 chips, we need to set this bit.
16524 * Otherwise the chip will issue cacheline transactions
16525 * to streamable DMA memory with not all the byte
16526 * enables turned on. This is an error on several
16527 * RISC PCI controllers, in particular sparc64.
16529 * On 5703/5704 chips, this bit has been reassigned
16530 * a different meaning. In particular, it is used
16531 * on those chips to enable a PCI-X workaround.
16533 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16534 }
16536 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16538 #if 0
16539 /* Unneeded, already done by tg3_get_invariants. */
16540 tg3_switch_clocks(tp);
16541 #endif
16543 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16544 tg3_asic_rev(tp) != ASIC_REV_5701)
16545 goto out;
16547 /* It is best to perform DMA test with maximum write burst size
16548 * to expose the 5700/5701 write DMA bug.
16549 */
16550 saved_dma_rwctrl = tp->dma_rwctrl;
16551 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16552 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16554 while (1) {
16555 u32 *p = buf, i;
16557 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16558 p[i] = i;
16560 /* Send the buffer to the chip. */
16561 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
16562 if (ret) {
16563 dev_err(&tp->pdev->dev,
16564 "%s: Buffer write failed. err = %d\n",
16570 /* validate data reached card RAM correctly. */
16571 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16572 u32 val;
16573 tg3_read_mem(tp, 0x2100 + (i*4), &val);
16574 if (le32_to_cpu(val) != p[i]) {
16575 dev_err(&tp->pdev->dev,
16576 "%s: Buffer corrupted on device! "
16577 "(%d != %d)\n", __func__, val, i);
16578 /* ret = -ENODEV here? */
16579 break;
16580 }
16581 }
16583 /* Now read it back. */
16584 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
16585 if (ret) {
16586 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16587 "err = %d\n", __func__, ret);
16592 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16593 if (p[i] == i)
16594 continue;
16596 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16597 DMA_RWCTRL_WRITE_BNDRY_16) {
16598 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16599 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16600 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16601 break;
16602 } else {
16603 dev_err(&tp->pdev->dev,
16604 "%s: Buffer corrupted on read back! "
16605 "(%d != %d)\n", __func__, p[i], i);
16611 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
16612 /* Success. */
16613 ret = 0;
16614 break;
16615 }
16616 }
16617 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16618 DMA_RWCTRL_WRITE_BNDRY_16) {
16619 /* DMA test passed without adjusting DMA boundary,
16620 * now look for chipsets that are known to expose the
16621 * DMA bug without failing the test.
16622 */
16623 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
16624 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16625 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16626 } else {
16627 /* Safe to use the calculated DMA boundary. */
16628 tp->dma_rwctrl = saved_dma_rwctrl;
16629 }
16631 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16632 }
16634 out:
16635 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
16636 out_nofree:
16637 return ret;
16638 }
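/* Editorial note (not in the original source): the loop above verifies DMA
 * in three steps -- (1) DMA the pattern buffer host->SRAM, (2) read SRAM
 * back word-by-word through the config-space memory window and compare,
 * (3) DMA SRAM->host and compare again. A mismatch on read-back is treated
 * as the known 5700/5701 write-DMA bug and retried with the write boundary
 * clamped to 16 bytes before giving up.
 */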
16640 static void tg3_init_bufmgr_config(struct tg3 *tp)
16641 {
16642 if (tg3_flag(tp, 57765_PLUS)) {
16643 tp->bufmgr_config.mbuf_read_dma_low_water =
16644 DEFAULT_MB_RDMA_LOW_WATER_5705;
16645 tp->bufmgr_config.mbuf_mac_rx_low_water =
16646 DEFAULT_MB_MACRX_LOW_WATER_57765;
16647 tp->bufmgr_config.mbuf_high_water =
16648 DEFAULT_MB_HIGH_WATER_57765;
16650 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16651 DEFAULT_MB_RDMA_LOW_WATER_5705;
16652 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16653 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16654 tp->bufmgr_config.mbuf_high_water_jumbo =
16655 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
16656 } else if (tg3_flag(tp, 5705_PLUS)) {
16657 tp->bufmgr_config.mbuf_read_dma_low_water =
16658 DEFAULT_MB_RDMA_LOW_WATER_5705;
16659 tp->bufmgr_config.mbuf_mac_rx_low_water =
16660 DEFAULT_MB_MACRX_LOW_WATER_5705;
16661 tp->bufmgr_config.mbuf_high_water =
16662 DEFAULT_MB_HIGH_WATER_5705;
16663 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16664 tp->bufmgr_config.mbuf_mac_rx_low_water =
16665 DEFAULT_MB_MACRX_LOW_WATER_5906;
16666 tp->bufmgr_config.mbuf_high_water =
16667 DEFAULT_MB_HIGH_WATER_5906;
16668 }
16670 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16671 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
16672 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16673 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
16674 tp->bufmgr_config.mbuf_high_water_jumbo =
16675 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
16676 } else {
16677 tp->bufmgr_config.mbuf_read_dma_low_water =
16678 DEFAULT_MB_RDMA_LOW_WATER;
16679 tp->bufmgr_config.mbuf_mac_rx_low_water =
16680 DEFAULT_MB_MACRX_LOW_WATER;
16681 tp->bufmgr_config.mbuf_high_water =
16682 DEFAULT_MB_HIGH_WATER;
16684 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16685 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
16686 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16687 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
16688 tp->bufmgr_config.mbuf_high_water_jumbo =
16689 DEFAULT_MB_HIGH_WATER_JUMBO;
16690 }
16692 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
16693 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
16694 }
16696 static char *tg3_phy_string(struct tg3 *tp)
16697 {
16698 switch (tp->phy_id & TG3_PHY_ID_MASK) {
16699 case TG3_PHY_ID_BCM5400: return "5400";
16700 case TG3_PHY_ID_BCM5401: return "5401";
16701 case TG3_PHY_ID_BCM5411: return "5411";
16702 case TG3_PHY_ID_BCM5701: return "5701";
16703 case TG3_PHY_ID_BCM5703: return "5703";
16704 case TG3_PHY_ID_BCM5704: return "5704";
16705 case TG3_PHY_ID_BCM5705: return "5705";
16706 case TG3_PHY_ID_BCM5750: return "5750";
16707 case TG3_PHY_ID_BCM5752: return "5752";
16708 case TG3_PHY_ID_BCM5714: return "5714";
16709 case TG3_PHY_ID_BCM5780: return "5780";
16710 case TG3_PHY_ID_BCM5755: return "5755";
16711 case TG3_PHY_ID_BCM5787: return "5787";
16712 case TG3_PHY_ID_BCM5784: return "5784";
16713 case TG3_PHY_ID_BCM5756: return "5722/5756";
16714 case TG3_PHY_ID_BCM5906: return "5906";
16715 case TG3_PHY_ID_BCM5761: return "5761";
16716 case TG3_PHY_ID_BCM5718C: return "5718C";
16717 case TG3_PHY_ID_BCM5718S: return "5718S";
16718 case TG3_PHY_ID_BCM57765: return "57765";
16719 case TG3_PHY_ID_BCM5719C: return "5719C";
16720 case TG3_PHY_ID_BCM5720C: return "5720C";
16721 case TG3_PHY_ID_BCM5762: return "5762C";
16722 case TG3_PHY_ID_BCM8002: return "8002/serdes";
16723 case 0: return "serdes";
16724 default: return "unknown";
16725 }
16726 }
16728 static char *tg3_bus_string(struct tg3 *tp, char *str)
16729 {
16730 if (tg3_flag(tp, PCI_EXPRESS)) {
16731 strcpy(str, "PCI Express");
16732 return str;
16733 } else if (tg3_flag(tp, PCIX_MODE)) {
16734 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
16736 strcpy(str, "PCIX:");
16738 if ((clock_ctrl == 7) ||
16739 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
16740 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
16741 strcat(str, "133MHz");
16742 else if (clock_ctrl == 0)
16743 strcat(str, "33MHz");
16744 else if (clock_ctrl == 2)
16745 strcat(str, "50MHz");
16746 else if (clock_ctrl == 4)
16747 strcat(str, "66MHz");
16748 else if (clock_ctrl == 6)
16749 strcat(str, "100MHz");
16750 } else {
16751 strcpy(str, "PCI:");
16752 if (tg3_flag(tp, PCI_HIGH_SPEED))
16753 strcat(str, "66MHz");
16754 else
16755 strcat(str, "33MHz");
16756 }
16757 if (tg3_flag(tp, PCI_32BIT))
16758 strcat(str, ":32-bit");
16759 else
16760 strcat(str, ":64-bit");
16762 return str;
16763 }
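/* Editorial note (not in the original source): tg3_bus_string() fills the
 * caller-supplied buffer with strings such as "PCI Express",
 * "PCIX:133MHz:64-bit" or "PCI:33MHz:32-bit"; tg3_init_one() below passes
 * a stack buffer and prints the result in its probe banner.
 */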
16764 static void tg3_init_coal(struct tg3 *tp)
16765 {
16766 struct ethtool_coalesce *ec = &tp->coal;
16768 memset(ec, 0, sizeof(*ec));
16769 ec->cmd = ETHTOOL_GCOALESCE;
16770 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
16771 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
16772 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
16773 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
16774 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
16775 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
16776 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
16777 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
16778 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
16780 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
16781 HOSTCC_MODE_CLRTICK_TXBD)) {
16782 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
16783 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
16784 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
16785 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
16786 }
16788 if (tg3_flag(tp, 5705_PLUS)) {
16789 ec->rx_coalesce_usecs_irq = 0;
16790 ec->tx_coalesce_usecs_irq = 0;
16791 ec->stats_block_coalesce_usecs = 0;
16792 }
16793 }
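/* Editorial note (not in the original source): the struct filled in above
 * is the same ethtool_coalesce layout that userspace sees, so these
 * defaults are what "ethtool -c ethX" reports and what "ethtool -C"
 * overrides through the driver's ethtool_ops.
 */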
16795 static int tg3_init_one(struct pci_dev *pdev,
16796 const struct pci_device_id *ent)
16797 {
16798 struct net_device *dev;
16799 struct tg3 *tp;
16800 int i, err, pm_cap;
16801 u32 sndmbx, rcvmbx, intmbx;
16802 char str[40];
16803 u64 dma_mask, persist_dma_mask;
16804 netdev_features_t features = 0;
16806 printk_once(KERN_INFO "%s\n", version);
16808 err = pci_enable_device(pdev);
16809 if (err) {
16810 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
16811 return err;
16812 }
16814 err = pci_request_regions(pdev, DRV_MODULE_NAME);
16815 if (err) {
16816 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
16817 goto err_out_disable_pdev;
16818 }
16820 pci_set_master(pdev);
16822 /* Find power-management capability. */
16823 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
16824 if (pm_cap == 0) {
16825 dev_err(&pdev->dev,
16826 "Cannot find Power Management capability, aborting\n");
16827 err = -EIO;
16828 goto err_out_free_res;
16829 }
16831 err = pci_set_power_state(pdev, PCI_D0);
16832 if (err) {
16833 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
16834 goto err_out_free_res;
16835 }
16837 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
16838 if (!dev) {
16839 err = -ENOMEM;
16840 goto err_out_power_down;
16841 }
16843 SET_NETDEV_DEV(dev, &pdev->dev);
16845 tp = netdev_priv(dev);
16846 tp->pdev = pdev;
16847 tp->dev = dev;
16848 tp->pm_cap = pm_cap;
16849 tp->rx_mode = TG3_DEF_RX_MODE;
16850 tp->tx_mode = TG3_DEF_TX_MODE;
16851 tp->irq_sync = 1;
16853 if (tg3_debug > 0)
16854 tp->msg_enable = tg3_debug;
16855 else
16856 tp->msg_enable = TG3_DEF_MSG_ENABLE;
16858 if (pdev_is_ssb_gige_core(pdev)) {
16859 tg3_flag_set(tp, IS_SSB_CORE);
16860 if (ssb_gige_must_flush_posted_writes(pdev))
16861 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
16862 if (ssb_gige_one_dma_at_once(pdev))
16863 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
16864 if (ssb_gige_have_roboswitch(pdev))
16865 tg3_flag_set(tp, ROBOSWITCH);
16866 if (ssb_gige_is_rgmii(pdev))
16867 tg3_flag_set(tp, RGMII_MODE);
16868 }
16870 /* The word/byte swap controls here control register access byte
16871 * swapping. DMA data byte swapping is controlled in the GRC_MODE
16872 * setting below.
16873 */
16874 tp->misc_host_ctrl =
16875 MISC_HOST_CTRL_MASK_PCI_INT |
16876 MISC_HOST_CTRL_WORD_SWAP |
16877 MISC_HOST_CTRL_INDIR_ACCESS |
16878 MISC_HOST_CTRL_PCISTATE_RW;
16880 /* The NONFRM (non-frame) byte/word swap controls take effect
16881 * on descriptor entries, anything which isn't packet data.
16882 *
16883 * The StrongARM chips on the board (one for tx, one for rx)
16884 * are running in big-endian mode.
16885 */
16886 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
16887 GRC_MODE_WSWAP_NONFRM_DATA);
16888 #ifdef __BIG_ENDIAN
16889 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
16890 #endif
16891 spin_lock_init(&tp->lock);
16892 spin_lock_init(&tp->indirect_lock);
16893 INIT_WORK(&tp->reset_task, tg3_reset_task);
16895 tp->regs = pci_ioremap_bar(pdev, BAR_0);
16896 if (!tp->regs) {
16897 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
16898 err = -ENOMEM;
16899 goto err_out_free_dev;
16900 }
16902 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16903 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
16904 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
16905 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
16906 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16907 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16908 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16909 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16910 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16911 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16912 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16913 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
16914 tg3_flag_set(tp, ENABLE_APE);
16915 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
16916 if (!tp->aperegs) {
16917 dev_err(&pdev->dev,
16918 "Cannot map APE registers, aborting\n");
16919 err = -ENOMEM;
16920 goto err_out_iounmap;
16921 }
16922 }
16924 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
16925 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
16927 dev->ethtool_ops = &tg3_ethtool_ops;
16928 dev->watchdog_timeo = TG3_TX_TIMEOUT;
16929 dev->netdev_ops = &tg3_netdev_ops;
16930 dev->irq = pdev->irq;
16932 err = tg3_get_invariants(tp, ent);
16933 if (err) {
16934 dev_err(&pdev->dev,
16935 "Problem fetching invariants of chip, aborting\n");
16936 goto err_out_apeunmap;
16937 }
16939 /* The EPB bridge inside 5714, 5715, and 5780 and any
16940 * device behind the EPB cannot support DMA addresses > 40-bit.
16941 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
16942 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
16943 * do DMA address check in tg3_start_xmit().
16944 */
16945 if (tg3_flag(tp, IS_5788))
16946 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
16947 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
16948 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
16949 #ifdef CONFIG_HIGHMEM
16950 dma_mask = DMA_BIT_MASK(64);
16951 #endif
16952 } else
16953 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
16955 /* Configure DMA attributes. */
16956 if (dma_mask > DMA_BIT_MASK(32)) {
16957 err = pci_set_dma_mask(pdev, dma_mask);
16958 if (err == 0) {
16959 features |= NETIF_F_HIGHDMA;
16960 err = pci_set_consistent_dma_mask(pdev,
16961 persist_dma_mask);
16962 if (err < 0) {
16963 dev_err(&pdev->dev, "Unable to obtain 64 bit "
16964 "DMA for consistent allocations\n");
16965 goto err_out_apeunmap;
16966 }
16967 }
16968 }
16969 if (err || dma_mask == DMA_BIT_MASK(32)) {
16970 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
16971 if (err) {
16972 dev_err(&pdev->dev,
16973 "No usable DMA configuration, aborting\n");
16974 goto err_out_apeunmap;
16975 }
16976 }
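/* Editorial note (not in the original source): net effect of the mask
 * selection above -- 5788 parts are limited to 32-bit DMA, EPB-bridged
 * parts (5714/5715/5780 class) persist at 40 bits, everything else asks
 * for 64 bits; if the streaming mask can't be set, the driver falls back
 * to a 32-bit mask or aborts the probe.
 */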
16978 tg3_init_bufmgr_config(tp);
16980 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
16982 /* 5700 B0 chips do not support checksumming correctly due
16983 * to hardware bugs.
16984 */
16985 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
16986 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
16988 if (tg3_flag(tp, 5755_PLUS))
16989 features |= NETIF_F_IPV6_CSUM;
16992 /* TSO is on by default on chips that support hardware TSO.
16993 * Firmware TSO on older chips gives lower performance, so it
16994 * is off by default, but can be enabled using ethtool.
16995 */
16996 if ((tg3_flag(tp, HW_TSO_1) ||
16997 tg3_flag(tp, HW_TSO_2) ||
16998 tg3_flag(tp, HW_TSO_3)) &&
16999 (features & NETIF_F_IP_CSUM))
17000 features |= NETIF_F_TSO;
17001 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17002 if (features & NETIF_F_IPV6_CSUM)
17003 features |= NETIF_F_TSO6;
17004 if (tg3_flag(tp, HW_TSO_3) ||
17005 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17006 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17007 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17008 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17009 tg3_asic_rev(tp) == ASIC_REV_57780)
17010 features |= NETIF_F_TSO_ECN;
17011 }
17012 }
17013 dev->features |= features;
17014 dev->vlan_features |= features;
17016 /*
17017 * Add loopback capability only for a subset of devices that support
17018 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17019 * loopback for the remaining devices.
17020 */
17021 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17022 !tg3_flag(tp, CPMU_PRESENT))
17023 /* Add the loopback capability */
17024 features |= NETIF_F_LOOPBACK;
17026 dev->hw_features |= features;
17028 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17029 !tg3_flag(tp, TSO_CAPABLE) &&
17030 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17031 tg3_flag_set(tp, MAX_RXPEND_64);
17032 tp->rx_pending = 63;
17033 }
17035 err = tg3_get_device_address(tp);
17036 if (err) {
17037 dev_err(&pdev->dev,
17038 "Could not obtain valid ethernet address, aborting\n");
17039 goto err_out_apeunmap;
17040 }
17042 /*
17043 * Reset chip in case UNDI or EFI driver did not shut down DMA.
17044 * The DMA self test below will enable WDMAC and we'll see (spurious)
17045 * pending DMA on the PCI bus at that point.
17046 */
17047 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17048 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17049 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17050 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17051 }
17053 err = tg3_test_dma(tp);
17054 if (err) {
17055 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17056 goto err_out_apeunmap;
17057 }
17059 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17060 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17061 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17062 for (i = 0; i < tp->irq_max; i++) {
17063 struct tg3_napi *tnapi = &tp->napi[i];
17065 tnapi->tp = tp;
17066 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17068 tnapi->int_mbox = intmbx;
17069 if (i <= 4)
17070 intmbx += 0x8;
17071 else
17072 intmbx += 0x4;
17074 tnapi->consmbox = rcvmbx;
17075 tnapi->prodmbox = sndmbx;
17077 if (i)
17078 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17079 else
17080 tnapi->coal_now = HOSTCC_MODE_NOW;
17082 if (!tg3_flag(tp, SUPPORT_MSIX))
17083 break;
17085 /*
17086 * If we support MSIX, we'll be using RSS. If we're using
17087 * RSS, the first vector only handles link interrupts and the
17088 * remaining vectors handle rx and tx interrupts. Reuse the
17089 * mailbox values for the next iteration. The values we setup
17090 * above are still useful for the single vectored mode.
17091 */
17092 if (!i)
17093 continue;
17095 rcvmbx += 0x8;
17097 if (sndmbx & 0x4)
17098 sndmbx -= 0x4;
17099 else
17100 sndmbx += 0xc;
17101 }
17105 pci_set_drvdata(pdev, dev);
17107 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17108 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17109 tg3_asic_rev(tp) == ASIC_REV_5762)
17110 tg3_flag_set(tp, PTP_CAPABLE);
17112 if (tg3_flag(tp, 5717_PLUS)) {
17113 /* Resume a low-power mode */
17114 tg3_frob_aux_power(tp, false);
17115 }
17117 tg3_timer_init(tp);
17119 tg3_carrier_off(tp);
17121 err = register_netdev(dev);
17122 if (err) {
17123 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17124 goto err_out_apeunmap;
17125 }
17127 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17128 tp->board_part_number,
17129 tg3_chip_rev_id(tp),
17130 tg3_bus_string(tp, str),
17131 dev->dev_addr);
17133 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17134 struct phy_device *phydev;
17135 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
17136 netdev_info(dev,
17137 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17138 phydev->drv->name, dev_name(&phydev->dev));
17139 } else {
17140 char *ethtype;
17142 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17143 ethtype = "10/100Base-TX";
17144 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17145 ethtype = "1000Base-SX";
17146 else
17147 ethtype = "10/100/1000Base-T";
17149 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17150 "(WireSpeed[%d], EEE[%d])\n",
17151 tg3_phy_string(tp), ethtype,
17152 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17153 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17154 }
17156 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17157 (dev->features & NETIF_F_RXCSUM) != 0,
17158 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17159 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17160 tg3_flag(tp, ENABLE_ASF) != 0,
17161 tg3_flag(tp, TSO_CAPABLE) != 0);
17162 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17163 tp->dma_rwctrl,
17164 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17165 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17167 pci_save_state(pdev);
17169 return 0;
17171 err_out_apeunmap:
17172 if (tp->aperegs) {
17173 iounmap(tp->aperegs);
17174 tp->aperegs = NULL;
17175 }
17177 err_out_iounmap:
17178 if (tp->regs) {
17179 iounmap(tp->regs);
17180 tp->regs = NULL;
17181 }
17183 err_out_free_dev:
17184 free_netdev(dev);
17186 err_out_power_down:
17187 pci_set_power_state(pdev, PCI_D3hot);
17189 err_out_free_res:
17190 pci_release_regions(pdev);
17192 err_out_disable_pdev:
17193 pci_disable_device(pdev);
17194 pci_set_drvdata(pdev, NULL);
17195 return err;
17196 }
17198 static void tg3_remove_one(struct pci_dev *pdev)
17199 {
17200 struct net_device *dev = pci_get_drvdata(pdev);
17202 if (dev) {
17203 struct tg3 *tp = netdev_priv(dev);
17205 release_firmware(tp->fw);
17207 tg3_reset_task_cancel(tp);
17209 if (tg3_flag(tp, USE_PHYLIB)) {
17210 tg3_phy_fini(tp);
17211 tg3_mdio_fini(tp);
17212 }
17214 unregister_netdev(dev);
17215 if (tp->aperegs) {
17216 iounmap(tp->aperegs);
17217 tp->aperegs = NULL;
17218 }
17219 if (tp->regs) {
17220 iounmap(tp->regs);
17221 tp->regs = NULL;
17222 }
17223 free_netdev(dev);
17224 pci_release_regions(pdev);
17225 pci_disable_device(pdev);
17226 pci_set_drvdata(pdev, NULL);
17227 }
17228 }
17230 #ifdef CONFIG_PM_SLEEP
17231 static int tg3_suspend(struct device *device)
17232 {
17233 struct pci_dev *pdev = to_pci_dev(device);
17234 struct net_device *dev = pci_get_drvdata(pdev);
17235 struct tg3 *tp = netdev_priv(dev);
17236 int err;
17238 if (!netif_running(dev))
17239 return 0;
17241 tg3_reset_task_cancel(tp);
17242 tg3_phy_stop(tp);
17243 tg3_netif_stop(tp);
17245 tg3_timer_stop(tp);
17247 tg3_full_lock(tp, 1);
17248 tg3_disable_ints(tp);
17249 tg3_full_unlock(tp);
17251 netif_device_detach(dev);
17253 tg3_full_lock(tp, 0);
17254 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17255 tg3_flag_clear(tp, INIT_COMPLETE);
17256 tg3_full_unlock(tp);
17258 err = tg3_power_down_prepare(tp);
17259 if (err) {
17260 int err2;
17262 tg3_full_lock(tp, 0);
17264 tg3_flag_set(tp, INIT_COMPLETE);
17265 err2 = tg3_restart_hw(tp, 1);
17266 if (err2)
17267 goto out;
17269 tg3_timer_start(tp);
17271 netif_device_attach(dev);
17272 tg3_netif_start(tp);
17274 out:
17275 tg3_full_unlock(tp);
17277 if (!err2)
17278 tg3_phy_start(tp);
17279 }
17281 return err;
17282 }
17284 static int tg3_resume(struct device *device)
17285 {
17286 struct pci_dev *pdev = to_pci_dev(device);
17287 struct net_device *dev = pci_get_drvdata(pdev);
17288 struct tg3 *tp = netdev_priv(dev);
17289 int err;
17291 if (!netif_running(dev))
17292 return 0;
17294 netif_device_attach(dev);
17296 tg3_full_lock(tp, 0);
17298 tg3_flag_set(tp, INIT_COMPLETE);
17299 err = tg3_restart_hw(tp, 1);
17300 if (err)
17301 goto out;
17303 tg3_timer_start(tp);
17305 tg3_netif_start(tp);
17307 out:
17308 tg3_full_unlock(tp);
17310 if (!err)
17311 tg3_phy_start(tp);
17313 return err;
17314 }
17316 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17317 #define TG3_PM_OPS (&tg3_pm_ops)
17319 #else
17321 #define TG3_PM_OPS NULL
17323 #endif /* CONFIG_PM_SLEEP */
17325 /**
17326 * tg3_io_error_detected - called when PCI error is detected
17327 * @pdev: Pointer to PCI device
17328 * @state: The current pci connection state
17330 * This function is called after a PCI bus error affecting
17331 * this device has been detected.
17332 */
17333 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17334 pci_channel_state_t state)
17336 struct net_device *netdev = pci_get_drvdata(pdev);
17337 struct tg3 *tp = netdev_priv(netdev);
17338 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17340 netdev_info(netdev, "PCI I/O error detected\n");
17342 rtnl_lock();
17344 if (!netif_running(netdev))
17345 goto done;
17347 tg3_phy_stop(tp);
17349 tg3_netif_stop(tp);
17351 tg3_timer_stop(tp);
17353 /* Want to make sure that the reset task doesn't run */
17354 tg3_reset_task_cancel(tp);
17356 netif_device_detach(netdev);
17358 /* Clean up software state, even if MMIO is blocked */
17359 tg3_full_lock(tp, 0);
17360 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17361 tg3_full_unlock(tp);
17363 done:
17364 if (state == pci_channel_io_perm_failure)
17365 err = PCI_ERS_RESULT_DISCONNECT;
17366 else
17367 pci_disable_device(pdev);
17369 rtnl_unlock();
17371 return err;
17372 }
17374 /**
17375 * tg3_io_slot_reset - called after the pci bus has been reset.
17376 * @pdev: Pointer to PCI device
17378 * Restart the card from scratch, as if from a cold-boot.
17379 * At this point, the card has experienced a hard reset,
17380 * followed by fixups by BIOS, and has its config space
17381 * set up identically to what it was at cold boot.
17382 */
17383 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17385 struct net_device *netdev = pci_get_drvdata(pdev);
17386 struct tg3 *tp = netdev_priv(netdev);
17387 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17388 int err;
17390 rtnl_lock();
17392 if (pci_enable_device(pdev)) {
17393 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
17394 goto done;
17395 }
17397 pci_set_master(pdev);
17398 pci_restore_state(pdev);
17399 pci_save_state(pdev);
17401 if (!netif_running(netdev)) {
17402 rc = PCI_ERS_RESULT_RECOVERED;
17403 goto done;
17404 }
17406 err = tg3_power_up(tp);
17407 if (err)
17408 goto done;
17410 rc = PCI_ERS_RESULT_RECOVERED;
17412 done:
17413 rtnl_unlock();
17415 return rc;
17416 }
17418 /**
17419 * tg3_io_resume - called when traffic can start flowing again.
17420 * @pdev: Pointer to PCI device
17422 * This callback is called when the error recovery driver tells
17423 * us that it's OK to resume normal operation.
17424 */
17425 static void tg3_io_resume(struct pci_dev *pdev)
17426 {
17427 struct net_device *netdev = pci_get_drvdata(pdev);
17428 struct tg3 *tp = netdev_priv(netdev);
17429 int err;
17431 rtnl_lock();
17433 if (!netif_running(netdev))
17434 goto done;
17436 tg3_full_lock(tp, 0);
17437 tg3_flag_set(tp, INIT_COMPLETE);
17438 err = tg3_restart_hw(tp, 1);
17439 if (err) {
17440 tg3_full_unlock(tp);
17441 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17442 goto done;
17443 }
17445 netif_device_attach(netdev);
17447 tg3_timer_start(tp);
17449 tg3_netif_start(tp);
17451 tg3_full_unlock(tp);
17453 tg3_phy_start(tp);
17455 done:
17456 rtnl_unlock();
17457 }
17459 static const struct pci_error_handlers tg3_err_handler = {
17460 .error_detected = tg3_io_error_detected,
17461 .slot_reset = tg3_io_slot_reset,
17462 .resume = tg3_io_resume
17463 };
17465 static struct pci_driver tg3_driver = {
17466 .name = DRV_MODULE_NAME,
17467 .id_table = tg3_pci_tbl,
17468 .probe = tg3_init_one,
17469 .remove = tg3_remove_one,
17470 .err_handler = &tg3_err_handler,
17471 .driver.pm = TG3_PM_OPS,
17472 };
17474 static int __init tg3_init(void)
17475 {
17476 return pci_register_driver(&tg3_driver);
17477 }
17479 static void __exit tg3_cleanup(void)
17480 {
17481 pci_unregister_driver(&tg3_driver);
17482 }
17484 module_init(tg3_init);
17485 module_exit(tg3_cleanup);