2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2013 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47 #include <linux/ssb/ssb_driver_gige.h>
48 #include <linux/hwmon.h>
49 #include <linux/hwmon-sysfs.h>
51 #include <net/checksum.h>
55 #include <asm/byteorder.h>
56 #include <linux/uaccess.h>
58 #include <uapi/linux/net_tstamp.h>
59 #include <linux/ptp_clock_kernel.h>
62 #include <asm/idprom.h>
71 /* Functions & macros to verify TG3_FLAGS types */
73 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
75 return test_bit(flag, bits);
78 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
83 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
85 clear_bit(flag, bits);
88 #define tg3_flag(tp, flag) \
89 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
90 #define tg3_flag_set(tp, flag) \
91 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
92 #define tg3_flag_clear(tp, flag) \
93 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
95 #define DRV_MODULE_NAME "tg3"
97 #define TG3_MIN_NUM 130
98 #define DRV_MODULE_VERSION \
99 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
100 #define DRV_MODULE_RELDATE "February 14, 2013"
102 #define RESET_KIND_SHUTDOWN 0
103 #define RESET_KIND_INIT 1
104 #define RESET_KIND_SUSPEND 2
106 #define TG3_DEF_RX_MODE 0
107 #define TG3_DEF_TX_MODE 0
108 #define TG3_DEF_MSG_ENABLE \
118 #define TG3_GRC_LCLCTL_PWRSW_DELAY 100
120 /* length of time before we decide the hardware is borked,
121 * and dev->tx_timeout() should be called to fix the problem
124 #define TG3_TX_TIMEOUT (5 * HZ)
126 /* hardware minimum and maximum for a single frame's data payload */
127 #define TG3_MIN_MTU 60
128 #define TG3_MAX_MTU(tp) \
129 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
131 /* These numbers seem to be hard coded in the NIC firmware somehow.
132 * You can't change the ring sizes, but you can change where you place
133 * them in the NIC onboard memory.
135 #define TG3_RX_STD_RING_SIZE(tp) \
136 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
137 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
138 #define TG3_DEF_RX_RING_PENDING 200
139 #define TG3_RX_JMB_RING_SIZE(tp) \
140 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
141 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
142 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
144 /* Do not place this n-ring entries value into the tp struct itself,
145 * we really want to expose these constants to GCC so that modulo et
146 * al. operations are done with shifts and masks instead of with
147 * hw multiply/modulo instructions. Another solution would be to
148 * replace things like '% foo' with '& (foo - 1)'.
151 #define TG3_TX_RING_SIZE 512
152 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
154 #define TG3_RX_STD_RING_BYTES(tp) \
155 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
156 #define TG3_RX_JMB_RING_BYTES(tp) \
157 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
158 #define TG3_RX_RCB_RING_BYTES(tp) \
159 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
160 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
162 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
164 #define TG3_DMA_BYTE_ENAB 64
166 #define TG3_RX_STD_DMA_SZ 1536
167 #define TG3_RX_JMB_DMA_SZ 9046
169 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
171 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
172 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
174 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
175 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
177 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
178 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
180 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
181 * that are at least dword aligned when used in PCIX mode. The driver
182 * works around this bug by double copying the packet. This workaround
183 * is built into the normal double copy length check for efficiency.
185 * However, the double copy is only necessary on those architectures
186 * where unaligned memory accesses are inefficient. For those architectures
187 * where unaligned memory accesses incur little penalty, we can reintegrate
188 * the 5701 in the normal rx path. Doing so saves a device structure
189 * dereference by hardcoding the double copy threshold in place.
191 #define TG3_RX_COPY_THRESHOLD 256
192 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
193 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
195 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
198 #if (NET_IP_ALIGN != 0)
199 #define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
201 #define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
204 /* minimum number of free TX descriptors required to wake up TX process */
205 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
206 #define TG3_TX_BD_DMA_MAX_2K 2048
207 #define TG3_TX_BD_DMA_MAX_4K 4096
209 #define TG3_RAW_IP_ALIGN 2
211 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
212 #define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
214 #define FIRMWARE_TG3 "tigon/tg3.bin"
215 #define FIRMWARE_TG357766 "tigon/tg357766.bin"
216 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
217 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
219 static char version[] =
220 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
222 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
223 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
224 MODULE_LICENSE("GPL");
225 MODULE_VERSION(DRV_MODULE_VERSION);
226 MODULE_FIRMWARE(FIRMWARE_TG3);
227 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
228 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
230 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
231 module_param(tg3_debug, int, 0);
232 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
234 #define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001
235 #define TG3_DRV_DATA_FLAG_5705_10_100 0x0002
237 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
257 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
258 TG3_DRV_DATA_FLAG_5705_10_100},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
260 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
261 TG3_DRV_DATA_FLAG_5705_10_100},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
264 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
265 TG3_DRV_DATA_FLAG_5705_10_100},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
272 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
278 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
286 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
287 PCI_VENDOR_ID_LENOVO,
288 TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
289 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
291 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
292 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
294 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
301 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
302 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
303 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
304 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
305 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
306 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
307 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
308 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
309 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
310 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
311 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
312 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
313 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
314 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
315 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
316 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
317 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
318 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
319 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
320 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
321 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
322 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
323 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
324 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
325 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
326 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
327 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
328 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
329 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
330 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
331 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
332 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
333 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
334 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
335 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
336 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
337 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
338 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
339 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
340 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
341 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
342 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
343 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
344 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
345 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
346 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
347 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
351 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
353 static const struct {
354 const char string[ETH_GSTRING_LEN];
355 } ethtool_stats_keys[] = {
358 { "rx_ucast_packets" },
359 { "rx_mcast_packets" },
360 { "rx_bcast_packets" },
362 { "rx_align_errors" },
363 { "rx_xon_pause_rcvd" },
364 { "rx_xoff_pause_rcvd" },
365 { "rx_mac_ctrl_rcvd" },
366 { "rx_xoff_entered" },
367 { "rx_frame_too_long_errors" },
369 { "rx_undersize_packets" },
370 { "rx_in_length_errors" },
371 { "rx_out_length_errors" },
372 { "rx_64_or_less_octet_packets" },
373 { "rx_65_to_127_octet_packets" },
374 { "rx_128_to_255_octet_packets" },
375 { "rx_256_to_511_octet_packets" },
376 { "rx_512_to_1023_octet_packets" },
377 { "rx_1024_to_1522_octet_packets" },
378 { "rx_1523_to_2047_octet_packets" },
379 { "rx_2048_to_4095_octet_packets" },
380 { "rx_4096_to_8191_octet_packets" },
381 { "rx_8192_to_9022_octet_packets" },
388 { "tx_flow_control" },
390 { "tx_single_collisions" },
391 { "tx_mult_collisions" },
393 { "tx_excessive_collisions" },
394 { "tx_late_collisions" },
395 { "tx_collide_2times" },
396 { "tx_collide_3times" },
397 { "tx_collide_4times" },
398 { "tx_collide_5times" },
399 { "tx_collide_6times" },
400 { "tx_collide_7times" },
401 { "tx_collide_8times" },
402 { "tx_collide_9times" },
403 { "tx_collide_10times" },
404 { "tx_collide_11times" },
405 { "tx_collide_12times" },
406 { "tx_collide_13times" },
407 { "tx_collide_14times" },
408 { "tx_collide_15times" },
409 { "tx_ucast_packets" },
410 { "tx_mcast_packets" },
411 { "tx_bcast_packets" },
412 { "tx_carrier_sense_errors" },
416 { "dma_writeq_full" },
417 { "dma_write_prioq_full" },
421 { "rx_threshold_hit" },
423 { "dma_readq_full" },
424 { "dma_read_prioq_full" },
425 { "tx_comp_queue_full" },
427 { "ring_set_send_prod_index" },
428 { "ring_status_update" },
430 { "nic_avoided_irqs" },
431 { "nic_tx_threshold_hit" },
433 { "mbuf_lwm_thresh_hit" },
436 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
437 #define TG3_NVRAM_TEST 0
438 #define TG3_LINK_TEST 1
439 #define TG3_REGISTER_TEST 2
440 #define TG3_MEMORY_TEST 3
441 #define TG3_MAC_LOOPB_TEST 4
442 #define TG3_PHY_LOOPB_TEST 5
443 #define TG3_EXT_LOOPB_TEST 6
444 #define TG3_INTERRUPT_TEST 7
447 static const struct {
448 const char string[ETH_GSTRING_LEN];
449 } ethtool_test_keys[] = {
450 [TG3_NVRAM_TEST] = { "nvram test (online) " },
451 [TG3_LINK_TEST] = { "link test (online) " },
452 [TG3_REGISTER_TEST] = { "register test (offline)" },
453 [TG3_MEMORY_TEST] = { "memory test (offline)" },
454 [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" },
455 [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" },
456 [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" },
457 [TG3_INTERRUPT_TEST] = { "interrupt test (offline)" },
460 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
463 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
465 writel(val, tp->regs + off);
468 static u32 tg3_read32(struct tg3 *tp, u32 off)
470 return readl(tp->regs + off);
473 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
475 writel(val, tp->aperegs + off);
478 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
480 return readl(tp->aperegs + off);
483 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
487 spin_lock_irqsave(&tp->indirect_lock, flags);
488 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
489 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
490 spin_unlock_irqrestore(&tp->indirect_lock, flags);
493 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
495 writel(val, tp->regs + off);
496 readl(tp->regs + off);
499 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
504 spin_lock_irqsave(&tp->indirect_lock, flags);
505 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
506 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
507 spin_unlock_irqrestore(&tp->indirect_lock, flags);
511 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
515 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
516 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
517 TG3_64BIT_REG_LOW, val);
520 if (off == TG3_RX_STD_PROD_IDX_REG) {
521 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
522 TG3_64BIT_REG_LOW, val);
526 spin_lock_irqsave(&tp->indirect_lock, flags);
527 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
528 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
529 spin_unlock_irqrestore(&tp->indirect_lock, flags);
531 /* In indirect mode when disabling interrupts, we also need
532 * to clear the interrupt bit in the GRC local ctrl register.
534 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
536 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
537 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
541 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
546 spin_lock_irqsave(&tp->indirect_lock, flags);
547 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
548 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
549 spin_unlock_irqrestore(&tp->indirect_lock, flags);
553 /* usec_wait specifies the wait time in usec when writing to certain registers
554 * where it is unsafe to read back the register without some delay.
555 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
556 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
558 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
560 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
561 /* Non-posted methods */
562 tp->write32(tp, off, val);
565 tg3_write32(tp, off, val);
570 /* Wait again after the read for the posted method to guarantee that
571 * the wait time is met.
577 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
579 tp->write32_mbox(tp, off, val);
580 if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
581 (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
582 !tg3_flag(tp, ICH_WORKAROUND)))
583 tp->read32_mbox(tp, off);
586 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
588 void __iomem *mbox = tp->regs + off;
590 if (tg3_flag(tp, TXD_MBOX_HWBUG))
592 if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
593 tg3_flag(tp, FLUSH_POSTED_WRITES))
597 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
599 return readl(tp->regs + off + GRCMBOX_BASE);
602 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
604 writel(val, tp->regs + off + GRCMBOX_BASE);
607 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
608 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
609 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
610 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
611 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
613 #define tw32(reg, val) tp->write32(tp, reg, val)
614 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
615 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
616 #define tr32(reg) tp->read32(tp, reg)
618 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
622 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
623 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
626 spin_lock_irqsave(&tp->indirect_lock, flags);
627 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
628 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
629 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
631 /* Always leave this as zero. */
632 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
634 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
635 tw32_f(TG3PCI_MEM_WIN_DATA, val);
637 /* Always leave this as zero. */
638 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
640 spin_unlock_irqrestore(&tp->indirect_lock, flags);
643 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
647 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
648 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
653 spin_lock_irqsave(&tp->indirect_lock, flags);
654 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
655 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
656 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
658 /* Always leave this as zero. */
659 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
661 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
662 *val = tr32(TG3PCI_MEM_WIN_DATA);
664 /* Always leave this as zero. */
665 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
667 spin_unlock_irqrestore(&tp->indirect_lock, flags);
670 static void tg3_ape_lock_init(struct tg3 *tp)
675 if (tg3_asic_rev(tp) == ASIC_REV_5761)
676 regbase = TG3_APE_LOCK_GRANT;
678 regbase = TG3_APE_PER_LOCK_GRANT;
680 /* Make sure the driver hasn't any stale locks. */
681 for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
683 case TG3_APE_LOCK_PHY0:
684 case TG3_APE_LOCK_PHY1:
685 case TG3_APE_LOCK_PHY2:
686 case TG3_APE_LOCK_PHY3:
687 bit = APE_LOCK_GRANT_DRIVER;
691 bit = APE_LOCK_GRANT_DRIVER;
693 bit = 1 << tp->pci_fn;
695 tg3_ape_write32(tp, regbase + 4 * i, bit);
700 static int tg3_ape_lock(struct tg3 *tp, int locknum)
704 u32 status, req, gnt, bit;
706 if (!tg3_flag(tp, ENABLE_APE))
710 case TG3_APE_LOCK_GPIO:
711 if (tg3_asic_rev(tp) == ASIC_REV_5761)
713 case TG3_APE_LOCK_GRC:
714 case TG3_APE_LOCK_MEM:
716 bit = APE_LOCK_REQ_DRIVER;
718 bit = 1 << tp->pci_fn;
720 case TG3_APE_LOCK_PHY0:
721 case TG3_APE_LOCK_PHY1:
722 case TG3_APE_LOCK_PHY2:
723 case TG3_APE_LOCK_PHY3:
724 bit = APE_LOCK_REQ_DRIVER;
730 if (tg3_asic_rev(tp) == ASIC_REV_5761) {
731 req = TG3_APE_LOCK_REQ;
732 gnt = TG3_APE_LOCK_GRANT;
734 req = TG3_APE_PER_LOCK_REQ;
735 gnt = TG3_APE_PER_LOCK_GRANT;
740 tg3_ape_write32(tp, req + off, bit);
742 /* Wait for up to 1 millisecond to acquire lock. */
743 for (i = 0; i < 100; i++) {
744 status = tg3_ape_read32(tp, gnt + off);
751 /* Revoke the lock request. */
752 tg3_ape_write32(tp, gnt + off, bit);
759 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
763 if (!tg3_flag(tp, ENABLE_APE))
767 case TG3_APE_LOCK_GPIO:
768 if (tg3_asic_rev(tp) == ASIC_REV_5761)
770 case TG3_APE_LOCK_GRC:
771 case TG3_APE_LOCK_MEM:
773 bit = APE_LOCK_GRANT_DRIVER;
775 bit = 1 << tp->pci_fn;
777 case TG3_APE_LOCK_PHY0:
778 case TG3_APE_LOCK_PHY1:
779 case TG3_APE_LOCK_PHY2:
780 case TG3_APE_LOCK_PHY3:
781 bit = APE_LOCK_GRANT_DRIVER;
787 if (tg3_asic_rev(tp) == ASIC_REV_5761)
788 gnt = TG3_APE_LOCK_GRANT;
790 gnt = TG3_APE_PER_LOCK_GRANT;
792 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
795 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
800 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
803 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
804 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
807 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
810 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
813 return timeout_us ? 0 : -EBUSY;
816 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
820 for (i = 0; i < timeout_us / 10; i++) {
821 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
823 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
829 return i == timeout_us / 10;
832 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
836 u32 i, bufoff, msgoff, maxlen, apedata;
838 if (!tg3_flag(tp, APE_HAS_NCSI))
841 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
842 if (apedata != APE_SEG_SIG_MAGIC)
845 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
846 if (!(apedata & APE_FW_STATUS_READY))
849 bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
851 msgoff = bufoff + 2 * sizeof(u32);
852 maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
857 /* Cap xfer sizes to scratchpad limits. */
858 length = (len > maxlen) ? maxlen : len;
861 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
862 if (!(apedata & APE_FW_STATUS_READY))
865 /* Wait for up to 1 msec for APE to service previous event. */
866 err = tg3_ape_event_lock(tp, 1000);
870 apedata = APE_EVENT_STATUS_DRIVER_EVNT |
871 APE_EVENT_STATUS_SCRTCHPD_READ |
872 APE_EVENT_STATUS_EVENT_PENDING;
873 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
875 tg3_ape_write32(tp, bufoff, base_off);
876 tg3_ape_write32(tp, bufoff + sizeof(u32), length);
878 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
879 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
883 if (tg3_ape_wait_for_event(tp, 30000))
886 for (i = 0; length; i += 4, length -= 4) {
887 u32 val = tg3_ape_read32(tp, msgoff + i);
888 memcpy(data, &val, sizeof(u32));
896 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
901 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
902 if (apedata != APE_SEG_SIG_MAGIC)
905 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
906 if (!(apedata & APE_FW_STATUS_READY))
909 /* Wait for up to 1 millisecond for APE to service previous event. */
910 err = tg3_ape_event_lock(tp, 1000);
914 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
915 event | APE_EVENT_STATUS_EVENT_PENDING);
917 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
918 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
923 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
928 if (!tg3_flag(tp, ENABLE_APE))
932 case RESET_KIND_INIT:
933 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
934 APE_HOST_SEG_SIG_MAGIC);
935 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
936 APE_HOST_SEG_LEN_MAGIC);
937 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
938 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
939 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
940 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
941 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
942 APE_HOST_BEHAV_NO_PHYLOCK);
943 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
944 TG3_APE_HOST_DRVR_STATE_START);
946 event = APE_EVENT_STATUS_STATE_START;
948 case RESET_KIND_SHUTDOWN:
949 /* With the interface we are currently using,
950 * APE does not track driver state. Wiping
951 * out the HOST SEGMENT SIGNATURE forces
952 * the APE to assume OS absent status.
954 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
956 if (device_may_wakeup(&tp->pdev->dev) &&
957 tg3_flag(tp, WOL_ENABLE)) {
958 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
959 TG3_APE_HOST_WOL_SPEED_AUTO);
960 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
962 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
964 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
966 event = APE_EVENT_STATUS_STATE_UNLOAD;
968 case RESET_KIND_SUSPEND:
969 event = APE_EVENT_STATUS_STATE_SUSPEND;
975 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
977 tg3_ape_send_event(tp, event);
980 static void tg3_disable_ints(struct tg3 *tp)
984 tw32(TG3PCI_MISC_HOST_CTRL,
985 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
986 for (i = 0; i < tp->irq_max; i++)
987 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
990 static void tg3_enable_ints(struct tg3 *tp)
997 tw32(TG3PCI_MISC_HOST_CTRL,
998 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
1000 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
1001 for (i = 0; i < tp->irq_cnt; i++) {
1002 struct tg3_napi *tnapi = &tp->napi[i];
1004 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1005 if (tg3_flag(tp, 1SHOT_MSI))
1006 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1008 tp->coal_now |= tnapi->coal_now;
1011 /* Force an initial interrupt */
1012 if (!tg3_flag(tp, TAGGED_STATUS) &&
1013 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1014 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1016 tw32(HOSTCC_MODE, tp->coal_now);
1018 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1021 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1023 struct tg3 *tp = tnapi->tp;
1024 struct tg3_hw_status *sblk = tnapi->hw_status;
1025 unsigned int work_exists = 0;
1027 /* check for phy events */
1028 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1029 if (sblk->status & SD_STATUS_LINK_CHG)
1033 /* check for TX work to do */
1034 if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1037 /* check for RX work to do */
1038 if (tnapi->rx_rcb_prod_idx &&
1039 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1046 * similar to tg3_enable_ints, but it accurately determines whether there
1047 * is new work pending and can return without flushing the PIO write
1048 * which reenables interrupts
1050 static void tg3_int_reenable(struct tg3_napi *tnapi)
1052 struct tg3 *tp = tnapi->tp;
1054 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1057 /* When doing tagged status, this work check is unnecessary.
1058 * The last_tag we write above tells the chip which piece of
1059 * work we've completed.
1061 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1062 tw32(HOSTCC_MODE, tp->coalesce_mode |
1063 HOSTCC_MODE_ENABLE | tnapi->coal_now);
1066 static void tg3_switch_clocks(struct tg3 *tp)
1069 u32 orig_clock_ctrl;
1071 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1074 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1076 orig_clock_ctrl = clock_ctrl;
1077 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1078 CLOCK_CTRL_CLKRUN_OENABLE |
1080 tp->pci_clock_ctrl = clock_ctrl;
1082 if (tg3_flag(tp, 5705_PLUS)) {
1083 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1084 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1085 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1087 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1088 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1090 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1092 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1093 clock_ctrl | (CLOCK_CTRL_ALTCLK),
1096 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1099 #define PHY_BUSY_LOOPS 5000
1101 static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
1108 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1110 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1114 tg3_ape_lock(tp, tp->phy_ape_lock);
1118 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1119 MI_COM_PHY_ADDR_MASK);
1120 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1121 MI_COM_REG_ADDR_MASK);
1122 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1124 tw32_f(MAC_MI_COM, frame_val);
1126 loops = PHY_BUSY_LOOPS;
1127 while (loops != 0) {
1129 frame_val = tr32(MAC_MI_COM);
1131 if ((frame_val & MI_COM_BUSY) == 0) {
1133 frame_val = tr32(MAC_MI_COM);
1141 *val = frame_val & MI_COM_DATA_MASK;
1145 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1146 tw32_f(MAC_MI_MODE, tp->mi_mode);
1150 tg3_ape_unlock(tp, tp->phy_ape_lock);
1155 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1157 return __tg3_readphy(tp, tp->phy_addr, reg, val);
1160 static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
1167 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1168 (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1171 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1173 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1177 tg3_ape_lock(tp, tp->phy_ape_lock);
1179 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1180 MI_COM_PHY_ADDR_MASK);
1181 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1182 MI_COM_REG_ADDR_MASK);
1183 frame_val |= (val & MI_COM_DATA_MASK);
1184 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1186 tw32_f(MAC_MI_COM, frame_val);
1188 loops = PHY_BUSY_LOOPS;
1189 while (loops != 0) {
1191 frame_val = tr32(MAC_MI_COM);
1192 if ((frame_val & MI_COM_BUSY) == 0) {
1194 frame_val = tr32(MAC_MI_COM);
1204 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1205 tw32_f(MAC_MI_MODE, tp->mi_mode);
1209 tg3_ape_unlock(tp, tp->phy_ape_lock);
1214 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1216 return __tg3_writephy(tp, tp->phy_addr, reg, val);
1219 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1223 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1227 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1231 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1232 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1236 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1242 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1246 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1250 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1254 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1255 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1259 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1265 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1269 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1271 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1276 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1280 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1282 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1287 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1291 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1292 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1293 MII_TG3_AUXCTL_SHDWSEL_MISC);
1295 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1300 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1302 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1303 set |= MII_TG3_AUXCTL_MISC_WREN;
1305 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1308 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1313 err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1319 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1321 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1323 err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1324 val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1329 static int tg3_bmcr_reset(struct tg3 *tp)
1334 /* OK, reset it, and poll the BMCR_RESET bit until it
1335 * clears or we time out.
1337 phy_control = BMCR_RESET;
1338 err = tg3_writephy(tp, MII_BMCR, phy_control);
1344 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1348 if ((phy_control & BMCR_RESET) == 0) {
1360 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1362 struct tg3 *tp = bp->priv;
1365 spin_lock_bh(&tp->lock);
1367 if (tg3_readphy(tp, reg, &val))
1370 spin_unlock_bh(&tp->lock);
1375 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1377 struct tg3 *tp = bp->priv;
1380 spin_lock_bh(&tp->lock);
1382 if (tg3_writephy(tp, reg, val))
1385 spin_unlock_bh(&tp->lock);
1390 static int tg3_mdio_reset(struct mii_bus *bp)
1395 static void tg3_mdio_config_5785(struct tg3 *tp)
1398 struct phy_device *phydev;
1400 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1401 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1402 case PHY_ID_BCM50610:
1403 case PHY_ID_BCM50610M:
1404 val = MAC_PHYCFG2_50610_LED_MODES;
1406 case PHY_ID_BCMAC131:
1407 val = MAC_PHYCFG2_AC131_LED_MODES;
1409 case PHY_ID_RTL8211C:
1410 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1412 case PHY_ID_RTL8201E:
1413 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1419 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1420 tw32(MAC_PHYCFG2, val);
1422 val = tr32(MAC_PHYCFG1);
1423 val &= ~(MAC_PHYCFG1_RGMII_INT |
1424 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1425 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1426 tw32(MAC_PHYCFG1, val);
1431 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1432 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1433 MAC_PHYCFG2_FMODE_MASK_MASK |
1434 MAC_PHYCFG2_GMODE_MASK_MASK |
1435 MAC_PHYCFG2_ACT_MASK_MASK |
1436 MAC_PHYCFG2_QUAL_MASK_MASK |
1437 MAC_PHYCFG2_INBAND_ENABLE;
1439 tw32(MAC_PHYCFG2, val);
1441 val = tr32(MAC_PHYCFG1);
1442 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1443 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1444 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1445 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1446 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1447 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1448 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1450 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1451 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1452 tw32(MAC_PHYCFG1, val);
1454 val = tr32(MAC_EXT_RGMII_MODE);
1455 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1456 MAC_RGMII_MODE_RX_QUALITY |
1457 MAC_RGMII_MODE_RX_ACTIVITY |
1458 MAC_RGMII_MODE_RX_ENG_DET |
1459 MAC_RGMII_MODE_TX_ENABLE |
1460 MAC_RGMII_MODE_TX_LOWPWR |
1461 MAC_RGMII_MODE_TX_RESET);
1462 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1463 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1464 val |= MAC_RGMII_MODE_RX_INT_B |
1465 MAC_RGMII_MODE_RX_QUALITY |
1466 MAC_RGMII_MODE_RX_ACTIVITY |
1467 MAC_RGMII_MODE_RX_ENG_DET;
1468 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1469 val |= MAC_RGMII_MODE_TX_ENABLE |
1470 MAC_RGMII_MODE_TX_LOWPWR |
1471 MAC_RGMII_MODE_TX_RESET;
1473 tw32(MAC_EXT_RGMII_MODE, val);
1476 static void tg3_mdio_start(struct tg3 *tp)
1478 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1479 tw32_f(MAC_MI_MODE, tp->mi_mode);
1482 if (tg3_flag(tp, MDIOBUS_INITED) &&
1483 tg3_asic_rev(tp) == ASIC_REV_5785)
1484 tg3_mdio_config_5785(tp);
1487 static int tg3_mdio_init(struct tg3 *tp)
1491 struct phy_device *phydev;
1493 if (tg3_flag(tp, 5717_PLUS)) {
1496 tp->phy_addr = tp->pci_fn + 1;
1498 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1499 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1501 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1502 TG3_CPMU_PHY_STRAP_IS_SERDES;
1506 tp->phy_addr = TG3_PHY_MII_ADDR;
1510 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1513 tp->mdio_bus = mdiobus_alloc();
1514 if (tp->mdio_bus == NULL)
1517 tp->mdio_bus->name = "tg3 mdio bus";
1518 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1519 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1520 tp->mdio_bus->priv = tp;
1521 tp->mdio_bus->parent = &tp->pdev->dev;
1522 tp->mdio_bus->read = &tg3_mdio_read;
1523 tp->mdio_bus->write = &tg3_mdio_write;
1524 tp->mdio_bus->reset = &tg3_mdio_reset;
1525 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1526 tp->mdio_bus->irq = &tp->mdio_irq[0];
1528 for (i = 0; i < PHY_MAX_ADDR; i++)
1529 tp->mdio_bus->irq[i] = PHY_POLL;
1531 /* The bus registration will look for all the PHYs on the mdio bus.
1532 * Unfortunately, it does not ensure the PHY is powered up before
1533 * accessing the PHY ID registers. A chip reset is the
1534 * quickest way to bring the device back to an operational state..
1536 if (tg3_readphy(tp, MII_BMCR, ®) || (reg & BMCR_PDOWN))
1539 i = mdiobus_register(tp->mdio_bus);
1541 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1542 mdiobus_free(tp->mdio_bus);
1546 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1548 if (!phydev || !phydev->drv) {
1549 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1550 mdiobus_unregister(tp->mdio_bus);
1551 mdiobus_free(tp->mdio_bus);
1555 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1556 case PHY_ID_BCM57780:
1557 phydev->interface = PHY_INTERFACE_MODE_GMII;
1558 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1560 case PHY_ID_BCM50610:
1561 case PHY_ID_BCM50610M:
1562 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1563 PHY_BRCM_RX_REFCLK_UNUSED |
1564 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1565 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1566 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1567 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1568 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1569 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1570 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1571 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1573 case PHY_ID_RTL8211C:
1574 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1576 case PHY_ID_RTL8201E:
1577 case PHY_ID_BCMAC131:
1578 phydev->interface = PHY_INTERFACE_MODE_MII;
1579 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1580 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1584 tg3_flag_set(tp, MDIOBUS_INITED);
1586 if (tg3_asic_rev(tp) == ASIC_REV_5785)
1587 tg3_mdio_config_5785(tp);
1592 static void tg3_mdio_fini(struct tg3 *tp)
1594 if (tg3_flag(tp, MDIOBUS_INITED)) {
1595 tg3_flag_clear(tp, MDIOBUS_INITED);
1596 mdiobus_unregister(tp->mdio_bus);
1597 mdiobus_free(tp->mdio_bus);
1601 /* tp->lock is held. */
1602 static inline void tg3_generate_fw_event(struct tg3 *tp)
1606 val = tr32(GRC_RX_CPU_EVENT);
1607 val |= GRC_RX_CPU_DRIVER_EVENT;
1608 tw32_f(GRC_RX_CPU_EVENT, val);
1610 tp->last_event_jiffies = jiffies;
1613 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1615 /* tp->lock is held. */
1616 static void tg3_wait_for_event_ack(struct tg3 *tp)
1619 unsigned int delay_cnt;
1622 /* If enough time has passed, no wait is necessary. */
1623 time_remain = (long)(tp->last_event_jiffies + 1 +
1624 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1626 if (time_remain < 0)
1629 /* Check if we can shorten the wait time. */
1630 delay_cnt = jiffies_to_usecs(time_remain);
1631 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1632 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1633 delay_cnt = (delay_cnt >> 3) + 1;
1635 for (i = 0; i < delay_cnt; i++) {
1636 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1642 /* tp->lock is held. */
1643 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1648 if (!tg3_readphy(tp, MII_BMCR, ®))
1650 if (!tg3_readphy(tp, MII_BMSR, ®))
1651 val |= (reg & 0xffff);
1655 if (!tg3_readphy(tp, MII_ADVERTISE, ®))
1657 if (!tg3_readphy(tp, MII_LPA, ®))
1658 val |= (reg & 0xffff);
1662 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1663 if (!tg3_readphy(tp, MII_CTRL1000, ®))
1665 if (!tg3_readphy(tp, MII_STAT1000, ®))
1666 val |= (reg & 0xffff);
1670 if (!tg3_readphy(tp, MII_PHYADDR, ®))
1677 /* tp->lock is held. */
1678 static void tg3_ump_link_report(struct tg3 *tp)
1682 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1685 tg3_phy_gather_ump_data(tp, data);
1687 tg3_wait_for_event_ack(tp);
1689 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1690 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1691 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1692 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1693 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1694 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1696 tg3_generate_fw_event(tp);
1699 /* tp->lock is held. */
1700 static void tg3_stop_fw(struct tg3 *tp)
1702 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1703 /* Wait for RX cpu to ACK the previous event. */
1704 tg3_wait_for_event_ack(tp);
1706 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1708 tg3_generate_fw_event(tp);
1710 /* Wait for RX cpu to ACK this event. */
1711 tg3_wait_for_event_ack(tp);
1715 /* tp->lock is held. */
1716 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1718 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1719 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1721 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1723 case RESET_KIND_INIT:
1724 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1728 case RESET_KIND_SHUTDOWN:
1729 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1733 case RESET_KIND_SUSPEND:
1734 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1743 if (kind == RESET_KIND_INIT ||
1744 kind == RESET_KIND_SUSPEND)
1745 tg3_ape_driver_state_change(tp, kind);
1748 /* tp->lock is held. */
1749 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1751 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1753 case RESET_KIND_INIT:
1754 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1755 DRV_STATE_START_DONE);
1758 case RESET_KIND_SHUTDOWN:
1759 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1760 DRV_STATE_UNLOAD_DONE);
1768 if (kind == RESET_KIND_SHUTDOWN)
1769 tg3_ape_driver_state_change(tp, kind);
1772 /* tp->lock is held. */
1773 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1775 if (tg3_flag(tp, ENABLE_ASF)) {
1777 case RESET_KIND_INIT:
1778 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1782 case RESET_KIND_SHUTDOWN:
1783 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1787 case RESET_KIND_SUSPEND:
1788 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1798 static int tg3_poll_fw(struct tg3 *tp)
1803 if (tg3_flag(tp, IS_SSB_CORE)) {
1804 /* We don't use firmware. */
1808 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1809 /* Wait up to 20ms for init done. */
1810 for (i = 0; i < 200; i++) {
1811 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1818 /* Wait for firmware initialization to complete. */
1819 for (i = 0; i < 100000; i++) {
1820 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1821 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1826 /* Chip might not be fitted with firmware. Some Sun onboard
1827 * parts are configured like that. So don't signal the timeout
1828 * of the above loop as an error, but do report the lack of
1829 * running firmware once.
1831 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1832 tg3_flag_set(tp, NO_FWARE_REPORTED);
1834 netdev_info(tp->dev, "No firmware running\n");
1837 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1838 /* The 57765 A0 needs a little more
1839 * time to do some important work.
1847 static void tg3_link_report(struct tg3 *tp)
1849 if (!netif_carrier_ok(tp->dev)) {
1850 netif_info(tp, link, tp->dev, "Link is down\n");
1851 tg3_ump_link_report(tp);
1852 } else if (netif_msg_link(tp)) {
1853 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1854 (tp->link_config.active_speed == SPEED_1000 ?
1856 (tp->link_config.active_speed == SPEED_100 ?
1858 (tp->link_config.active_duplex == DUPLEX_FULL ?
1861 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1862 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1864 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1867 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1868 netdev_info(tp->dev, "EEE is %s\n",
1869 tp->setlpicnt ? "enabled" : "disabled");
1871 tg3_ump_link_report(tp);
1874 tp->link_up = netif_carrier_ok(tp->dev);
1877 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1881 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1882 miireg = ADVERTISE_1000XPAUSE;
1883 else if (flow_ctrl & FLOW_CTRL_TX)
1884 miireg = ADVERTISE_1000XPSE_ASYM;
1885 else if (flow_ctrl & FLOW_CTRL_RX)
1886 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1893 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1897 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1898 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1899 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1900 if (lcladv & ADVERTISE_1000XPAUSE)
1902 if (rmtadv & ADVERTISE_1000XPAUSE)
1909 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1913 u32 old_rx_mode = tp->rx_mode;
1914 u32 old_tx_mode = tp->tx_mode;
1916 if (tg3_flag(tp, USE_PHYLIB))
1917 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1919 autoneg = tp->link_config.autoneg;
1921 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1922 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1923 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1925 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1927 flowctrl = tp->link_config.flowctrl;
1929 tp->link_config.active_flowctrl = flowctrl;
1931 if (flowctrl & FLOW_CTRL_RX)
1932 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1934 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1936 if (old_rx_mode != tp->rx_mode)
1937 tw32_f(MAC_RX_MODE, tp->rx_mode);
1939 if (flowctrl & FLOW_CTRL_TX)
1940 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1942 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1944 if (old_tx_mode != tp->tx_mode)
1945 tw32_f(MAC_TX_MODE, tp->tx_mode);
1948 static void tg3_adjust_link(struct net_device *dev)
1950 u8 oldflowctrl, linkmesg = 0;
1951 u32 mac_mode, lcl_adv, rmt_adv;
1952 struct tg3 *tp = netdev_priv(dev);
1953 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1955 spin_lock_bh(&tp->lock);
1957 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1958 MAC_MODE_HALF_DUPLEX);
1960 oldflowctrl = tp->link_config.active_flowctrl;
1966 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1967 mac_mode |= MAC_MODE_PORT_MODE_MII;
1968 else if (phydev->speed == SPEED_1000 ||
1969 tg3_asic_rev(tp) != ASIC_REV_5785)
1970 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1972 mac_mode |= MAC_MODE_PORT_MODE_MII;
1974 if (phydev->duplex == DUPLEX_HALF)
1975 mac_mode |= MAC_MODE_HALF_DUPLEX;
1977 lcl_adv = mii_advertise_flowctrl(
1978 tp->link_config.flowctrl);
1981 rmt_adv = LPA_PAUSE_CAP;
1982 if (phydev->asym_pause)
1983 rmt_adv |= LPA_PAUSE_ASYM;
1986 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1988 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1990 if (mac_mode != tp->mac_mode) {
1991 tp->mac_mode = mac_mode;
1992 tw32_f(MAC_MODE, tp->mac_mode);
1996 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
1997 if (phydev->speed == SPEED_10)
1999 MAC_MI_STAT_10MBPS_MODE |
2000 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2002 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2005 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2006 tw32(MAC_TX_LENGTHS,
2007 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2008 (6 << TX_LENGTHS_IPG_SHIFT) |
2009 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2011 tw32(MAC_TX_LENGTHS,
2012 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2013 (6 << TX_LENGTHS_IPG_SHIFT) |
2014 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2016 if (phydev->link != tp->old_link ||
2017 phydev->speed != tp->link_config.active_speed ||
2018 phydev->duplex != tp->link_config.active_duplex ||
2019 oldflowctrl != tp->link_config.active_flowctrl)
2022 tp->old_link = phydev->link;
2023 tp->link_config.active_speed = phydev->speed;
2024 tp->link_config.active_duplex = phydev->duplex;
2026 spin_unlock_bh(&tp->lock);
2029 tg3_link_report(tp);
2032 static int tg3_phy_init(struct tg3 *tp)
2034 struct phy_device *phydev;
2036 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2039 /* Bring the PHY back to a known state. */
2042 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2044 /* Attach the MAC to the PHY. */
2045 phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2046 tg3_adjust_link, phydev->interface);
2047 if (IS_ERR(phydev)) {
2048 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2049 return PTR_ERR(phydev);
2052 /* Mask with MAC supported features. */
2053 switch (phydev->interface) {
2054 case PHY_INTERFACE_MODE_GMII:
2055 case PHY_INTERFACE_MODE_RGMII:
2056 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2057 phydev->supported &= (PHY_GBIT_FEATURES |
2059 SUPPORTED_Asym_Pause);
2063 case PHY_INTERFACE_MODE_MII:
2064 phydev->supported &= (PHY_BASIC_FEATURES |
2066 SUPPORTED_Asym_Pause);
2069 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2073 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2075 phydev->advertising = phydev->supported;
2080 static void tg3_phy_start(struct tg3 *tp)
2082 struct phy_device *phydev;
2084 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2087 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2089 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2090 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2091 phydev->speed = tp->link_config.speed;
2092 phydev->duplex = tp->link_config.duplex;
2093 phydev->autoneg = tp->link_config.autoneg;
2094 phydev->advertising = tp->link_config.advertising;
2099 phy_start_aneg(phydev);
2102 static void tg3_phy_stop(struct tg3 *tp)
2104 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2107 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2110 static void tg3_phy_fini(struct tg3 *tp)
2112 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2113 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2114 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2118 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2123 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2126 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2127 /* Cannot do read-modify-write on 5401 */
2128 err = tg3_phy_auxctl_write(tp,
2129 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2130 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2135 err = tg3_phy_auxctl_read(tp,
2136 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2140 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2141 err = tg3_phy_auxctl_write(tp,
2142 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2148 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2152 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2155 tg3_writephy(tp, MII_TG3_FET_TEST,
2156 phytest | MII_TG3_FET_SHADOW_EN);
2157 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2159 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2161 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2162 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2164 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2168 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2172 if (!tg3_flag(tp, 5705_PLUS) ||
2173 (tg3_flag(tp, 5717_PLUS) &&
2174 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2177 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2178 tg3_phy_fet_toggle_apd(tp, enable);
2182 reg = MII_TG3_MISC_SHDW_WREN |
2183 MII_TG3_MISC_SHDW_SCR5_SEL |
2184 MII_TG3_MISC_SHDW_SCR5_LPED |
2185 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2186 MII_TG3_MISC_SHDW_SCR5_SDTL |
2187 MII_TG3_MISC_SHDW_SCR5_C125OE;
2188 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2189 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2191 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2194 reg = MII_TG3_MISC_SHDW_WREN |
2195 MII_TG3_MISC_SHDW_APD_SEL |
2196 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2198 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2200 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2203 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2207 if (!tg3_flag(tp, 5705_PLUS) ||
2208 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2211 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2214 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2215 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2217 tg3_writephy(tp, MII_TG3_FET_TEST,
2218 ephy | MII_TG3_FET_SHADOW_EN);
2219 if (!tg3_readphy(tp, reg, &phy)) {
2221 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2223 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2224 tg3_writephy(tp, reg, phy);
2226 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2231 ret = tg3_phy_auxctl_read(tp,
2232 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2235 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2237 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2238 tg3_phy_auxctl_write(tp,
2239 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2244 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2249 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2252 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2254 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2255 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2258 static void tg3_phy_apply_otp(struct tg3 *tp)
2267 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2270 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2271 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2272 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2274 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2275 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2276 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2278 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2279 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2280 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2282 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2283 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2285 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2286 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2288 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2289 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2290 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2292 tg3_phy_toggle_auxctl_smdsp(tp, false);
2295 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2299 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2304 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2305 current_link_up == 1 &&
2306 tp->link_config.active_duplex == DUPLEX_FULL &&
2307 (tp->link_config.active_speed == SPEED_100 ||
2308 tp->link_config.active_speed == SPEED_1000)) {
2311 if (tp->link_config.active_speed == SPEED_1000)
2312 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2314 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2316 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2318 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2319 TG3_CL45_D7_EEERES_STAT, &val);
2321 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2322 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2326 if (!tp->setlpicnt) {
2327 if (current_link_up == 1 &&
2328 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2329 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2330 tg3_phy_toggle_auxctl_smdsp(tp, false);
2333 val = tr32(TG3_CPMU_EEE_MODE);
2334 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2338 static void tg3_phy_eee_enable(struct tg3 *tp)
2342 if (tp->link_config.active_speed == SPEED_1000 &&
2343 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2344 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2345 tg3_flag(tp, 57765_CLASS)) &&
2346 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2347 val = MII_TG3_DSP_TAP26_ALNOKO |
2348 MII_TG3_DSP_TAP26_RMRXSTO;
2349 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2350 tg3_phy_toggle_auxctl_smdsp(tp, false);
2353 val = tr32(TG3_CPMU_EEE_MODE);
2354 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2357 static int tg3_wait_macro_done(struct tg3 *tp)
2364 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2365 if ((tmp32 & 0x1000) == 0)
2375 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2377 static const u32 test_pat[4][6] = {
2378 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2379 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2380 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2381 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2385 for (chan = 0; chan < 4; chan++) {
2388 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2389 (chan * 0x2000) | 0x0200);
2390 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2392 for (i = 0; i < 6; i++)
2393 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2396 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2397 if (tg3_wait_macro_done(tp)) {
2402 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2403 (chan * 0x2000) | 0x0200);
2404 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2405 if (tg3_wait_macro_done(tp)) {
2410 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2411 if (tg3_wait_macro_done(tp)) {
2416 for (i = 0; i < 6; i += 2) {
2419 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2420 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2421 tg3_wait_macro_done(tp)) {
2427 if (low != test_pat[chan][i] ||
2428 high != test_pat[chan][i+1]) {
2429 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2430 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2431 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2441 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2445 for (chan = 0; chan < 4; chan++) {
2448 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2449 (chan * 0x2000) | 0x0200);
2450 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2451 for (i = 0; i < 6; i++)
2452 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2453 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2454 if (tg3_wait_macro_done(tp))
2461 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2463 u32 reg32, phy9_orig;
2464 int retries, do_phy_reset, err;
2470 err = tg3_bmcr_reset(tp);
2476 /* Disable transmitter and interrupt. */
2477 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32))
2481 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2483 /* Set full-duplex, 1000 mbps. */
2484 tg3_writephy(tp, MII_BMCR,
2485 BMCR_FULLDPLX | BMCR_SPEED1000);
2487 /* Set to master mode. */
2488 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2491 tg3_writephy(tp, MII_CTRL1000,
2492 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2494 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2498 /* Block the PHY control access. */
2499 tg3_phydsp_write(tp, 0x8005, 0x0800);
2501 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2504 } while (--retries);
2506 err = tg3_phy_reset_chanpat(tp);
2510 tg3_phydsp_write(tp, 0x8005, 0x0000);
2512 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2513 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2515 tg3_phy_toggle_auxctl_smdsp(tp, false);
2517 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2519 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32)) {
2521 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2528 static void tg3_carrier_off(struct tg3 *tp)
2530 netif_carrier_off(tp->dev);
2531 tp->link_up = false;
2534 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2536 if (tg3_flag(tp, ENABLE_ASF))
2537 netdev_warn(tp->dev,
2538 "Management side-band traffic will be interrupted during phy settings change\n");
2541 /* This will reset the tigon3 PHY if there is no valid
2542 * link unless the FORCE argument is non-zero.
2544 static int tg3_phy_reset(struct tg3 *tp)
2549 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2550 val = tr32(GRC_MISC_CFG);
2551 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2554 err = tg3_readphy(tp, MII_BMSR, &val);
2555 err |= tg3_readphy(tp, MII_BMSR, &val);
2559 if (netif_running(tp->dev) && tp->link_up) {
2560 netif_carrier_off(tp->dev);
2561 tg3_link_report(tp);
2564 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2565 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2566 tg3_asic_rev(tp) == ASIC_REV_5705) {
2567 err = tg3_phy_reset_5703_4_5(tp);
2574 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2575 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2576 cpmuctrl = tr32(TG3_CPMU_CTRL);
2577 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2579 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2582 err = tg3_bmcr_reset(tp);
2586 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2587 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2588 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2590 tw32(TG3_CPMU_CTRL, cpmuctrl);
2593 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2594 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2595 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2596 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2597 CPMU_LSPD_1000MB_MACCLK_12_5) {
2598 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2600 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2604 if (tg3_flag(tp, 5717_PLUS) &&
2605 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2608 tg3_phy_apply_otp(tp);
2610 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2611 tg3_phy_toggle_apd(tp, true);
2613 tg3_phy_toggle_apd(tp, false);
2616 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2617 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2618 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2619 tg3_phydsp_write(tp, 0x000a, 0x0323);
2620 tg3_phy_toggle_auxctl_smdsp(tp, false);
2623 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2624 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2625 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2628 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2629 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2630 tg3_phydsp_write(tp, 0x000a, 0x310b);
2631 tg3_phydsp_write(tp, 0x201f, 0x9506);
2632 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2633 tg3_phy_toggle_auxctl_smdsp(tp, false);
2635 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2636 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2637 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2638 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2639 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2640 tg3_writephy(tp, MII_TG3_TEST1,
2641 MII_TG3_TEST1_TRIM_EN | 0x4);
2643 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2645 tg3_phy_toggle_auxctl_smdsp(tp, false);
2649 /* Set Extended packet length bit (bit 14) on all chips that */
2650 /* support jumbo frames */
2651 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2652 /* Cannot do read-modify-write on 5401 */
2653 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2654 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2655 /* Set bit 14 with read-modify-write to preserve other bits */
2656 err = tg3_phy_auxctl_read(tp,
2657 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2659 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2660 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2663 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2664 * jumbo frames transmission.
2666 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2667 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2668 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2669 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2672 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2673 /* adjust output voltage */
2674 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2677 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2678 tg3_phydsp_write(tp, 0xffb, 0x4000);
2680 tg3_phy_toggle_automdix(tp, 1);
2681 tg3_phy_set_wirespeed(tp);
2685 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2686 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2687 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2688 TG3_GPIO_MSG_NEED_VAUX)
2689 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2690 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2691 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2692 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2693 (TG3_GPIO_MSG_DRVR_PRES << 12))
2695 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2696 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2697 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2698 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2699 (TG3_GPIO_MSG_NEED_VAUX << 12))
2701 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2705 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2706 tg3_asic_rev(tp) == ASIC_REV_5719)
2707 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2709 status = tr32(TG3_CPMU_DRV_STATUS);
2711 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2712 status &= ~(TG3_GPIO_MSG_MASK << shift);
2713 status |= (newstat << shift);
2715 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2716 tg3_asic_rev(tp) == ASIC_REV_5719)
2717 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2719 tw32(TG3_CPMU_DRV_STATUS, status);
2721 return status >> TG3_APE_GPIO_MSG_SHIFT;
2724 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2726 if (!tg3_flag(tp, IS_NIC))
2729 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2730 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2731 tg3_asic_rev(tp) == ASIC_REV_5720) {
2732 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2735 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2737 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2738 TG3_GRC_LCLCTL_PWRSW_DELAY);
2740 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2742 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2743 TG3_GRC_LCLCTL_PWRSW_DELAY);
2749 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2753 if (!tg3_flag(tp, IS_NIC) ||
2754 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2755 tg3_asic_rev(tp) == ASIC_REV_5701)
2758 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2760 tw32_wait_f(GRC_LOCAL_CTRL,
2761 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2762 TG3_GRC_LCLCTL_PWRSW_DELAY);
2764 tw32_wait_f(GRC_LOCAL_CTRL,
2766 TG3_GRC_LCLCTL_PWRSW_DELAY);
2768 tw32_wait_f(GRC_LOCAL_CTRL,
2769 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2770 TG3_GRC_LCLCTL_PWRSW_DELAY);
2773 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2775 if (!tg3_flag(tp, IS_NIC))
2778 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2779 tg3_asic_rev(tp) == ASIC_REV_5701) {
2780 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2781 (GRC_LCLCTRL_GPIO_OE0 |
2782 GRC_LCLCTRL_GPIO_OE1 |
2783 GRC_LCLCTRL_GPIO_OE2 |
2784 GRC_LCLCTRL_GPIO_OUTPUT0 |
2785 GRC_LCLCTRL_GPIO_OUTPUT1),
2786 TG3_GRC_LCLCTL_PWRSW_DELAY);
2787 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2788 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2789 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2790 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2791 GRC_LCLCTRL_GPIO_OE1 |
2792 GRC_LCLCTRL_GPIO_OE2 |
2793 GRC_LCLCTRL_GPIO_OUTPUT0 |
2794 GRC_LCLCTRL_GPIO_OUTPUT1 |
2796 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2797 TG3_GRC_LCLCTL_PWRSW_DELAY);
2799 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2800 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2801 TG3_GRC_LCLCTL_PWRSW_DELAY);
2803 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2804 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2805 TG3_GRC_LCLCTL_PWRSW_DELAY);
2808 u32 grc_local_ctrl = 0;
2810 /* Workaround to prevent overdrawing Amps. */
2811 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2812 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2813 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2815 TG3_GRC_LCLCTL_PWRSW_DELAY);
2818 /* On 5753 and variants, GPIO2 cannot be used. */
2819 no_gpio2 = tp->nic_sram_data_cfg &
2820 NIC_SRAM_DATA_CFG_NO_GPIO2;
2822 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2823 GRC_LCLCTRL_GPIO_OE1 |
2824 GRC_LCLCTRL_GPIO_OE2 |
2825 GRC_LCLCTRL_GPIO_OUTPUT1 |
2826 GRC_LCLCTRL_GPIO_OUTPUT2;
2828 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2829 GRC_LCLCTRL_GPIO_OUTPUT2);
2831 tw32_wait_f(GRC_LOCAL_CTRL,
2832 tp->grc_local_ctrl | grc_local_ctrl,
2833 TG3_GRC_LCLCTL_PWRSW_DELAY);
2835 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2837 tw32_wait_f(GRC_LOCAL_CTRL,
2838 tp->grc_local_ctrl | grc_local_ctrl,
2839 TG3_GRC_LCLCTL_PWRSW_DELAY);
2842 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2843 tw32_wait_f(GRC_LOCAL_CTRL,
2844 tp->grc_local_ctrl | grc_local_ctrl,
2845 TG3_GRC_LCLCTL_PWRSW_DELAY);
2850 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2854 /* Serialize power state transitions */
2855 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2858 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2859 msg = TG3_GPIO_MSG_NEED_VAUX;
2861 msg = tg3_set_function_status(tp, msg);
2863 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2866 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2867 tg3_pwrsrc_switch_to_vaux(tp);
2869 tg3_pwrsrc_die_with_vmain(tp);
2872 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2875 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2877 bool need_vaux = false;
2879 /* The GPIOs do something completely different on 57765. */
2880 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2883 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2884 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2885 tg3_asic_rev(tp) == ASIC_REV_5720) {
2886 tg3_frob_aux_power_5717(tp, include_wol ?
2887 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2891 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2892 struct net_device *dev_peer;
2894 dev_peer = pci_get_drvdata(tp->pdev_peer);
2896 /* remove_one() may have been run on the peer. */
2898 struct tg3 *tp_peer = netdev_priv(dev_peer);
2900 if (tg3_flag(tp_peer, INIT_COMPLETE))
2903 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2904 tg3_flag(tp_peer, ENABLE_ASF))
2909 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2910 tg3_flag(tp, ENABLE_ASF))
2914 tg3_pwrsrc_switch_to_vaux(tp);
2916 tg3_pwrsrc_die_with_vmain(tp);
2919 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2921 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2923 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2924 if (speed != SPEED_10)
2926 } else if (speed == SPEED_10)
2932 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2936 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
2939 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2940 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
2941 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2942 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2945 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2946 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2947 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2952 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2954 val = tr32(GRC_MISC_CFG);
2955 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2958 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2960 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2963 tg3_writephy(tp, MII_ADVERTISE, 0);
2964 tg3_writephy(tp, MII_BMCR,
2965 BMCR_ANENABLE | BMCR_ANRESTART);
2967 tg3_writephy(tp, MII_TG3_FET_TEST,
2968 phytest | MII_TG3_FET_SHADOW_EN);
2969 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2970 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2972 MII_TG3_FET_SHDW_AUXMODE4,
2975 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2978 } else if (do_low_power) {
2979 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2980 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2982 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2983 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2984 MII_TG3_AUXCTL_PCTL_VREG_11V;
2985 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2988 /* The PHY should not be powered down on some chips because
2991 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2992 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2993 (tg3_asic_rev(tp) == ASIC_REV_5780 &&
2994 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2995 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
2999 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3000 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3001 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3002 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3003 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3004 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3007 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3010 /* tp->lock is held. */
3011 static int tg3_nvram_lock(struct tg3 *tp)
3013 if (tg3_flag(tp, NVRAM)) {
3016 if (tp->nvram_lock_cnt == 0) {
3017 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3018 for (i = 0; i < 8000; i++) {
3019 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3024 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3028 tp->nvram_lock_cnt++;
3033 /* tp->lock is held. */
3034 static void tg3_nvram_unlock(struct tg3 *tp)
3036 if (tg3_flag(tp, NVRAM)) {
3037 if (tp->nvram_lock_cnt > 0)
3038 tp->nvram_lock_cnt--;
3039 if (tp->nvram_lock_cnt == 0)
3040 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3044 /* tp->lock is held. */
3045 static void tg3_enable_nvram_access(struct tg3 *tp)
3047 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3048 u32 nvaccess = tr32(NVRAM_ACCESS);
3050 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3054 /* tp->lock is held. */
3055 static void tg3_disable_nvram_access(struct tg3 *tp)
3057 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3058 u32 nvaccess = tr32(NVRAM_ACCESS);
3060 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3064 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3065 u32 offset, u32 *val)
3070 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3073 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3074 EEPROM_ADDR_DEVID_MASK |
3076 tw32(GRC_EEPROM_ADDR,
3078 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3079 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3080 EEPROM_ADDR_ADDR_MASK) |
3081 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3083 for (i = 0; i < 1000; i++) {
3084 tmp = tr32(GRC_EEPROM_ADDR);
3086 if (tmp & EEPROM_ADDR_COMPLETE)
3090 if (!(tmp & EEPROM_ADDR_COMPLETE))
3093 tmp = tr32(GRC_EEPROM_DATA);
3096 * The data will always be opposite the native endian
3097 * format. Perform a blind byteswap to compensate.
3104 #define NVRAM_CMD_TIMEOUT 10000
3106 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3110 tw32(NVRAM_CMD, nvram_cmd);
3111 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3113 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3119 if (i == NVRAM_CMD_TIMEOUT)
3125 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3127 if (tg3_flag(tp, NVRAM) &&
3128 tg3_flag(tp, NVRAM_BUFFERED) &&
3129 tg3_flag(tp, FLASH) &&
3130 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3131 (tp->nvram_jedecnum == JEDEC_ATMEL))
3133 addr = ((addr / tp->nvram_pagesize) <<
3134 ATMEL_AT45DB0X1B_PAGE_POS) +
3135 (addr % tp->nvram_pagesize);
3140 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3142 if (tg3_flag(tp, NVRAM) &&
3143 tg3_flag(tp, NVRAM_BUFFERED) &&
3144 tg3_flag(tp, FLASH) &&
3145 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3146 (tp->nvram_jedecnum == JEDEC_ATMEL))
3148 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3149 tp->nvram_pagesize) +
3150 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3155 /* NOTE: Data read in from NVRAM is byteswapped according to
3156 * the byteswapping settings for all other register accesses.
3157 * tg3 devices are BE devices, so on a BE machine, the data
3158 * returned will be exactly as it is seen in NVRAM. On a LE
3159 * machine, the 32-bit value will be byteswapped.
3161 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3165 if (!tg3_flag(tp, NVRAM))
3166 return tg3_nvram_read_using_eeprom(tp, offset, val);
3168 offset = tg3_nvram_phys_addr(tp, offset);
3170 if (offset > NVRAM_ADDR_MSK)
3173 ret = tg3_nvram_lock(tp);
3177 tg3_enable_nvram_access(tp);
3179 tw32(NVRAM_ADDR, offset);
3180 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3181 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3184 *val = tr32(NVRAM_RDDATA);
3186 tg3_disable_nvram_access(tp);
3188 tg3_nvram_unlock(tp);
3193 /* Ensures NVRAM data is in bytestream format. */
3194 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3197 int res = tg3_nvram_read(tp, offset, &v);
3199 *val = cpu_to_be32(v);
3203 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3204 u32 offset, u32 len, u8 *buf)
3209 for (i = 0; i < len; i += 4) {
3215 memcpy(&data, buf + i, 4);
3218 * The SEEPROM interface expects the data to always be opposite
3219 * the native endian format. We accomplish this by reversing
3220 * all the operations that would have been performed on the
3221 * data from a call to tg3_nvram_read_be32().
3223 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3225 val = tr32(GRC_EEPROM_ADDR);
3226 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3228 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3230 tw32(GRC_EEPROM_ADDR, val |
3231 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3232 (addr & EEPROM_ADDR_ADDR_MASK) |
3236 for (j = 0; j < 1000; j++) {
3237 val = tr32(GRC_EEPROM_ADDR);
3239 if (val & EEPROM_ADDR_COMPLETE)
3243 if (!(val & EEPROM_ADDR_COMPLETE)) {
3252 /* offset and length are dword aligned */
3253 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3257 u32 pagesize = tp->nvram_pagesize;
3258 u32 pagemask = pagesize - 1;
3262 tmp = kmalloc(pagesize, GFP_KERNEL);
3268 u32 phy_addr, page_off, size;
3270 phy_addr = offset & ~pagemask;
3272 for (j = 0; j < pagesize; j += 4) {
3273 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3274 (__be32 *) (tmp + j));
3281 page_off = offset & pagemask;
3288 memcpy(tmp + page_off, buf, size);
3290 offset = offset + (pagesize - page_off);
3292 tg3_enable_nvram_access(tp);
3295 * Before we can erase the flash page, we need
3296 * to issue a special "write enable" command.
3298 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3300 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3303 /* Erase the target page */
3304 tw32(NVRAM_ADDR, phy_addr);
3306 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3307 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3309 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3312 /* Issue another write enable to start the write. */
3313 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3315 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3318 for (j = 0; j < pagesize; j += 4) {
3321 data = *((__be32 *) (tmp + j));
3323 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3325 tw32(NVRAM_ADDR, phy_addr + j);
3327 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3331 nvram_cmd |= NVRAM_CMD_FIRST;
3332 else if (j == (pagesize - 4))
3333 nvram_cmd |= NVRAM_CMD_LAST;
3335 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3343 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3344 tg3_nvram_exec_cmd(tp, nvram_cmd);
3351 /* offset and length are dword aligned */
3352 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3357 for (i = 0; i < len; i += 4, offset += 4) {
3358 u32 page_off, phy_addr, nvram_cmd;
3361 memcpy(&data, buf + i, 4);
3362 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3364 page_off = offset % tp->nvram_pagesize;
3366 phy_addr = tg3_nvram_phys_addr(tp, offset);
3368 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3370 if (page_off == 0 || i == 0)
3371 nvram_cmd |= NVRAM_CMD_FIRST;
3372 if (page_off == (tp->nvram_pagesize - 4))
3373 nvram_cmd |= NVRAM_CMD_LAST;
3376 nvram_cmd |= NVRAM_CMD_LAST;
3378 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3379 !tg3_flag(tp, FLASH) ||
3380 !tg3_flag(tp, 57765_PLUS))
3381 tw32(NVRAM_ADDR, phy_addr);
3383 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3384 !tg3_flag(tp, 5755_PLUS) &&
3385 (tp->nvram_jedecnum == JEDEC_ST) &&
3386 (nvram_cmd & NVRAM_CMD_FIRST)) {
3389 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3390 ret = tg3_nvram_exec_cmd(tp, cmd);
3394 if (!tg3_flag(tp, FLASH)) {
3395 /* We always do complete word writes to eeprom. */
3396 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3399 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3406 /* offset and length are dword aligned */
3407 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3411 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3412 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3413 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3417 if (!tg3_flag(tp, NVRAM)) {
3418 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3422 ret = tg3_nvram_lock(tp);
3426 tg3_enable_nvram_access(tp);
3427 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3428 tw32(NVRAM_WRITE1, 0x406);
3430 grc_mode = tr32(GRC_MODE);
3431 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3433 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3434 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3437 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3441 grc_mode = tr32(GRC_MODE);
3442 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3444 tg3_disable_nvram_access(tp);
3445 tg3_nvram_unlock(tp);
3448 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3449 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3456 #define RX_CPU_SCRATCH_BASE 0x30000
3457 #define RX_CPU_SCRATCH_SIZE 0x04000
3458 #define TX_CPU_SCRATCH_BASE 0x34000
3459 #define TX_CPU_SCRATCH_SIZE 0x04000
3461 /* tp->lock is held. */
3462 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3465 const int iters = 10000;
3467 for (i = 0; i < iters; i++) {
3468 tw32(cpu_base + CPU_STATE, 0xffffffff);
3469 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3470 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3474 return (i == iters) ? -EBUSY : 0;
3477 /* tp->lock is held. */
3478 static int tg3_rxcpu_pause(struct tg3 *tp)
3480 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3482 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3483 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3489 /* tp->lock is held. */
3490 static int tg3_txcpu_pause(struct tg3 *tp)
3492 return tg3_pause_cpu(tp, TX_CPU_BASE);
3495 /* tp->lock is held. */
3496 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3498 tw32(cpu_base + CPU_STATE, 0xffffffff);
3499 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3502 /* tp->lock is held. */
3503 static void tg3_rxcpu_resume(struct tg3 *tp)
3505 tg3_resume_cpu(tp, RX_CPU_BASE);
3508 /* tp->lock is held. */
3509 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3513 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3515 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3516 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3518 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3521 if (cpu_base == RX_CPU_BASE) {
3522 rc = tg3_rxcpu_pause(tp);
3525 * There is only an Rx CPU for the 5750 derivative in the
3528 if (tg3_flag(tp, IS_SSB_CORE))
3531 rc = tg3_txcpu_pause(tp);
3535 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3536 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3540 /* Clear firmware's nvram arbitration. */
3541 if (tg3_flag(tp, NVRAM))
3542 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3546 static int tg3_fw_data_len(struct tg3 *tp,
3547 const struct tg3_firmware_hdr *fw_hdr)
3551 /* Non fragmented firmware have one firmware header followed by a
3552 * contiguous chunk of data to be written. The length field in that
3553 * header is not the length of data to be written but the complete
3554 * length of the bss. The data length is determined based on
3555 * tp->fw->size minus headers.
3557 * Fragmented firmware have a main header followed by multiple
3558 * fragments. Each fragment is identical to non fragmented firmware
3559 * with a firmware header followed by a contiguous chunk of data. In
3560 * the main header, the length field is unused and set to 0xffffffff.
3561 * In each fragment header the length is the entire size of that
3562 * fragment i.e. fragment data + header length. Data length is
3563 * therefore length field in the header minus TG3_FW_HDR_LEN.
3565 if (tp->fw_len == 0xffffffff)
3566 fw_len = be32_to_cpu(fw_hdr->len);
3568 fw_len = tp->fw->size;
3570 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3573 /* tp->lock is held. */
3574 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3575 u32 cpu_scratch_base, int cpu_scratch_size,
3576 const struct tg3_firmware_hdr *fw_hdr)
3579 void (*write_op)(struct tg3 *, u32, u32);
3580 int total_len = tp->fw->size;
3582 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3584 "%s: Trying to load TX cpu firmware which is 5705\n",
3589 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3590 write_op = tg3_write_mem;
3592 write_op = tg3_write_indirect_reg32;
3594 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3595 /* It is possible that bootcode is still loading at this point.
3596 * Get the nvram lock first before halting the cpu.
3598 int lock_err = tg3_nvram_lock(tp);
3599 err = tg3_halt_cpu(tp, cpu_base);
3601 tg3_nvram_unlock(tp);
3605 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3606 write_op(tp, cpu_scratch_base + i, 0);
3607 tw32(cpu_base + CPU_STATE, 0xffffffff);
3608 tw32(cpu_base + CPU_MODE,
3609 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3611 /* Subtract additional main header for fragmented firmware and
3612 * advance to the first fragment
3614 total_len -= TG3_FW_HDR_LEN;
3619 u32 *fw_data = (u32 *)(fw_hdr + 1);
3620 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3621 write_op(tp, cpu_scratch_base +
3622 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3624 be32_to_cpu(fw_data[i]));
3626 total_len -= be32_to_cpu(fw_hdr->len);
3628 /* Advance to next fragment */
3629 fw_hdr = (struct tg3_firmware_hdr *)
3630 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3631 } while (total_len > 0);
3639 /* tp->lock is held. */
3640 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3643 const int iters = 5;
3645 tw32(cpu_base + CPU_STATE, 0xffffffff);
3646 tw32_f(cpu_base + CPU_PC, pc);
3648 for (i = 0; i < iters; i++) {
3649 if (tr32(cpu_base + CPU_PC) == pc)
3651 tw32(cpu_base + CPU_STATE, 0xffffffff);
3652 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3653 tw32_f(cpu_base + CPU_PC, pc);
3657 return (i == iters) ? -EBUSY : 0;
3660 /* tp->lock is held. */
3661 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3663 const struct tg3_firmware_hdr *fw_hdr;
3666 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3668 /* Firmware blob starts with version numbers, followed by
3669 start address and length. We are setting complete length.
3670 length = end_address_of_bss - start_address_of_text.
3671 Remainder is the blob to be loaded contiguously
3672 from start address. */
3674 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3675 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3680 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3681 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3686 /* Now startup only the RX cpu. */
3687 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3688 be32_to_cpu(fw_hdr->base_addr));
3690 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3691 "should be %08x\n", __func__,
3692 tr32(RX_CPU_BASE + CPU_PC),
3693 be32_to_cpu(fw_hdr->base_addr));
3697 tg3_rxcpu_resume(tp);
3702 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3704 const int iters = 1000;
3708 /* Wait for boot code to complete initialization and enter service
3709 * loop. It is then safe to download service patches
3711 for (i = 0; i < iters; i++) {
3712 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3719 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3723 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3725 netdev_warn(tp->dev,
3726 "Other patches exist. Not downloading EEE patch\n");
3733 /* tp->lock is held. */
3734 static void tg3_load_57766_firmware(struct tg3 *tp)
3736 struct tg3_firmware_hdr *fw_hdr;
3738 if (!tg3_flag(tp, NO_NVRAM))
3741 if (tg3_validate_rxcpu_state(tp))
3747 /* This firmware blob has a different format than older firmware
3748 * releases as given below. The main difference is we have fragmented
3749 * data to be written to non-contiguous locations.
3751 * In the beginning we have a firmware header identical to other
3752 * firmware which consists of version, base addr and length. The length
3753 * here is unused and set to 0xffffffff.
3755 * This is followed by a series of firmware fragments which are
3756 * individually identical to previous firmware. i.e. they have the
3757 * firmware header and followed by data for that fragment. The version
3758 * field of the individual fragment header is unused.
3761 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3762 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3765 if (tg3_rxcpu_pause(tp))
3768 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3769 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3771 tg3_rxcpu_resume(tp);
3774 /* tp->lock is held. */
3775 static int tg3_load_tso_firmware(struct tg3 *tp)
3777 const struct tg3_firmware_hdr *fw_hdr;
3778 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3781 if (!tg3_flag(tp, FW_TSO))
3784 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3786 /* Firmware blob starts with version numbers, followed by
3787 start address and length. We are setting complete length.
3788 length = end_address_of_bss - start_address_of_text.
3789 Remainder is the blob to be loaded contiguously
3790 from start address. */
3792 cpu_scratch_size = tp->fw_len;
3794 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3795 cpu_base = RX_CPU_BASE;
3796 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3798 cpu_base = TX_CPU_BASE;
3799 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3800 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3803 err = tg3_load_firmware_cpu(tp, cpu_base,
3804 cpu_scratch_base, cpu_scratch_size,
3809 /* Now startup the cpu. */
3810 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3811 be32_to_cpu(fw_hdr->base_addr));
3814 "%s fails to set CPU PC, is %08x should be %08x\n",
3815 __func__, tr32(cpu_base + CPU_PC),
3816 be32_to_cpu(fw_hdr->base_addr));
3820 tg3_resume_cpu(tp, cpu_base);
3825 /* tp->lock is held. */
3826 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3828 u32 addr_high, addr_low;
3831 addr_high = ((tp->dev->dev_addr[0] << 8) |
3832 tp->dev->dev_addr[1]);
3833 addr_low = ((tp->dev->dev_addr[2] << 24) |
3834 (tp->dev->dev_addr[3] << 16) |
3835 (tp->dev->dev_addr[4] << 8) |
3836 (tp->dev->dev_addr[5] << 0));
3837 for (i = 0; i < 4; i++) {
3838 if (i == 1 && skip_mac_1)
3840 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3841 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3844 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3845 tg3_asic_rev(tp) == ASIC_REV_5704) {
3846 for (i = 0; i < 12; i++) {
3847 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3848 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3852 addr_high = (tp->dev->dev_addr[0] +
3853 tp->dev->dev_addr[1] +
3854 tp->dev->dev_addr[2] +
3855 tp->dev->dev_addr[3] +
3856 tp->dev->dev_addr[4] +
3857 tp->dev->dev_addr[5]) &
3858 TX_BACKOFF_SEED_MASK;
3859 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3862 static void tg3_enable_register_access(struct tg3 *tp)
3865 * Make sure register accesses (indirect or otherwise) will function
3868 pci_write_config_dword(tp->pdev,
3869 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3872 static int tg3_power_up(struct tg3 *tp)
3876 tg3_enable_register_access(tp);
3878 err = pci_set_power_state(tp->pdev, PCI_D0);
3880 /* Switch out of Vaux if it is a NIC */
3881 tg3_pwrsrc_switch_to_vmain(tp);
3883 netdev_err(tp->dev, "Transition to D0 failed\n");
3889 static int tg3_setup_phy(struct tg3 *, int);
3891 static int tg3_power_down_prepare(struct tg3 *tp)
3894 bool device_should_wake, do_low_power;
3896 tg3_enable_register_access(tp);
3898 /* Restore the CLKREQ setting. */
3899 if (tg3_flag(tp, CLKREQ_BUG))
3900 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3901 PCI_EXP_LNKCTL_CLKREQ_EN);
3903 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3904 tw32(TG3PCI_MISC_HOST_CTRL,
3905 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3907 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3908 tg3_flag(tp, WOL_ENABLE);
3910 if (tg3_flag(tp, USE_PHYLIB)) {
3911 do_low_power = false;
3912 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3913 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3914 struct phy_device *phydev;
3915 u32 phyid, advertising;
3917 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3919 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3921 tp->link_config.speed = phydev->speed;
3922 tp->link_config.duplex = phydev->duplex;
3923 tp->link_config.autoneg = phydev->autoneg;
3924 tp->link_config.advertising = phydev->advertising;
3926 advertising = ADVERTISED_TP |
3928 ADVERTISED_Autoneg |
3929 ADVERTISED_10baseT_Half;
3931 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3932 if (tg3_flag(tp, WOL_SPEED_100MB))
3934 ADVERTISED_100baseT_Half |
3935 ADVERTISED_100baseT_Full |
3936 ADVERTISED_10baseT_Full;
3938 advertising |= ADVERTISED_10baseT_Full;
3941 phydev->advertising = advertising;
3943 phy_start_aneg(phydev);
3945 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3946 if (phyid != PHY_ID_BCMAC131) {
3947 phyid &= PHY_BCM_OUI_MASK;
3948 if (phyid == PHY_BCM_OUI_1 ||
3949 phyid == PHY_BCM_OUI_2 ||
3950 phyid == PHY_BCM_OUI_3)
3951 do_low_power = true;
3955 do_low_power = true;
3957 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3958 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3960 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3961 tg3_setup_phy(tp, 0);
3964 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3967 val = tr32(GRC_VCPU_EXT_CTRL);
3968 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3969 } else if (!tg3_flag(tp, ENABLE_ASF)) {
3973 for (i = 0; i < 200; i++) {
3974 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3975 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3980 if (tg3_flag(tp, WOL_CAP))
3981 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3982 WOL_DRV_STATE_SHUTDOWN |
3986 if (device_should_wake) {
3989 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3991 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3992 tg3_phy_auxctl_write(tp,
3993 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3994 MII_TG3_AUXCTL_PCTL_WOL_EN |
3995 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3996 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4000 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4001 mac_mode = MAC_MODE_PORT_MODE_GMII;
4002 else if (tp->phy_flags &
4003 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4004 if (tp->link_config.active_speed == SPEED_1000)
4005 mac_mode = MAC_MODE_PORT_MODE_GMII;
4007 mac_mode = MAC_MODE_PORT_MODE_MII;
4009 mac_mode = MAC_MODE_PORT_MODE_MII;
4011 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4012 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4013 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4014 SPEED_100 : SPEED_10;
4015 if (tg3_5700_link_polarity(tp, speed))
4016 mac_mode |= MAC_MODE_LINK_POLARITY;
4018 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4021 mac_mode = MAC_MODE_PORT_MODE_TBI;
4024 if (!tg3_flag(tp, 5750_PLUS))
4025 tw32(MAC_LED_CTRL, tp->led_ctrl);
4027 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4028 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4029 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4030 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4032 if (tg3_flag(tp, ENABLE_APE))
4033 mac_mode |= MAC_MODE_APE_TX_EN |
4034 MAC_MODE_APE_RX_EN |
4035 MAC_MODE_TDE_ENABLE;
4037 tw32_f(MAC_MODE, mac_mode);
4040 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4044 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4045 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4046 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4049 base_val = tp->pci_clock_ctrl;
4050 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4051 CLOCK_CTRL_TXCLK_DISABLE);
4053 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4054 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4055 } else if (tg3_flag(tp, 5780_CLASS) ||
4056 tg3_flag(tp, CPMU_PRESENT) ||
4057 tg3_asic_rev(tp) == ASIC_REV_5906) {
4059 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4060 u32 newbits1, newbits2;
4062 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4063 tg3_asic_rev(tp) == ASIC_REV_5701) {
4064 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4065 CLOCK_CTRL_TXCLK_DISABLE |
4067 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4068 } else if (tg3_flag(tp, 5705_PLUS)) {
4069 newbits1 = CLOCK_CTRL_625_CORE;
4070 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4072 newbits1 = CLOCK_CTRL_ALTCLK;
4073 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4076 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4079 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4082 if (!tg3_flag(tp, 5705_PLUS)) {
4085 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4086 tg3_asic_rev(tp) == ASIC_REV_5701) {
4087 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4088 CLOCK_CTRL_TXCLK_DISABLE |
4089 CLOCK_CTRL_44MHZ_CORE);
4091 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4094 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4095 tp->pci_clock_ctrl | newbits3, 40);
4099 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4100 tg3_power_down_phy(tp, do_low_power);
4102 tg3_frob_aux_power(tp, true);
4104 /* Workaround for unstable PLL clock */
4105 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4106 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4107 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4108 u32 val = tr32(0x7d00);
4110 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4112 if (!tg3_flag(tp, ENABLE_ASF)) {
4115 err = tg3_nvram_lock(tp);
4116 tg3_halt_cpu(tp, RX_CPU_BASE);
4118 tg3_nvram_unlock(tp);
4122 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4127 static void tg3_power_down(struct tg3 *tp)
4129 tg3_power_down_prepare(tp);
4131 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4132 pci_set_power_state(tp->pdev, PCI_D3hot);
4135 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4137 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4138 case MII_TG3_AUX_STAT_10HALF:
4140 *duplex = DUPLEX_HALF;
4143 case MII_TG3_AUX_STAT_10FULL:
4145 *duplex = DUPLEX_FULL;
4148 case MII_TG3_AUX_STAT_100HALF:
4150 *duplex = DUPLEX_HALF;
4153 case MII_TG3_AUX_STAT_100FULL:
4155 *duplex = DUPLEX_FULL;
4158 case MII_TG3_AUX_STAT_1000HALF:
4159 *speed = SPEED_1000;
4160 *duplex = DUPLEX_HALF;
4163 case MII_TG3_AUX_STAT_1000FULL:
4164 *speed = SPEED_1000;
4165 *duplex = DUPLEX_FULL;
4169 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4170 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4172 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4176 *speed = SPEED_UNKNOWN;
4177 *duplex = DUPLEX_UNKNOWN;
4182 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4187 new_adv = ADVERTISE_CSMA;
4188 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4189 new_adv |= mii_advertise_flowctrl(flowctrl);
4191 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4195 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4196 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4198 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4199 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4200 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4202 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4207 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4210 tw32(TG3_CPMU_EEE_MODE,
4211 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4213 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4218 /* Advertise 100-BaseTX EEE ability */
4219 if (advertise & ADVERTISED_100baseT_Full)
4220 val |= MDIO_AN_EEE_ADV_100TX;
4221 /* Advertise 1000-BaseT EEE ability */
4222 if (advertise & ADVERTISED_1000baseT_Full)
4223 val |= MDIO_AN_EEE_ADV_1000T;
4224 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4228 switch (tg3_asic_rev(tp)) {
4230 case ASIC_REV_57765:
4231 case ASIC_REV_57766:
4233 /* If we advertised any eee advertisements above... */
4235 val = MII_TG3_DSP_TAP26_ALNOKO |
4236 MII_TG3_DSP_TAP26_RMRXSTO |
4237 MII_TG3_DSP_TAP26_OPCSINPT;
4238 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4242 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4243 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4244 MII_TG3_DSP_CH34TP2_HIBW01);
4247 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4256 static void tg3_phy_copper_begin(struct tg3 *tp)
4258 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4259 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4262 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4263 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4264 adv = ADVERTISED_10baseT_Half |
4265 ADVERTISED_10baseT_Full;
4266 if (tg3_flag(tp, WOL_SPEED_100MB))
4267 adv |= ADVERTISED_100baseT_Half |
4268 ADVERTISED_100baseT_Full;
4269 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
4270 adv |= ADVERTISED_1000baseT_Half |
4271 ADVERTISED_1000baseT_Full;
4273 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4275 adv = tp->link_config.advertising;
4276 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4277 adv &= ~(ADVERTISED_1000baseT_Half |
4278 ADVERTISED_1000baseT_Full);
4280 fc = tp->link_config.flowctrl;
4283 tg3_phy_autoneg_cfg(tp, adv, fc);
4285 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4286 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4287 /* Normally during power down we want to autonegotiate
4288 * the lowest possible speed for WOL. However, to avoid
4289 * link flap, we leave it untouched.
4294 tg3_writephy(tp, MII_BMCR,
4295 BMCR_ANENABLE | BMCR_ANRESTART);
4298 u32 bmcr, orig_bmcr;
4300 tp->link_config.active_speed = tp->link_config.speed;
4301 tp->link_config.active_duplex = tp->link_config.duplex;
4303 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4304 /* With autoneg disabled, 5715 only links up when the
4305 * advertisement register has the configured speed
4308 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4312 switch (tp->link_config.speed) {
4318 bmcr |= BMCR_SPEED100;
4322 bmcr |= BMCR_SPEED1000;
4326 if (tp->link_config.duplex == DUPLEX_FULL)
4327 bmcr |= BMCR_FULLDPLX;
4329 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4330 (bmcr != orig_bmcr)) {
4331 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4332 for (i = 0; i < 1500; i++) {
4336 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4337 tg3_readphy(tp, MII_BMSR, &tmp))
4339 if (!(tmp & BMSR_LSTATUS)) {
4344 tg3_writephy(tp, MII_BMCR, bmcr);
4350 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4354 /* Turn off tap power management. */
4355 /* Set Extended packet length bit */
4356 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4358 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4359 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4360 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4361 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4362 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4369 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4371 u32 advmsk, tgtadv, advertising;
4373 advertising = tp->link_config.advertising;
4374 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4376 advmsk = ADVERTISE_ALL;
4377 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4378 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4379 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4382 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4385 if ((*lcladv & advmsk) != tgtadv)
4388 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4391 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4393 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4397 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4398 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4399 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4400 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4401 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4403 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4406 if (tg3_ctrl != tgtadv)
4413 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4417 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4420 if (tg3_readphy(tp, MII_STAT1000, &val))
4423 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4426 if (tg3_readphy(tp, MII_LPA, rmtadv))
4429 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4430 tp->link_config.rmt_adv = lpeth;
4435 static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
4437 if (curr_link_up != tp->link_up) {
4439 netif_carrier_on(tp->dev);
4441 netif_carrier_off(tp->dev);
4442 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4443 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4446 tg3_link_report(tp);
4453 static void tg3_clear_mac_status(struct tg3 *tp)
4458 MAC_STATUS_SYNC_CHANGED |
4459 MAC_STATUS_CFG_CHANGED |
4460 MAC_STATUS_MI_COMPLETION |
4461 MAC_STATUS_LNKSTATE_CHANGED);
4465 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4467 int current_link_up;
4469 u32 lcl_adv, rmt_adv;
4474 tg3_clear_mac_status(tp);
4476 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4478 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4482 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4484 /* Some third-party PHYs need to be reset on link going
4487 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4488 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4489 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4491 tg3_readphy(tp, MII_BMSR, &bmsr);
4492 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4493 !(bmsr & BMSR_LSTATUS))
4499 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4500 tg3_readphy(tp, MII_BMSR, &bmsr);
4501 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4502 !tg3_flag(tp, INIT_COMPLETE))
4505 if (!(bmsr & BMSR_LSTATUS)) {
4506 err = tg3_init_5401phy_dsp(tp);
4510 tg3_readphy(tp, MII_BMSR, &bmsr);
4511 for (i = 0; i < 1000; i++) {
4513 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4514 (bmsr & BMSR_LSTATUS)) {
4520 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4521 TG3_PHY_REV_BCM5401_B0 &&
4522 !(bmsr & BMSR_LSTATUS) &&
4523 tp->link_config.active_speed == SPEED_1000) {
4524 err = tg3_phy_reset(tp);
4526 err = tg3_init_5401phy_dsp(tp);
4531 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4532 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4533 /* 5701 {A0,B0} CRC bug workaround */
4534 tg3_writephy(tp, 0x15, 0x0a75);
4535 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4536 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4537 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4540 /* Clear pending interrupts... */
4541 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4542 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4544 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4545 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4546 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4547 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4549 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4550 tg3_asic_rev(tp) == ASIC_REV_5701) {
4551 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4552 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4553 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4555 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4558 current_link_up = 0;
4559 current_speed = SPEED_UNKNOWN;
4560 current_duplex = DUPLEX_UNKNOWN;
4561 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4562 tp->link_config.rmt_adv = 0;
4564 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4565 err = tg3_phy_auxctl_read(tp,
4566 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4568 if (!err && !(val & (1 << 10))) {
4569 tg3_phy_auxctl_write(tp,
4570 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4577 for (i = 0; i < 100; i++) {
4578 tg3_readphy(tp, MII_BMSR, &bmsr);
4579 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4580 (bmsr & BMSR_LSTATUS))
4585 if (bmsr & BMSR_LSTATUS) {
4588 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4589 for (i = 0; i < 2000; i++) {
4591 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4596 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4601 for (i = 0; i < 200; i++) {
4602 tg3_readphy(tp, MII_BMCR, &bmcr);
4603 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4605 if (bmcr && bmcr != 0x7fff)
4613 tp->link_config.active_speed = current_speed;
4614 tp->link_config.active_duplex = current_duplex;
4616 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4617 if ((bmcr & BMCR_ANENABLE) &&
4618 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4619 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4620 current_link_up = 1;
4622 if (!(bmcr & BMCR_ANENABLE) &&
4623 tp->link_config.speed == current_speed &&
4624 tp->link_config.duplex == current_duplex) {
4625 current_link_up = 1;
4629 if (current_link_up == 1 &&
4630 tp->link_config.active_duplex == DUPLEX_FULL) {
4633 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4634 reg = MII_TG3_FET_GEN_STAT;
4635 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4637 reg = MII_TG3_EXT_STAT;
4638 bit = MII_TG3_EXT_STAT_MDIX;
4641 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4642 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4644 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4649 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4650 tg3_phy_copper_begin(tp);
4652 if (tg3_flag(tp, ROBOSWITCH)) {
4653 current_link_up = 1;
4654 /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4655 current_speed = SPEED_1000;
4656 current_duplex = DUPLEX_FULL;
4657 tp->link_config.active_speed = current_speed;
4658 tp->link_config.active_duplex = current_duplex;
4661 tg3_readphy(tp, MII_BMSR, &bmsr);
4662 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4663 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4664 current_link_up = 1;
4667 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4668 if (current_link_up == 1) {
4669 if (tp->link_config.active_speed == SPEED_100 ||
4670 tp->link_config.active_speed == SPEED_10)
4671 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4673 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4674 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4675 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4677 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4679 /* In order for the 5750 core in BCM4785 chip to work properly
4680 * in RGMII mode, the Led Control Register must be set up.
4682 if (tg3_flag(tp, RGMII_MODE)) {
4683 u32 led_ctrl = tr32(MAC_LED_CTRL);
4684 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4686 if (tp->link_config.active_speed == SPEED_10)
4687 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4688 else if (tp->link_config.active_speed == SPEED_100)
4689 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4690 LED_CTRL_100MBPS_ON);
4691 else if (tp->link_config.active_speed == SPEED_1000)
4692 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4693 LED_CTRL_1000MBPS_ON);
4695 tw32(MAC_LED_CTRL, led_ctrl);
4699 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4700 if (tp->link_config.active_duplex == DUPLEX_HALF)
4701 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4703 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4704 if (current_link_up == 1 &&
4705 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4706 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4708 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4711 /* ??? Without this setting Netgear GA302T PHY does not
4712 * ??? send/receive packets...
4714 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4715 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
4716 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4717 tw32_f(MAC_MI_MODE, tp->mi_mode);
4721 tw32_f(MAC_MODE, tp->mac_mode);
4724 tg3_phy_eee_adjust(tp, current_link_up);
4726 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4727 /* Polled via timer. */
4728 tw32_f(MAC_EVENT, 0);
4730 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4734 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
4735 current_link_up == 1 &&
4736 tp->link_config.active_speed == SPEED_1000 &&
4737 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4740 (MAC_STATUS_SYNC_CHANGED |
4741 MAC_STATUS_CFG_CHANGED));
4744 NIC_SRAM_FIRMWARE_MBOX,
4745 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4748 /* Prevent send BD corruption. */
4749 if (tg3_flag(tp, CLKREQ_BUG)) {
4750 if (tp->link_config.active_speed == SPEED_100 ||
4751 tp->link_config.active_speed == SPEED_10)
4752 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4753 PCI_EXP_LNKCTL_CLKREQ_EN);
4755 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4756 PCI_EXP_LNKCTL_CLKREQ_EN);
4759 tg3_test_and_report_link_chg(tp, current_link_up);
4764 struct tg3_fiber_aneginfo {
4766 #define ANEG_STATE_UNKNOWN 0
4767 #define ANEG_STATE_AN_ENABLE 1
4768 #define ANEG_STATE_RESTART_INIT 2
4769 #define ANEG_STATE_RESTART 3
4770 #define ANEG_STATE_DISABLE_LINK_OK 4
4771 #define ANEG_STATE_ABILITY_DETECT_INIT 5
4772 #define ANEG_STATE_ABILITY_DETECT 6
4773 #define ANEG_STATE_ACK_DETECT_INIT 7
4774 #define ANEG_STATE_ACK_DETECT 8
4775 #define ANEG_STATE_COMPLETE_ACK_INIT 9
4776 #define ANEG_STATE_COMPLETE_ACK 10
4777 #define ANEG_STATE_IDLE_DETECT_INIT 11
4778 #define ANEG_STATE_IDLE_DETECT 12
4779 #define ANEG_STATE_LINK_OK 13
4780 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
4781 #define ANEG_STATE_NEXT_PAGE_WAIT 15
4784 #define MR_AN_ENABLE 0x00000001
4785 #define MR_RESTART_AN 0x00000002
4786 #define MR_AN_COMPLETE 0x00000004
4787 #define MR_PAGE_RX 0x00000008
4788 #define MR_NP_LOADED 0x00000010
4789 #define MR_TOGGLE_TX 0x00000020
4790 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
4791 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
4792 #define MR_LP_ADV_SYM_PAUSE 0x00000100
4793 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
4794 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4795 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4796 #define MR_LP_ADV_NEXT_PAGE 0x00001000
4797 #define MR_TOGGLE_RX 0x00002000
4798 #define MR_NP_RX 0x00004000
4800 #define MR_LINK_OK 0x80000000
4802 unsigned long link_time, cur_time;
4804 u32 ability_match_cfg;
4805 int ability_match_count;
4807 char ability_match, idle_match, ack_match;
4809 u32 txconfig, rxconfig;
4810 #define ANEG_CFG_NP 0x00000080
4811 #define ANEG_CFG_ACK 0x00000040
4812 #define ANEG_CFG_RF2 0x00000020
4813 #define ANEG_CFG_RF1 0x00000010
4814 #define ANEG_CFG_PS2 0x00000001
4815 #define ANEG_CFG_PS1 0x00008000
4816 #define ANEG_CFG_HD 0x00004000
4817 #define ANEG_CFG_FD 0x00002000
4818 #define ANEG_CFG_INVAL 0x00001f06
4823 #define ANEG_TIMER_ENAB 2
4824 #define ANEG_FAILED -1
4826 #define ANEG_STATE_SETTLE_TIME 10000
4828 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4829 struct tg3_fiber_aneginfo *ap)
4832 unsigned long delta;
4836 if (ap->state == ANEG_STATE_UNKNOWN) {
4840 ap->ability_match_cfg = 0;
4841 ap->ability_match_count = 0;
4842 ap->ability_match = 0;
4848 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4849 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4851 if (rx_cfg_reg != ap->ability_match_cfg) {
4852 ap->ability_match_cfg = rx_cfg_reg;
4853 ap->ability_match = 0;
4854 ap->ability_match_count = 0;
4856 if (++ap->ability_match_count > 1) {
4857 ap->ability_match = 1;
4858 ap->ability_match_cfg = rx_cfg_reg;
4861 if (rx_cfg_reg & ANEG_CFG_ACK)
4869 ap->ability_match_cfg = 0;
4870 ap->ability_match_count = 0;
4871 ap->ability_match = 0;
4877 ap->rxconfig = rx_cfg_reg;
4880 switch (ap->state) {
4881 case ANEG_STATE_UNKNOWN:
4882 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4883 ap->state = ANEG_STATE_AN_ENABLE;
4886 case ANEG_STATE_AN_ENABLE:
4887 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4888 if (ap->flags & MR_AN_ENABLE) {
4891 ap->ability_match_cfg = 0;
4892 ap->ability_match_count = 0;
4893 ap->ability_match = 0;
4897 ap->state = ANEG_STATE_RESTART_INIT;
4899 ap->state = ANEG_STATE_DISABLE_LINK_OK;
4903 case ANEG_STATE_RESTART_INIT:
4904 ap->link_time = ap->cur_time;
4905 ap->flags &= ~(MR_NP_LOADED);
4907 tw32(MAC_TX_AUTO_NEG, 0);
4908 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4909 tw32_f(MAC_MODE, tp->mac_mode);
4912 ret = ANEG_TIMER_ENAB;
4913 ap->state = ANEG_STATE_RESTART;
4916 case ANEG_STATE_RESTART:
4917 delta = ap->cur_time - ap->link_time;
4918 if (delta > ANEG_STATE_SETTLE_TIME)
4919 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4921 ret = ANEG_TIMER_ENAB;
4924 case ANEG_STATE_DISABLE_LINK_OK:
4928 case ANEG_STATE_ABILITY_DETECT_INIT:
4929 ap->flags &= ~(MR_TOGGLE_TX);
4930 ap->txconfig = ANEG_CFG_FD;
4931 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4932 if (flowctrl & ADVERTISE_1000XPAUSE)
4933 ap->txconfig |= ANEG_CFG_PS1;
4934 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4935 ap->txconfig |= ANEG_CFG_PS2;
4936 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4937 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4938 tw32_f(MAC_MODE, tp->mac_mode);
4941 ap->state = ANEG_STATE_ABILITY_DETECT;
4944 case ANEG_STATE_ABILITY_DETECT:
4945 if (ap->ability_match != 0 && ap->rxconfig != 0)
4946 ap->state = ANEG_STATE_ACK_DETECT_INIT;
4949 case ANEG_STATE_ACK_DETECT_INIT:
4950 ap->txconfig |= ANEG_CFG_ACK;
4951 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4952 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4953 tw32_f(MAC_MODE, tp->mac_mode);
4956 ap->state = ANEG_STATE_ACK_DETECT;
4959 case ANEG_STATE_ACK_DETECT:
4960 if (ap->ack_match != 0) {
4961 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4962 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4963 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4965 ap->state = ANEG_STATE_AN_ENABLE;
4967 } else if (ap->ability_match != 0 &&
4968 ap->rxconfig == 0) {
4969 ap->state = ANEG_STATE_AN_ENABLE;
4973 case ANEG_STATE_COMPLETE_ACK_INIT:
4974 if (ap->rxconfig & ANEG_CFG_INVAL) {
4978 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4979 MR_LP_ADV_HALF_DUPLEX |
4980 MR_LP_ADV_SYM_PAUSE |
4981 MR_LP_ADV_ASYM_PAUSE |
4982 MR_LP_ADV_REMOTE_FAULT1 |
4983 MR_LP_ADV_REMOTE_FAULT2 |
4984 MR_LP_ADV_NEXT_PAGE |
4987 if (ap->rxconfig & ANEG_CFG_FD)
4988 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4989 if (ap->rxconfig & ANEG_CFG_HD)
4990 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4991 if (ap->rxconfig & ANEG_CFG_PS1)
4992 ap->flags |= MR_LP_ADV_SYM_PAUSE;
4993 if (ap->rxconfig & ANEG_CFG_PS2)
4994 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4995 if (ap->rxconfig & ANEG_CFG_RF1)
4996 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4997 if (ap->rxconfig & ANEG_CFG_RF2)
4998 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4999 if (ap->rxconfig & ANEG_CFG_NP)
5000 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5002 ap->link_time = ap->cur_time;
5004 ap->flags ^= (MR_TOGGLE_TX);
5005 if (ap->rxconfig & 0x0008)
5006 ap->flags |= MR_TOGGLE_RX;
5007 if (ap->rxconfig & ANEG_CFG_NP)
5008 ap->flags |= MR_NP_RX;
5009 ap->flags |= MR_PAGE_RX;
5011 ap->state = ANEG_STATE_COMPLETE_ACK;
5012 ret = ANEG_TIMER_ENAB;
5015 case ANEG_STATE_COMPLETE_ACK:
5016 if (ap->ability_match != 0 &&
5017 ap->rxconfig == 0) {
5018 ap->state = ANEG_STATE_AN_ENABLE;
5021 delta = ap->cur_time - ap->link_time;
5022 if (delta > ANEG_STATE_SETTLE_TIME) {
5023 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5024 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5026 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5027 !(ap->flags & MR_NP_RX)) {
5028 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5036 case ANEG_STATE_IDLE_DETECT_INIT:
5037 ap->link_time = ap->cur_time;
5038 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5039 tw32_f(MAC_MODE, tp->mac_mode);
5042 ap->state = ANEG_STATE_IDLE_DETECT;
5043 ret = ANEG_TIMER_ENAB;
5046 case ANEG_STATE_IDLE_DETECT:
5047 if (ap->ability_match != 0 &&
5048 ap->rxconfig == 0) {
5049 ap->state = ANEG_STATE_AN_ENABLE;
5052 delta = ap->cur_time - ap->link_time;
5053 if (delta > ANEG_STATE_SETTLE_TIME) {
5054 /* XXX another gem from the Broadcom driver :( */
5055 ap->state = ANEG_STATE_LINK_OK;
5059 case ANEG_STATE_LINK_OK:
5060 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5064 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5065 /* ??? unimplemented */
5068 case ANEG_STATE_NEXT_PAGE_WAIT:
5069 /* ??? unimplemented */
5080 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5083 struct tg3_fiber_aneginfo aninfo;
5084 int status = ANEG_FAILED;
5088 tw32_f(MAC_TX_AUTO_NEG, 0);
5090 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5091 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5094 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5097 memset(&aninfo, 0, sizeof(aninfo));
5098 aninfo.flags |= MR_AN_ENABLE;
5099 aninfo.state = ANEG_STATE_UNKNOWN;
5100 aninfo.cur_time = 0;
5102 while (++tick < 195000) {
5103 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5104 if (status == ANEG_DONE || status == ANEG_FAILED)
5110 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5111 tw32_f(MAC_MODE, tp->mac_mode);
5114 *txflags = aninfo.txconfig;
5115 *rxflags = aninfo.flags;
5117 if (status == ANEG_DONE &&
5118 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5119 MR_LP_ADV_FULL_DUPLEX)))
5125 static void tg3_init_bcm8002(struct tg3 *tp)
5127 u32 mac_status = tr32(MAC_STATUS);
5130 /* Reset when initting first time or we have a link. */
5131 if (tg3_flag(tp, INIT_COMPLETE) &&
5132 !(mac_status & MAC_STATUS_PCS_SYNCED))
5135 /* Set PLL lock range. */
5136 tg3_writephy(tp, 0x16, 0x8007);
5139 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5141 /* Wait for reset to complete. */
5142 /* XXX schedule_timeout() ... */
5143 for (i = 0; i < 500; i++)
5146 /* Config mode; select PMA/Ch 1 regs. */
5147 tg3_writephy(tp, 0x10, 0x8411);
5149 /* Enable auto-lock and comdet, select txclk for tx. */
5150 tg3_writephy(tp, 0x11, 0x0a10);
5152 tg3_writephy(tp, 0x18, 0x00a0);
5153 tg3_writephy(tp, 0x16, 0x41ff);
5155 /* Assert and deassert POR. */
5156 tg3_writephy(tp, 0x13, 0x0400);
5158 tg3_writephy(tp, 0x13, 0x0000);
5160 tg3_writephy(tp, 0x11, 0x0a50);
5162 tg3_writephy(tp, 0x11, 0x0a10);
5164 /* Wait for signal to stabilize */
5165 /* XXX schedule_timeout() ... */
5166 for (i = 0; i < 15000; i++)
5169 /* Deselect the channel register so we can read the PHYID
5172 tg3_writephy(tp, 0x10, 0x8011);
5175 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5178 u32 sg_dig_ctrl, sg_dig_status;
5179 u32 serdes_cfg, expected_sg_dig_ctrl;
5180 int workaround, port_a;
5181 int current_link_up;
5184 expected_sg_dig_ctrl = 0;
5187 current_link_up = 0;
5189 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5190 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5192 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5195 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5196 /* preserve bits 20-23 for voltage regulator */
5197 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5200 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5202 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5203 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5205 u32 val = serdes_cfg;
5211 tw32_f(MAC_SERDES_CFG, val);
5214 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5216 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5217 tg3_setup_flow_control(tp, 0, 0);
5218 current_link_up = 1;
5223 /* Want auto-negotiation. */
5224 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5226 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5227 if (flowctrl & ADVERTISE_1000XPAUSE)
5228 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5229 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5230 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5232 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5233 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5234 tp->serdes_counter &&
5235 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5236 MAC_STATUS_RCVD_CFG)) ==
5237 MAC_STATUS_PCS_SYNCED)) {
5238 tp->serdes_counter--;
5239 current_link_up = 1;
5244 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5245 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5247 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5249 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5250 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5251 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5252 MAC_STATUS_SIGNAL_DET)) {
5253 sg_dig_status = tr32(SG_DIG_STATUS);
5254 mac_status = tr32(MAC_STATUS);
5256 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5257 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5258 u32 local_adv = 0, remote_adv = 0;
5260 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5261 local_adv |= ADVERTISE_1000XPAUSE;
5262 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5263 local_adv |= ADVERTISE_1000XPSE_ASYM;
5265 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5266 remote_adv |= LPA_1000XPAUSE;
5267 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5268 remote_adv |= LPA_1000XPAUSE_ASYM;
5270 tp->link_config.rmt_adv =
5271 mii_adv_to_ethtool_adv_x(remote_adv);
5273 tg3_setup_flow_control(tp, local_adv, remote_adv);
5274 current_link_up = 1;
5275 tp->serdes_counter = 0;
5276 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5277 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5278 if (tp->serdes_counter)
5279 tp->serdes_counter--;
5282 u32 val = serdes_cfg;
5289 tw32_f(MAC_SERDES_CFG, val);
5292 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5295 /* Link parallel detection - link is up */
5296 /* only if we have PCS_SYNC and not */
5297 /* receiving config code words */
5298 mac_status = tr32(MAC_STATUS);
5299 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5300 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5301 tg3_setup_flow_control(tp, 0, 0);
5302 current_link_up = 1;
5304 TG3_PHYFLG_PARALLEL_DETECT;
5305 tp->serdes_counter =
5306 SERDES_PARALLEL_DET_TIMEOUT;
5308 goto restart_autoneg;
5312 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5313 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5317 return current_link_up;
5320 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5322 int current_link_up = 0;
5324 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5327 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5328 u32 txflags, rxflags;
5331 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5332 u32 local_adv = 0, remote_adv = 0;
5334 if (txflags & ANEG_CFG_PS1)
5335 local_adv |= ADVERTISE_1000XPAUSE;
5336 if (txflags & ANEG_CFG_PS2)
5337 local_adv |= ADVERTISE_1000XPSE_ASYM;
5339 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5340 remote_adv |= LPA_1000XPAUSE;
5341 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5342 remote_adv |= LPA_1000XPAUSE_ASYM;
5344 tp->link_config.rmt_adv =
5345 mii_adv_to_ethtool_adv_x(remote_adv);
5347 tg3_setup_flow_control(tp, local_adv, remote_adv);
5349 current_link_up = 1;
5351 for (i = 0; i < 30; i++) {
5354 (MAC_STATUS_SYNC_CHANGED |
5355 MAC_STATUS_CFG_CHANGED));
5357 if ((tr32(MAC_STATUS) &
5358 (MAC_STATUS_SYNC_CHANGED |
5359 MAC_STATUS_CFG_CHANGED)) == 0)
5363 mac_status = tr32(MAC_STATUS);
5364 if (current_link_up == 0 &&
5365 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5366 !(mac_status & MAC_STATUS_RCVD_CFG))
5367 current_link_up = 1;
5369 tg3_setup_flow_control(tp, 0, 0);
5371 /* Forcing 1000FD link up. */
5372 current_link_up = 1;
5374 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5377 tw32_f(MAC_MODE, tp->mac_mode);
5382 return current_link_up;
5385 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5388 u16 orig_active_speed;
5389 u8 orig_active_duplex;
5391 int current_link_up;
5394 orig_pause_cfg = tp->link_config.active_flowctrl;
5395 orig_active_speed = tp->link_config.active_speed;
5396 orig_active_duplex = tp->link_config.active_duplex;
5398 if (!tg3_flag(tp, HW_AUTONEG) &&
5400 tg3_flag(tp, INIT_COMPLETE)) {
5401 mac_status = tr32(MAC_STATUS);
5402 mac_status &= (MAC_STATUS_PCS_SYNCED |
5403 MAC_STATUS_SIGNAL_DET |
5404 MAC_STATUS_CFG_CHANGED |
5405 MAC_STATUS_RCVD_CFG);
5406 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5407 MAC_STATUS_SIGNAL_DET)) {
5408 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5409 MAC_STATUS_CFG_CHANGED));
5414 tw32_f(MAC_TX_AUTO_NEG, 0);
5416 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5417 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5418 tw32_f(MAC_MODE, tp->mac_mode);
5421 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5422 tg3_init_bcm8002(tp);
5424 /* Enable link change event even when serdes polling. */
5425 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5428 current_link_up = 0;
5429 tp->link_config.rmt_adv = 0;
5430 mac_status = tr32(MAC_STATUS);
5432 if (tg3_flag(tp, HW_AUTONEG))
5433 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5435 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5437 tp->napi[0].hw_status->status =
5438 (SD_STATUS_UPDATED |
5439 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5441 for (i = 0; i < 100; i++) {
5442 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5443 MAC_STATUS_CFG_CHANGED));
5445 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5446 MAC_STATUS_CFG_CHANGED |
5447 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5451 mac_status = tr32(MAC_STATUS);
5452 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5453 current_link_up = 0;
5454 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5455 tp->serdes_counter == 0) {
5456 tw32_f(MAC_MODE, (tp->mac_mode |
5457 MAC_MODE_SEND_CONFIGS));
5459 tw32_f(MAC_MODE, tp->mac_mode);
5463 if (current_link_up == 1) {
5464 tp->link_config.active_speed = SPEED_1000;
5465 tp->link_config.active_duplex = DUPLEX_FULL;
5466 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5467 LED_CTRL_LNKLED_OVERRIDE |
5468 LED_CTRL_1000MBPS_ON));
5470 tp->link_config.active_speed = SPEED_UNKNOWN;
5471 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5472 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5473 LED_CTRL_LNKLED_OVERRIDE |
5474 LED_CTRL_TRAFFIC_OVERRIDE));
5477 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5478 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5479 if (orig_pause_cfg != now_pause_cfg ||
5480 orig_active_speed != tp->link_config.active_speed ||
5481 orig_active_duplex != tp->link_config.active_duplex)
5482 tg3_link_report(tp);
5488 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5490 int current_link_up = 0, err = 0;
5492 u16 current_speed = SPEED_UNKNOWN;
5493 u8 current_duplex = DUPLEX_UNKNOWN;
5494 u32 local_adv, remote_adv, sgsr;
5496 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5497 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5498 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5499 (sgsr & SERDES_TG3_SGMII_MODE)) {
5504 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5506 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5507 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5509 current_link_up = 1;
5510 if (sgsr & SERDES_TG3_SPEED_1000) {
5511 current_speed = SPEED_1000;
5512 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5513 } else if (sgsr & SERDES_TG3_SPEED_100) {
5514 current_speed = SPEED_100;
5515 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5517 current_speed = SPEED_10;
5518 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5521 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5522 current_duplex = DUPLEX_FULL;
5524 current_duplex = DUPLEX_HALF;
5527 tw32_f(MAC_MODE, tp->mac_mode);
5530 tg3_clear_mac_status(tp);
5532 goto fiber_setup_done;
5535 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5536 tw32_f(MAC_MODE, tp->mac_mode);
5539 tg3_clear_mac_status(tp);
5544 tp->link_config.rmt_adv = 0;
5546 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5547 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5548 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5549 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5550 bmsr |= BMSR_LSTATUS;
5552 bmsr &= ~BMSR_LSTATUS;
5555 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5557 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5558 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5559 /* do nothing, just check for link up at the end */
5560 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5563 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5564 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5565 ADVERTISE_1000XPAUSE |
5566 ADVERTISE_1000XPSE_ASYM |
5569 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5570 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5572 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5573 tg3_writephy(tp, MII_ADVERTISE, newadv);
5574 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5575 tg3_writephy(tp, MII_BMCR, bmcr);
5577 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5578 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5579 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5586 bmcr &= ~BMCR_SPEED1000;
5587 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5589 if (tp->link_config.duplex == DUPLEX_FULL)
5590 new_bmcr |= BMCR_FULLDPLX;
5592 if (new_bmcr != bmcr) {
5593 /* BMCR_SPEED1000 is a reserved bit that needs
5594 * to be set on write.
5596 new_bmcr |= BMCR_SPEED1000;
5598 /* Force a linkdown */
5602 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5603 adv &= ~(ADVERTISE_1000XFULL |
5604 ADVERTISE_1000XHALF |
5606 tg3_writephy(tp, MII_ADVERTISE, adv);
5607 tg3_writephy(tp, MII_BMCR, bmcr |
5611 tg3_carrier_off(tp);
5613 tg3_writephy(tp, MII_BMCR, new_bmcr);
5615 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5616 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5617 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5618 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5619 bmsr |= BMSR_LSTATUS;
5621 bmsr &= ~BMSR_LSTATUS;
5623 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5627 if (bmsr & BMSR_LSTATUS) {
5628 current_speed = SPEED_1000;
5629 current_link_up = 1;
5630 if (bmcr & BMCR_FULLDPLX)
5631 current_duplex = DUPLEX_FULL;
5633 current_duplex = DUPLEX_HALF;
5638 if (bmcr & BMCR_ANENABLE) {
5641 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5642 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5643 common = local_adv & remote_adv;
5644 if (common & (ADVERTISE_1000XHALF |
5645 ADVERTISE_1000XFULL)) {
5646 if (common & ADVERTISE_1000XFULL)
5647 current_duplex = DUPLEX_FULL;
5649 current_duplex = DUPLEX_HALF;
5651 tp->link_config.rmt_adv =
5652 mii_adv_to_ethtool_adv_x(remote_adv);
5653 } else if (!tg3_flag(tp, 5780_CLASS)) {
5654 /* Link is up via parallel detect */
5656 current_link_up = 0;
5662 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5663 tg3_setup_flow_control(tp, local_adv, remote_adv);
5665 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5666 if (tp->link_config.active_duplex == DUPLEX_HALF)
5667 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5669 tw32_f(MAC_MODE, tp->mac_mode);
5672 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5674 tp->link_config.active_speed = current_speed;
5675 tp->link_config.active_duplex = current_duplex;
5677 tg3_test_and_report_link_chg(tp, current_link_up);
5681 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5683 if (tp->serdes_counter) {
5684 /* Give autoneg time to complete. */
5685 tp->serdes_counter--;
5690 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5693 tg3_readphy(tp, MII_BMCR, &bmcr);
5694 if (bmcr & BMCR_ANENABLE) {
5697 /* Select shadow register 0x1f */
5698 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5699 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5701 /* Select expansion interrupt status register */
5702 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5703 MII_TG3_DSP_EXP1_INT_STAT);
5704 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5705 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5707 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5708 /* We have signal detect and not receiving
5709 * config code words, link is up by parallel
5713 bmcr &= ~BMCR_ANENABLE;
5714 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5715 tg3_writephy(tp, MII_BMCR, bmcr);
5716 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5719 } else if (tp->link_up &&
5720 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5721 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5724 /* Select expansion interrupt status register */
5725 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5726 MII_TG3_DSP_EXP1_INT_STAT);
5727 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5731 /* Config code words received, turn on autoneg. */
5732 tg3_readphy(tp, MII_BMCR, &bmcr);
5733 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5735 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5741 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5746 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5747 err = tg3_setup_fiber_phy(tp, force_reset);
5748 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5749 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5751 err = tg3_setup_copper_phy(tp, force_reset);
5753 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
5756 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5757 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5759 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5764 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5765 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5766 tw32(GRC_MISC_CFG, val);
5769 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5770 (6 << TX_LENGTHS_IPG_SHIFT);
5771 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
5772 tg3_asic_rev(tp) == ASIC_REV_5762)
5773 val |= tr32(MAC_TX_LENGTHS) &
5774 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5775 TX_LENGTHS_CNT_DWN_VAL_MSK);
5777 if (tp->link_config.active_speed == SPEED_1000 &&
5778 tp->link_config.active_duplex == DUPLEX_HALF)
5779 tw32(MAC_TX_LENGTHS, val |
5780 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5782 tw32(MAC_TX_LENGTHS, val |
5783 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5785 if (!tg3_flag(tp, 5705_PLUS)) {
5787 tw32(HOSTCC_STAT_COAL_TICKS,
5788 tp->coal.stats_block_coalesce_usecs);
5790 tw32(HOSTCC_STAT_COAL_TICKS, 0);
5794 if (tg3_flag(tp, ASPM_WORKAROUND)) {
5795 val = tr32(PCIE_PWR_MGMT_THRESH);
5797 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5800 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5801 tw32(PCIE_PWR_MGMT_THRESH, val);
5807 /* tp->lock must be held */
5808 static u64 tg3_refclk_read(struct tg3 *tp)
5810 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
5811 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
5814 /* tp->lock must be held */
5815 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
5817 tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
5818 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
5819 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
5820 tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
5823 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
5824 static inline void tg3_full_unlock(struct tg3 *tp);
5825 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
5827 struct tg3 *tp = netdev_priv(dev);
5829 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5830 SOF_TIMESTAMPING_RX_SOFTWARE |
5831 SOF_TIMESTAMPING_SOFTWARE |
5832 SOF_TIMESTAMPING_TX_HARDWARE |
5833 SOF_TIMESTAMPING_RX_HARDWARE |
5834 SOF_TIMESTAMPING_RAW_HARDWARE;
5837 info->phc_index = ptp_clock_index(tp->ptp_clock);
5839 info->phc_index = -1;
5841 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
5843 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
5844 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
5845 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
5846 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
5850 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
5852 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5853 bool neg_adj = false;
5861 /* Frequency adjustment is performed using hardware with a 24 bit
5862 * accumulator and a programmable correction value. On each clk, the
5863 * correction value gets added to the accumulator and when it
5864 * overflows, the time counter is incremented/decremented.
5866 * So conversion from ppb to correction value is
5867 * ppb * (1 << 24) / 1000000000
5869 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
5870 TG3_EAV_REF_CLK_CORRECT_MASK;
5872 tg3_full_lock(tp, 0);
5875 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
5876 TG3_EAV_REF_CLK_CORRECT_EN |
5877 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
5879 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
5881 tg3_full_unlock(tp);
5886 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
5888 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5890 tg3_full_lock(tp, 0);
5891 tp->ptp_adjust += delta;
5892 tg3_full_unlock(tp);
5897 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
5901 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5903 tg3_full_lock(tp, 0);
5904 ns = tg3_refclk_read(tp);
5905 ns += tp->ptp_adjust;
5906 tg3_full_unlock(tp);
5908 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
5909 ts->tv_nsec = remainder;
5914 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
5915 const struct timespec *ts)
5918 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5920 ns = timespec_to_ns(ts);
5922 tg3_full_lock(tp, 0);
5923 tg3_refclk_write(tp, ns);
5925 tg3_full_unlock(tp);
5930 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
5931 struct ptp_clock_request *rq, int on)
5936 static const struct ptp_clock_info tg3_ptp_caps = {
5937 .owner = THIS_MODULE,
5938 .name = "tg3 clock",
5939 .max_adj = 250000000,
5944 .adjfreq = tg3_ptp_adjfreq,
5945 .adjtime = tg3_ptp_adjtime,
5946 .gettime = tg3_ptp_gettime,
5947 .settime = tg3_ptp_settime,
5948 .enable = tg3_ptp_enable,
5951 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
5952 struct skb_shared_hwtstamps *timestamp)
5954 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
5955 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
5959 /* tp->lock must be held */
5960 static void tg3_ptp_init(struct tg3 *tp)
5962 if (!tg3_flag(tp, PTP_CAPABLE))
5965 /* Initialize the hardware clock to the system time. */
5966 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
5968 tp->ptp_info = tg3_ptp_caps;
5971 /* tp->lock must be held */
5972 static void tg3_ptp_resume(struct tg3 *tp)
5974 if (!tg3_flag(tp, PTP_CAPABLE))
5977 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
5981 static void tg3_ptp_fini(struct tg3 *tp)
5983 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
5986 ptp_clock_unregister(tp->ptp_clock);
5987 tp->ptp_clock = NULL;
5991 static inline int tg3_irq_sync(struct tg3 *tp)
5993 return tp->irq_sync;
5996 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6000 dst = (u32 *)((u8 *)dst + off);
6001 for (i = 0; i < len; i += sizeof(u32))
6002 *dst++ = tr32(off + i);
6005 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6007 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6008 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6009 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6010 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6011 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6012 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6013 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6014 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6015 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6016 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6017 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6018 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6019 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6020 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6021 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6022 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6023 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6024 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6025 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6027 if (tg3_flag(tp, SUPPORT_MSIX))
6028 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6030 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6031 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6032 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6033 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6034 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6035 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6036 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6037 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6039 if (!tg3_flag(tp, 5705_PLUS)) {
6040 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6041 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6042 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6045 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6046 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6047 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6048 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6049 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6051 if (tg3_flag(tp, NVRAM))
6052 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6055 static void tg3_dump_state(struct tg3 *tp)
6060 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6064 if (tg3_flag(tp, PCI_EXPRESS)) {
6065 /* Read up to but not including private PCI registers */
6066 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6067 regs[i / sizeof(u32)] = tr32(i);
6069 tg3_dump_legacy_regs(tp, regs);
6071 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6072 if (!regs[i + 0] && !regs[i + 1] &&
6073 !regs[i + 2] && !regs[i + 3])
6076 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6078 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6083 for (i = 0; i < tp->irq_cnt; i++) {
6084 struct tg3_napi *tnapi = &tp->napi[i];
6086 /* SW status block */
6088 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6090 tnapi->hw_status->status,
6091 tnapi->hw_status->status_tag,
6092 tnapi->hw_status->rx_jumbo_consumer,
6093 tnapi->hw_status->rx_consumer,
6094 tnapi->hw_status->rx_mini_consumer,
6095 tnapi->hw_status->idx[0].rx_producer,
6096 tnapi->hw_status->idx[0].tx_consumer);
6099 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6101 tnapi->last_tag, tnapi->last_irq_tag,
6102 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6104 tnapi->prodring.rx_std_prod_idx,
6105 tnapi->prodring.rx_std_cons_idx,
6106 tnapi->prodring.rx_jmb_prod_idx,
6107 tnapi->prodring.rx_jmb_cons_idx);
6111 /* This is called whenever we suspect that the system chipset is re-
6112 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6113 * is bogus tx completions. We try to recover by setting the
6114 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6117 static void tg3_tx_recover(struct tg3 *tp)
6119 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6120 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6122 netdev_warn(tp->dev,
6123 "The system may be re-ordering memory-mapped I/O "
6124 "cycles to the network device, attempting to recover. "
6125 "Please report the problem to the driver maintainer "
6126 "and include system chipset information.\n");
6128 spin_lock(&tp->lock);
6129 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6130 spin_unlock(&tp->lock);
6133 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6135 /* Tell compiler to fetch tx indices from memory. */
6137 return tnapi->tx_pending -
6138 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6141 /* Tigon3 never reports partial packet sends. So we do not
6142 * need special logic to handle SKBs that have not had all
6143 * of their frags sent yet, like SunGEM does.
6145 static void tg3_tx(struct tg3_napi *tnapi)
6147 struct tg3 *tp = tnapi->tp;
6148 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6149 u32 sw_idx = tnapi->tx_cons;
6150 struct netdev_queue *txq;
6151 int index = tnapi - tp->napi;
6152 unsigned int pkts_compl = 0, bytes_compl = 0;
6154 if (tg3_flag(tp, ENABLE_TSS))
6157 txq = netdev_get_tx_queue(tp->dev, index);
6159 while (sw_idx != hw_idx) {
6160 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6161 struct sk_buff *skb = ri->skb;
6164 if (unlikely(skb == NULL)) {
6169 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6170 struct skb_shared_hwtstamps timestamp;
6171 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6172 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6174 tg3_hwclock_to_timestamp(tp, hwclock, ×tamp);
6176 skb_tstamp_tx(skb, ×tamp);
6179 pci_unmap_single(tp->pdev,
6180 dma_unmap_addr(ri, mapping),
6186 while (ri->fragmented) {
6187 ri->fragmented = false;
6188 sw_idx = NEXT_TX(sw_idx);
6189 ri = &tnapi->tx_buffers[sw_idx];
6192 sw_idx = NEXT_TX(sw_idx);
6194 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6195 ri = &tnapi->tx_buffers[sw_idx];
6196 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6199 pci_unmap_page(tp->pdev,
6200 dma_unmap_addr(ri, mapping),
6201 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6204 while (ri->fragmented) {
6205 ri->fragmented = false;
6206 sw_idx = NEXT_TX(sw_idx);
6207 ri = &tnapi->tx_buffers[sw_idx];
6210 sw_idx = NEXT_TX(sw_idx);
6214 bytes_compl += skb->len;
6218 if (unlikely(tx_bug)) {
6224 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6226 tnapi->tx_cons = sw_idx;
6228 /* Need to make the tx_cons update visible to tg3_start_xmit()
6229 * before checking for netif_queue_stopped(). Without the
6230 * memory barrier, there is a small possibility that tg3_start_xmit()
6231 * will miss it and cause the queue to be stopped forever.
6235 if (unlikely(netif_tx_queue_stopped(txq) &&
6236 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6237 __netif_tx_lock(txq, smp_processor_id());
6238 if (netif_tx_queue_stopped(txq) &&
6239 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6240 netif_tx_wake_queue(txq);
6241 __netif_tx_unlock(txq);
6245 static void tg3_frag_free(bool is_frag, void *data)
6248 put_page(virt_to_head_page(data));
6253 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6255 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6256 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6261 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6262 map_sz, PCI_DMA_FROMDEVICE);
6263 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6268 /* Returns size of skb allocated or < 0 on error.
6270 * We only need to fill in the address because the other members
6271 * of the RX descriptor are invariant, see tg3_init_rings.
6273 * Note the purposeful assymetry of cpu vs. chip accesses. For
6274 * posting buffers we only dirty the first cache line of the RX
6275 * descriptor (containing the address). Whereas for the RX status
6276 * buffers the cpu only reads the last cacheline of the RX descriptor
6277 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6279 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6280 u32 opaque_key, u32 dest_idx_unmasked,
6281 unsigned int *frag_size)
6283 struct tg3_rx_buffer_desc *desc;
6284 struct ring_info *map;
6287 int skb_size, data_size, dest_idx;
6289 switch (opaque_key) {
6290 case RXD_OPAQUE_RING_STD:
6291 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6292 desc = &tpr->rx_std[dest_idx];
6293 map = &tpr->rx_std_buffers[dest_idx];
6294 data_size = tp->rx_pkt_map_sz;
6297 case RXD_OPAQUE_RING_JUMBO:
6298 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6299 desc = &tpr->rx_jmb[dest_idx].std;
6300 map = &tpr->rx_jmb_buffers[dest_idx];
6301 data_size = TG3_RX_JMB_MAP_SZ;
6308 /* Do not overwrite any of the map or rp information
6309 * until we are sure we can commit to a new buffer.
6311 * Callers depend upon this behavior and assume that
6312 * we leave everything unchanged if we fail.
6314 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6315 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6316 if (skb_size <= PAGE_SIZE) {
6317 data = netdev_alloc_frag(skb_size);
6318 *frag_size = skb_size;
6320 data = kmalloc(skb_size, GFP_ATOMIC);
6326 mapping = pci_map_single(tp->pdev,
6327 data + TG3_RX_OFFSET(tp),
6329 PCI_DMA_FROMDEVICE);
6330 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6331 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6336 dma_unmap_addr_set(map, mapping, mapping);
6338 desc->addr_hi = ((u64)mapping >> 32);
6339 desc->addr_lo = ((u64)mapping & 0xffffffff);
6344 /* We only need to move over in the address because the other
6345 * members of the RX descriptor are invariant. See notes above
6346 * tg3_alloc_rx_data for full details.
6348 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6349 struct tg3_rx_prodring_set *dpr,
6350 u32 opaque_key, int src_idx,
6351 u32 dest_idx_unmasked)
6353 struct tg3 *tp = tnapi->tp;
6354 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6355 struct ring_info *src_map, *dest_map;
6356 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6359 switch (opaque_key) {
6360 case RXD_OPAQUE_RING_STD:
6361 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6362 dest_desc = &dpr->rx_std[dest_idx];
6363 dest_map = &dpr->rx_std_buffers[dest_idx];
6364 src_desc = &spr->rx_std[src_idx];
6365 src_map = &spr->rx_std_buffers[src_idx];
6368 case RXD_OPAQUE_RING_JUMBO:
6369 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6370 dest_desc = &dpr->rx_jmb[dest_idx].std;
6371 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6372 src_desc = &spr->rx_jmb[src_idx].std;
6373 src_map = &spr->rx_jmb_buffers[src_idx];
6380 dest_map->data = src_map->data;
6381 dma_unmap_addr_set(dest_map, mapping,
6382 dma_unmap_addr(src_map, mapping));
6383 dest_desc->addr_hi = src_desc->addr_hi;
6384 dest_desc->addr_lo = src_desc->addr_lo;
6386 /* Ensure that the update to the skb happens after the physical
6387 * addresses have been transferred to the new BD location.
6391 src_map->data = NULL;
6394 /* The RX ring scheme is composed of multiple rings which post fresh
6395 * buffers to the chip, and one special ring the chip uses to report
6396 * status back to the host.
6398 * The special ring reports the status of received packets to the
6399 * host. The chip does not write into the original descriptor the
6400 * RX buffer was obtained from. The chip simply takes the original
6401 * descriptor as provided by the host, updates the status and length
6402 * field, then writes this into the next status ring entry.
6404 * Each ring the host uses to post buffers to the chip is described
6405 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
6406 * it is first placed into the on-chip ram. When the packet's length
6407 * is known, it walks down the TG3_BDINFO entries to select the ring.
6408 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6409 * which is within the range of the new packet's length is chosen.
6411 * The "separate ring for rx status" scheme may sound queer, but it makes
6412 * sense from a cache coherency perspective. If only the host writes
6413 * to the buffer post rings, and only the chip writes to the rx status
6414 * rings, then cache lines never move beyond shared-modified state.
6415 * If both the host and chip were to write into the same ring, cache line
6416 * eviction could occur since both entities want it in an exclusive state.
6418 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6420 struct tg3 *tp = tnapi->tp;
6421 u32 work_mask, rx_std_posted = 0;
6422 u32 std_prod_idx, jmb_prod_idx;
6423 u32 sw_idx = tnapi->rx_rcb_ptr;
6426 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6428 hw_idx = *(tnapi->rx_rcb_prod_idx);
6430 * We need to order the read of hw_idx and the read of
6431 * the opaque cookie.
6436 std_prod_idx = tpr->rx_std_prod_idx;
6437 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6438 while (sw_idx != hw_idx && budget > 0) {
6439 struct ring_info *ri;
6440 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6442 struct sk_buff *skb;
6443 dma_addr_t dma_addr;
6444 u32 opaque_key, desc_idx, *post_ptr;
6448 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6449 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6450 if (opaque_key == RXD_OPAQUE_RING_STD) {
6451 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6452 dma_addr = dma_unmap_addr(ri, mapping);
6454 post_ptr = &std_prod_idx;
6456 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6457 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6458 dma_addr = dma_unmap_addr(ri, mapping);
6460 post_ptr = &jmb_prod_idx;
6462 goto next_pkt_nopost;
6464 work_mask |= opaque_key;
6466 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6467 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6469 tg3_recycle_rx(tnapi, tpr, opaque_key,
6470 desc_idx, *post_ptr);
6472 /* Other statistics kept track of by card. */
6477 prefetch(data + TG3_RX_OFFSET(tp));
6478 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6481 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6482 RXD_FLAG_PTPSTAT_PTPV1 ||
6483 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6484 RXD_FLAG_PTPSTAT_PTPV2) {
6485 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6486 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6489 if (len > TG3_RX_COPY_THRESH(tp)) {
6491 unsigned int frag_size;
6493 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6494 *post_ptr, &frag_size);
6498 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6499 PCI_DMA_FROMDEVICE);
6501 skb = build_skb(data, frag_size);
6503 tg3_frag_free(frag_size != 0, data);
6504 goto drop_it_no_recycle;
6506 skb_reserve(skb, TG3_RX_OFFSET(tp));
6507 /* Ensure that the update to the data happens
6508 * after the usage of the old DMA mapping.
6515 tg3_recycle_rx(tnapi, tpr, opaque_key,
6516 desc_idx, *post_ptr);
6518 skb = netdev_alloc_skb(tp->dev,
6519 len + TG3_RAW_IP_ALIGN);
6521 goto drop_it_no_recycle;
6523 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6524 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6526 data + TG3_RX_OFFSET(tp),
6528 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6533 tg3_hwclock_to_timestamp(tp, tstamp,
6534 skb_hwtstamps(skb));
6536 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6537 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6538 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6539 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6540 skb->ip_summed = CHECKSUM_UNNECESSARY;
6542 skb_checksum_none_assert(skb);
6544 skb->protocol = eth_type_trans(skb, tp->dev);
6546 if (len > (tp->dev->mtu + ETH_HLEN) &&
6547 skb->protocol != htons(ETH_P_8021Q)) {
6549 goto drop_it_no_recycle;
6552 if (desc->type_flags & RXD_FLAG_VLAN &&
6553 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6554 __vlan_hwaccel_put_tag(skb,
6555 desc->err_vlan & RXD_VLAN_MASK);
6557 napi_gro_receive(&tnapi->napi, skb);
6565 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6566 tpr->rx_std_prod_idx = std_prod_idx &
6567 tp->rx_std_ring_mask;
6568 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6569 tpr->rx_std_prod_idx);
6570 work_mask &= ~RXD_OPAQUE_RING_STD;
6575 sw_idx &= tp->rx_ret_ring_mask;
6577 /* Refresh hw_idx to see if there is new work */
6578 if (sw_idx == hw_idx) {
6579 hw_idx = *(tnapi->rx_rcb_prod_idx);
6584 /* ACK the status ring. */
6585 tnapi->rx_rcb_ptr = sw_idx;
6586 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6588 /* Refill RX ring(s). */
6589 if (!tg3_flag(tp, ENABLE_RSS)) {
6590 /* Sync BD data before updating mailbox */
6593 if (work_mask & RXD_OPAQUE_RING_STD) {
6594 tpr->rx_std_prod_idx = std_prod_idx &
6595 tp->rx_std_ring_mask;
6596 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6597 tpr->rx_std_prod_idx);
6599 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6600 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6601 tp->rx_jmb_ring_mask;
6602 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6603 tpr->rx_jmb_prod_idx);
6606 } else if (work_mask) {
6607 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6608 * updated before the producer indices can be updated.
6612 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6613 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6615 if (tnapi != &tp->napi[1]) {
6616 tp->rx_refill = true;
6617 napi_schedule(&tp->napi[1].napi);
6624 static void tg3_poll_link(struct tg3 *tp)
6626 /* handle link change and other phy events */
6627 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6628 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6630 if (sblk->status & SD_STATUS_LINK_CHG) {
6631 sblk->status = SD_STATUS_UPDATED |
6632 (sblk->status & ~SD_STATUS_LINK_CHG);
6633 spin_lock(&tp->lock);
6634 if (tg3_flag(tp, USE_PHYLIB)) {
6636 (MAC_STATUS_SYNC_CHANGED |
6637 MAC_STATUS_CFG_CHANGED |
6638 MAC_STATUS_MI_COMPLETION |
6639 MAC_STATUS_LNKSTATE_CHANGED));
6642 tg3_setup_phy(tp, 0);
6643 spin_unlock(&tp->lock);
6648 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6649 struct tg3_rx_prodring_set *dpr,
6650 struct tg3_rx_prodring_set *spr)
6652 u32 si, di, cpycnt, src_prod_idx;
6656 src_prod_idx = spr->rx_std_prod_idx;
6658 /* Make sure updates to the rx_std_buffers[] entries and the
6659 * standard producer index are seen in the correct order.
6663 if (spr->rx_std_cons_idx == src_prod_idx)
6666 if (spr->rx_std_cons_idx < src_prod_idx)
6667 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6669 cpycnt = tp->rx_std_ring_mask + 1 -
6670 spr->rx_std_cons_idx;
6672 cpycnt = min(cpycnt,
6673 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6675 si = spr->rx_std_cons_idx;
6676 di = dpr->rx_std_prod_idx;
6678 for (i = di; i < di + cpycnt; i++) {
6679 if (dpr->rx_std_buffers[i].data) {
6689 /* Ensure that updates to the rx_std_buffers ring and the
6690 * shadowed hardware producer ring from tg3_recycle_skb() are
6691 * ordered correctly WRT the skb check above.
6695 memcpy(&dpr->rx_std_buffers[di],
6696 &spr->rx_std_buffers[si],
6697 cpycnt * sizeof(struct ring_info));
6699 for (i = 0; i < cpycnt; i++, di++, si++) {
6700 struct tg3_rx_buffer_desc *sbd, *dbd;
6701 sbd = &spr->rx_std[si];
6702 dbd = &dpr->rx_std[di];
6703 dbd->addr_hi = sbd->addr_hi;
6704 dbd->addr_lo = sbd->addr_lo;
6707 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6708 tp->rx_std_ring_mask;
6709 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6710 tp->rx_std_ring_mask;
6714 src_prod_idx = spr->rx_jmb_prod_idx;
6716 /* Make sure updates to the rx_jmb_buffers[] entries and
6717 * the jumbo producer index are seen in the correct order.
6721 if (spr->rx_jmb_cons_idx == src_prod_idx)
6724 if (spr->rx_jmb_cons_idx < src_prod_idx)
6725 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6727 cpycnt = tp->rx_jmb_ring_mask + 1 -
6728 spr->rx_jmb_cons_idx;
6730 cpycnt = min(cpycnt,
6731 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6733 si = spr->rx_jmb_cons_idx;
6734 di = dpr->rx_jmb_prod_idx;
6736 for (i = di; i < di + cpycnt; i++) {
6737 if (dpr->rx_jmb_buffers[i].data) {
6747 /* Ensure that updates to the rx_jmb_buffers ring and the
6748 * shadowed hardware producer ring from tg3_recycle_skb() are
6749 * ordered correctly WRT the skb check above.
6753 memcpy(&dpr->rx_jmb_buffers[di],
6754 &spr->rx_jmb_buffers[si],
6755 cpycnt * sizeof(struct ring_info));
6757 for (i = 0; i < cpycnt; i++, di++, si++) {
6758 struct tg3_rx_buffer_desc *sbd, *dbd;
6759 sbd = &spr->rx_jmb[si].std;
6760 dbd = &dpr->rx_jmb[di].std;
6761 dbd->addr_hi = sbd->addr_hi;
6762 dbd->addr_lo = sbd->addr_lo;
6765 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6766 tp->rx_jmb_ring_mask;
6767 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6768 tp->rx_jmb_ring_mask;
6774 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6776 struct tg3 *tp = tnapi->tp;
6778 /* run TX completion thread */
6779 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6781 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6785 if (!tnapi->rx_rcb_prod_idx)
6788 /* run RX thread, within the bounds set by NAPI.
6789 * All RX "locking" is done by ensuring outside
6790 * code synchronizes with tg3->napi.poll()
6792 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6793 work_done += tg3_rx(tnapi, budget - work_done);
6795 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6796 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6798 u32 std_prod_idx = dpr->rx_std_prod_idx;
6799 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6801 tp->rx_refill = false;
6802 for (i = 1; i <= tp->rxq_cnt; i++)
6803 err |= tg3_rx_prodring_xfer(tp, dpr,
6804 &tp->napi[i].prodring);
6808 if (std_prod_idx != dpr->rx_std_prod_idx)
6809 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6810 dpr->rx_std_prod_idx);
6812 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6813 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6814 dpr->rx_jmb_prod_idx);
6819 tw32_f(HOSTCC_MODE, tp->coal_now);
6825 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6827 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6828 schedule_work(&tp->reset_task);
6831 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6833 cancel_work_sync(&tp->reset_task);
6834 tg3_flag_clear(tp, RESET_TASK_PENDING);
6835 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6838 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6840 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6841 struct tg3 *tp = tnapi->tp;
6843 struct tg3_hw_status *sblk = tnapi->hw_status;
6846 work_done = tg3_poll_work(tnapi, work_done, budget);
6848 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6851 if (unlikely(work_done >= budget))
6854 /* tp->last_tag is used in tg3_int_reenable() below
6855 * to tell the hw how much work has been processed,
6856 * so we must read it before checking for more work.
6858 tnapi->last_tag = sblk->status_tag;
6859 tnapi->last_irq_tag = tnapi->last_tag;
6862 /* check for RX/TX work to do */
6863 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6864 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6866 /* This test here is not race free, but will reduce
6867 * the number of interrupts by looping again.
6869 if (tnapi == &tp->napi[1] && tp->rx_refill)
6872 napi_complete(napi);
6873 /* Reenable interrupts. */
6874 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6876 /* This test here is synchronized by napi_schedule()
6877 * and napi_complete() to close the race condition.
6879 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6880 tw32(HOSTCC_MODE, tp->coalesce_mode |
6881 HOSTCC_MODE_ENABLE |
6892 /* work_done is guaranteed to be less than budget. */
6893 napi_complete(napi);
6894 tg3_reset_task_schedule(tp);
6898 static void tg3_process_error(struct tg3 *tp)
6901 bool real_error = false;
6903 if (tg3_flag(tp, ERROR_PROCESSED))
6906 /* Check Flow Attention register */
6907 val = tr32(HOSTCC_FLOW_ATTN);
6908 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6909 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
6913 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6914 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
6918 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6919 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
6928 tg3_flag_set(tp, ERROR_PROCESSED);
6929 tg3_reset_task_schedule(tp);
6932 static int tg3_poll(struct napi_struct *napi, int budget)
6934 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6935 struct tg3 *tp = tnapi->tp;
6937 struct tg3_hw_status *sblk = tnapi->hw_status;
6940 if (sblk->status & SD_STATUS_ERROR)
6941 tg3_process_error(tp);
6945 work_done = tg3_poll_work(tnapi, work_done, budget);
6947 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6950 if (unlikely(work_done >= budget))
6953 if (tg3_flag(tp, TAGGED_STATUS)) {
6954 /* tp->last_tag is used in tg3_int_reenable() below
6955 * to tell the hw how much work has been processed,
6956 * so we must read it before checking for more work.
6958 tnapi->last_tag = sblk->status_tag;
6959 tnapi->last_irq_tag = tnapi->last_tag;
6962 sblk->status &= ~SD_STATUS_UPDATED;
6964 if (likely(!tg3_has_work(tnapi))) {
6965 napi_complete(napi);
6966 tg3_int_reenable(tnapi);
6974 /* work_done is guaranteed to be less than budget. */
6975 napi_complete(napi);
6976 tg3_reset_task_schedule(tp);
6980 static void tg3_napi_disable(struct tg3 *tp)
6984 for (i = tp->irq_cnt - 1; i >= 0; i--)
6985 napi_disable(&tp->napi[i].napi);
6988 static void tg3_napi_enable(struct tg3 *tp)
6992 for (i = 0; i < tp->irq_cnt; i++)
6993 napi_enable(&tp->napi[i].napi);
6996 static void tg3_napi_init(struct tg3 *tp)
7000 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7001 for (i = 1; i < tp->irq_cnt; i++)
7002 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7005 static void tg3_napi_fini(struct tg3 *tp)
7009 for (i = 0; i < tp->irq_cnt; i++)
7010 netif_napi_del(&tp->napi[i].napi);
7013 static inline void tg3_netif_stop(struct tg3 *tp)
7015 tp->dev->trans_start = jiffies; /* prevent tx timeout */
7016 tg3_napi_disable(tp);
7017 netif_carrier_off(tp->dev);
7018 netif_tx_disable(tp->dev);
7021 /* tp->lock must be held */
7022 static inline void tg3_netif_start(struct tg3 *tp)
7026 /* NOTE: unconditional netif_tx_wake_all_queues is only
7027 * appropriate so long as all callers are assured to
7028 * have free tx slots (such as after tg3_init_hw)
7030 netif_tx_wake_all_queues(tp->dev);
7033 netif_carrier_on(tp->dev);
7035 tg3_napi_enable(tp);
7036 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7037 tg3_enable_ints(tp);
7040 static void tg3_irq_quiesce(struct tg3 *tp)
7044 BUG_ON(tp->irq_sync);
7049 for (i = 0; i < tp->irq_cnt; i++)
7050 synchronize_irq(tp->napi[i].irq_vec);
7053 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7054 * If irq_sync is non-zero, then the IRQ handler must be synchronized
7055 * with as well. Most of the time, this is not necessary except when
7056 * shutting down the device.
7058 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7060 spin_lock_bh(&tp->lock);
7062 tg3_irq_quiesce(tp);
7065 static inline void tg3_full_unlock(struct tg3 *tp)
7067 spin_unlock_bh(&tp->lock);
7070 /* One-shot MSI handler - Chip automatically disables interrupt
7071 * after sending MSI so driver doesn't have to do it.
7073 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7075 struct tg3_napi *tnapi = dev_id;
7076 struct tg3 *tp = tnapi->tp;
7078 prefetch(tnapi->hw_status);
7080 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7082 if (likely(!tg3_irq_sync(tp)))
7083 napi_schedule(&tnapi->napi);
7088 /* MSI ISR - No need to check for interrupt sharing and no need to
7089 * flush status block and interrupt mailbox. PCI ordering rules
7090 * guarantee that MSI will arrive after the status block.
7092 static irqreturn_t tg3_msi(int irq, void *dev_id)
7094 struct tg3_napi *tnapi = dev_id;
7095 struct tg3 *tp = tnapi->tp;
7097 prefetch(tnapi->hw_status);
7099 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7101 * Writing any value to intr-mbox-0 clears PCI INTA# and
7102 * chip-internal interrupt pending events.
7103 * Writing non-zero to intr-mbox-0 additional tells the
7104 * NIC to stop sending us irqs, engaging "in-intr-handler"
7107 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7108 if (likely(!tg3_irq_sync(tp)))
7109 napi_schedule(&tnapi->napi);
7111 return IRQ_RETVAL(1);
7114 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7116 struct tg3_napi *tnapi = dev_id;
7117 struct tg3 *tp = tnapi->tp;
7118 struct tg3_hw_status *sblk = tnapi->hw_status;
7119 unsigned int handled = 1;
7121 /* In INTx mode, it is possible for the interrupt to arrive at
7122 * the CPU before the status block posted prior to the interrupt.
7123 * Reading the PCI State register will confirm whether the
7124 * interrupt is ours and will flush the status block.
7126 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7127 if (tg3_flag(tp, CHIP_RESETTING) ||
7128 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7135 * Writing any value to intr-mbox-0 clears PCI INTA# and
7136 * chip-internal interrupt pending events.
7137 * Writing non-zero to intr-mbox-0 additional tells the
7138 * NIC to stop sending us irqs, engaging "in-intr-handler"
7141 * Flush the mailbox to de-assert the IRQ immediately to prevent
7142 * spurious interrupts. The flush impacts performance but
7143 * excessive spurious interrupts can be worse in some cases.
7145 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7146 if (tg3_irq_sync(tp))
7148 sblk->status &= ~SD_STATUS_UPDATED;
7149 if (likely(tg3_has_work(tnapi))) {
7150 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7151 napi_schedule(&tnapi->napi);
7153 /* No work, shared interrupt perhaps? re-enable
7154 * interrupts, and flush that PCI write
7156 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7160 return IRQ_RETVAL(handled);
7163 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7165 struct tg3_napi *tnapi = dev_id;
7166 struct tg3 *tp = tnapi->tp;
7167 struct tg3_hw_status *sblk = tnapi->hw_status;
7168 unsigned int handled = 1;
7170 /* In INTx mode, it is possible for the interrupt to arrive at
7171 * the CPU before the status block posted prior to the interrupt.
7172 * Reading the PCI State register will confirm whether the
7173 * interrupt is ours and will flush the status block.
7175 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7176 if (tg3_flag(tp, CHIP_RESETTING) ||
7177 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7184 * writing any value to intr-mbox-0 clears PCI INTA# and
7185 * chip-internal interrupt pending events.
7186 * writing non-zero to intr-mbox-0 additional tells the
7187 * NIC to stop sending us irqs, engaging "in-intr-handler"
7190 * Flush the mailbox to de-assert the IRQ immediately to prevent
7191 * spurious interrupts. The flush impacts performance but
7192 * excessive spurious interrupts can be worse in some cases.
7194 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7197 * In a shared interrupt configuration, sometimes other devices'
7198 * interrupts will scream. We record the current status tag here
7199 * so that the above check can report that the screaming interrupts
7200 * are unhandled. Eventually they will be silenced.
7202 tnapi->last_irq_tag = sblk->status_tag;
7204 if (tg3_irq_sync(tp))
7207 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7209 napi_schedule(&tnapi->napi);
7212 return IRQ_RETVAL(handled);
7215 /* ISR for interrupt test */
7216 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7218 struct tg3_napi *tnapi = dev_id;
7219 struct tg3 *tp = tnapi->tp;
7220 struct tg3_hw_status *sblk = tnapi->hw_status;
7222 if ((sblk->status & SD_STATUS_UPDATED) ||
7223 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7224 tg3_disable_ints(tp);
7225 return IRQ_RETVAL(1);
7227 return IRQ_RETVAL(0);
7230 #ifdef CONFIG_NET_POLL_CONTROLLER
7231 static void tg3_poll_controller(struct net_device *dev)
7234 struct tg3 *tp = netdev_priv(dev);
7236 if (tg3_irq_sync(tp))
7239 for (i = 0; i < tp->irq_cnt; i++)
7240 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7244 static void tg3_tx_timeout(struct net_device *dev)
7246 struct tg3 *tp = netdev_priv(dev);
7248 if (netif_msg_tx_err(tp)) {
7249 netdev_err(dev, "transmit timed out, resetting\n");
7253 tg3_reset_task_schedule(tp);
7256 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7257 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7259 u32 base = (u32) mapping & 0xffffffff;
7261 return (base > 0xffffdcc0) && (base + len + 8 < base);
7264 /* Test for DMA addresses > 40-bit */
7265 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7268 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7269 if (tg3_flag(tp, 40BIT_DMA_BUG))
7270 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7277 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7278 dma_addr_t mapping, u32 len, u32 flags,
7281 txbd->addr_hi = ((u64) mapping >> 32);
7282 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7283 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7284 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7287 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7288 dma_addr_t map, u32 len, u32 flags,
7291 struct tg3 *tp = tnapi->tp;
7294 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7297 if (tg3_4g_overflow_test(map, len))
7300 if (tg3_40bit_overflow_test(tp, map, len))
7303 if (tp->dma_limit) {
7304 u32 prvidx = *entry;
7305 u32 tmp_flag = flags & ~TXD_FLAG_END;
7306 while (len > tp->dma_limit && *budget) {
7307 u32 frag_len = tp->dma_limit;
7308 len -= tp->dma_limit;
7310 /* Avoid the 8byte DMA problem */
7312 len += tp->dma_limit / 2;
7313 frag_len = tp->dma_limit / 2;
7316 tnapi->tx_buffers[*entry].fragmented = true;
7318 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7319 frag_len, tmp_flag, mss, vlan);
7322 *entry = NEXT_TX(*entry);
7329 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7330 len, flags, mss, vlan);
7332 *entry = NEXT_TX(*entry);
7335 tnapi->tx_buffers[prvidx].fragmented = false;
7339 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7340 len, flags, mss, vlan);
7341 *entry = NEXT_TX(*entry);
7347 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7350 struct sk_buff *skb;
7351 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7356 pci_unmap_single(tnapi->tp->pdev,
7357 dma_unmap_addr(txb, mapping),
7361 while (txb->fragmented) {
7362 txb->fragmented = false;
7363 entry = NEXT_TX(entry);
7364 txb = &tnapi->tx_buffers[entry];
7367 for (i = 0; i <= last; i++) {
7368 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7370 entry = NEXT_TX(entry);
7371 txb = &tnapi->tx_buffers[entry];
7373 pci_unmap_page(tnapi->tp->pdev,
7374 dma_unmap_addr(txb, mapping),
7375 skb_frag_size(frag), PCI_DMA_TODEVICE);
7377 while (txb->fragmented) {
7378 txb->fragmented = false;
7379 entry = NEXT_TX(entry);
7380 txb = &tnapi->tx_buffers[entry];
7385 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7386 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7387 struct sk_buff **pskb,
7388 u32 *entry, u32 *budget,
7389 u32 base_flags, u32 mss, u32 vlan)
7391 struct tg3 *tp = tnapi->tp;
7392 struct sk_buff *new_skb, *skb = *pskb;
7393 dma_addr_t new_addr = 0;
7396 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7397 new_skb = skb_copy(skb, GFP_ATOMIC);
7399 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7401 new_skb = skb_copy_expand(skb,
7402 skb_headroom(skb) + more_headroom,
7403 skb_tailroom(skb), GFP_ATOMIC);
7409 /* New SKB is guaranteed to be linear. */
7410 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7412 /* Make sure the mapping succeeded */
7413 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7414 dev_kfree_skb(new_skb);
7417 u32 save_entry = *entry;
7419 base_flags |= TXD_FLAG_END;
7421 tnapi->tx_buffers[*entry].skb = new_skb;
7422 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7425 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7426 new_skb->len, base_flags,
7428 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7429 dev_kfree_skb(new_skb);
7440 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7442 /* Use GSO to workaround a rare TSO bug that may be triggered when the
7443 * TSO header is greater than 80 bytes.
7445 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7447 struct sk_buff *segs, *nskb;
7448 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7450 /* Estimate the number of fragments in the worst case */
7451 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7452 netif_stop_queue(tp->dev);
7454 /* netif_tx_stop_queue() must be done before checking
7455 * checking tx index in tg3_tx_avail() below, because in
7456 * tg3_tx(), we update tx index before checking for
7457 * netif_tx_queue_stopped().
7460 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7461 return NETDEV_TX_BUSY;
7463 netif_wake_queue(tp->dev);
7466 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7468 goto tg3_tso_bug_end;
7474 tg3_start_xmit(nskb, tp->dev);
7480 return NETDEV_TX_OK;
7483 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7484 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7486 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7488 struct tg3 *tp = netdev_priv(dev);
7489 u32 len, entry, base_flags, mss, vlan = 0;
7491 int i = -1, would_hit_hwbug;
7493 struct tg3_napi *tnapi;
7494 struct netdev_queue *txq;
7497 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7498 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7499 if (tg3_flag(tp, ENABLE_TSS))
7502 budget = tg3_tx_avail(tnapi);
7504 /* We are running in BH disabled context with netif_tx_lock
7505 * and TX reclaim runs via tp->napi.poll inside of a software
7506 * interrupt. Furthermore, IRQ processing runs lockless so we have
7507 * no IRQ context deadlocks to worry about either. Rejoice!
7509 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7510 if (!netif_tx_queue_stopped(txq)) {
7511 netif_tx_stop_queue(txq);
7513 /* This is a hard error, log it. */
7515 "BUG! Tx Ring full when queue awake!\n");
7517 return NETDEV_TX_BUSY;
7520 entry = tnapi->tx_prod;
7522 if (skb->ip_summed == CHECKSUM_PARTIAL)
7523 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7525 mss = skb_shinfo(skb)->gso_size;
7528 u32 tcp_opt_len, hdr_len;
7530 if (skb_header_cloned(skb) &&
7531 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7535 tcp_opt_len = tcp_optlen(skb);
7537 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7539 if (!skb_is_gso_v6(skb)) {
7541 iph->tot_len = htons(mss + hdr_len);
7544 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7545 tg3_flag(tp, TSO_BUG))
7546 return tg3_tso_bug(tp, skb);
7548 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7549 TXD_FLAG_CPU_POST_DMA);
7551 if (tg3_flag(tp, HW_TSO_1) ||
7552 tg3_flag(tp, HW_TSO_2) ||
7553 tg3_flag(tp, HW_TSO_3)) {
7554 tcp_hdr(skb)->check = 0;
7555 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7557 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7562 if (tg3_flag(tp, HW_TSO_3)) {
7563 mss |= (hdr_len & 0xc) << 12;
7565 base_flags |= 0x00000010;
7566 base_flags |= (hdr_len & 0x3e0) << 5;
7567 } else if (tg3_flag(tp, HW_TSO_2))
7568 mss |= hdr_len << 9;
7569 else if (tg3_flag(tp, HW_TSO_1) ||
7570 tg3_asic_rev(tp) == ASIC_REV_5705) {
7571 if (tcp_opt_len || iph->ihl > 5) {
7574 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7575 mss |= (tsflags << 11);
7578 if (tcp_opt_len || iph->ihl > 5) {
7581 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7582 base_flags |= tsflags << 12;
7587 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7588 !mss && skb->len > VLAN_ETH_FRAME_LEN)
7589 base_flags |= TXD_FLAG_JMB_PKT;
7591 if (vlan_tx_tag_present(skb)) {
7592 base_flags |= TXD_FLAG_VLAN;
7593 vlan = vlan_tx_tag_get(skb);
7596 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7597 tg3_flag(tp, TX_TSTAMP_EN)) {
7598 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7599 base_flags |= TXD_FLAG_HWTSTAMP;
7602 len = skb_headlen(skb);
7604 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7605 if (pci_dma_mapping_error(tp->pdev, mapping))
7609 tnapi->tx_buffers[entry].skb = skb;
7610 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7612 would_hit_hwbug = 0;
7614 if (tg3_flag(tp, 5701_DMA_BUG))
7615 would_hit_hwbug = 1;
7617 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7618 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7620 would_hit_hwbug = 1;
7621 } else if (skb_shinfo(skb)->nr_frags > 0) {
7624 if (!tg3_flag(tp, HW_TSO_1) &&
7625 !tg3_flag(tp, HW_TSO_2) &&
7626 !tg3_flag(tp, HW_TSO_3))
7629 /* Now loop through additional data
7630 * fragments, and queue them.
7632 last = skb_shinfo(skb)->nr_frags - 1;
7633 for (i = 0; i <= last; i++) {
7634 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7636 len = skb_frag_size(frag);
7637 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7638 len, DMA_TO_DEVICE);
7640 tnapi->tx_buffers[entry].skb = NULL;
7641 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7643 if (dma_mapping_error(&tp->pdev->dev, mapping))
7647 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7649 ((i == last) ? TXD_FLAG_END : 0),
7651 would_hit_hwbug = 1;
7657 if (would_hit_hwbug) {
7658 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7660 /* If the workaround fails due to memory/mapping
7661 * failure, silently drop this packet.
7663 entry = tnapi->tx_prod;
7664 budget = tg3_tx_avail(tnapi);
7665 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7666 base_flags, mss, vlan))
7670 skb_tx_timestamp(skb);
7671 netdev_tx_sent_queue(txq, skb->len);
7673 /* Sync BD data before updating mailbox */
7676 /* Packets are ready, update Tx producer idx local and on card. */
7677 tw32_tx_mbox(tnapi->prodmbox, entry);
7679 tnapi->tx_prod = entry;
7680 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7681 netif_tx_stop_queue(txq);
7683 /* netif_tx_stop_queue() must be done before checking
7684 * checking tx index in tg3_tx_avail() below, because in
7685 * tg3_tx(), we update tx index before checking for
7686 * netif_tx_queue_stopped().
7689 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7690 netif_tx_wake_queue(txq);
7694 return NETDEV_TX_OK;
7697 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7698 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7703 return NETDEV_TX_OK;
7706 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7709 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7710 MAC_MODE_PORT_MODE_MASK);
7712 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7714 if (!tg3_flag(tp, 5705_PLUS))
7715 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7717 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7718 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7720 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7722 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7724 if (tg3_flag(tp, 5705_PLUS) ||
7725 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7726 tg3_asic_rev(tp) == ASIC_REV_5700)
7727 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7730 tw32(MAC_MODE, tp->mac_mode);
7734 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7736 u32 val, bmcr, mac_mode, ptest = 0;
7738 tg3_phy_toggle_apd(tp, false);
7739 tg3_phy_toggle_automdix(tp, 0);
7741 if (extlpbk && tg3_phy_set_extloopbk(tp))
7744 bmcr = BMCR_FULLDPLX;
7749 bmcr |= BMCR_SPEED100;
7753 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7755 bmcr |= BMCR_SPEED100;
7758 bmcr |= BMCR_SPEED1000;
7763 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7764 tg3_readphy(tp, MII_CTRL1000, &val);
7765 val |= CTL1000_AS_MASTER |
7766 CTL1000_ENABLE_MASTER;
7767 tg3_writephy(tp, MII_CTRL1000, val);
7769 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7770 MII_TG3_FET_PTEST_TRIM_2;
7771 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7774 bmcr |= BMCR_LOOPBACK;
7776 tg3_writephy(tp, MII_BMCR, bmcr);
7778 /* The write needs to be flushed for the FETs */
7779 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7780 tg3_readphy(tp, MII_BMCR, &bmcr);
7784 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7785 tg3_asic_rev(tp) == ASIC_REV_5785) {
7786 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7787 MII_TG3_FET_PTEST_FRC_TX_LINK |
7788 MII_TG3_FET_PTEST_FRC_TX_LOCK);
7790 /* The write needs to be flushed for the AC131 */
7791 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7794 /* Reset to prevent losing 1st rx packet intermittently */
7795 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7796 tg3_flag(tp, 5780_CLASS)) {
7797 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7799 tw32_f(MAC_RX_MODE, tp->rx_mode);
7802 mac_mode = tp->mac_mode &
7803 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7804 if (speed == SPEED_1000)
7805 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7807 mac_mode |= MAC_MODE_PORT_MODE_MII;
7809 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
7810 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7812 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7813 mac_mode &= ~MAC_MODE_LINK_POLARITY;
7814 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7815 mac_mode |= MAC_MODE_LINK_POLARITY;
7817 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7818 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7821 tw32(MAC_MODE, mac_mode);
7827 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7829 struct tg3 *tp = netdev_priv(dev);
7831 if (features & NETIF_F_LOOPBACK) {
7832 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7835 spin_lock_bh(&tp->lock);
7836 tg3_mac_loopback(tp, true);
7837 netif_carrier_on(tp->dev);
7838 spin_unlock_bh(&tp->lock);
7839 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7841 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7844 spin_lock_bh(&tp->lock);
7845 tg3_mac_loopback(tp, false);
7846 /* Force link status check */
7847 tg3_setup_phy(tp, 1);
7848 spin_unlock_bh(&tp->lock);
7849 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7853 static netdev_features_t tg3_fix_features(struct net_device *dev,
7854 netdev_features_t features)
7856 struct tg3 *tp = netdev_priv(dev);
7858 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7859 features &= ~NETIF_F_ALL_TSO;
7864 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7866 netdev_features_t changed = dev->features ^ features;
7868 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7869 tg3_set_loopback(dev, features);
7874 static void tg3_rx_prodring_free(struct tg3 *tp,
7875 struct tg3_rx_prodring_set *tpr)
7879 if (tpr != &tp->napi[0].prodring) {
7880 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7881 i = (i + 1) & tp->rx_std_ring_mask)
7882 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7885 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7886 for (i = tpr->rx_jmb_cons_idx;
7887 i != tpr->rx_jmb_prod_idx;
7888 i = (i + 1) & tp->rx_jmb_ring_mask) {
7889 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7897 for (i = 0; i <= tp->rx_std_ring_mask; i++)
7898 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7901 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7902 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7903 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7908 /* Initialize rx rings for packet processing.
7910 * The chip has been shut down and the driver detached from
7911 * the networking, so no interrupts or new tx packets will
7912 * end up in the driver. tp->{tx,}lock are held and thus
7915 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7916 struct tg3_rx_prodring_set *tpr)
7918 u32 i, rx_pkt_dma_sz;
7920 tpr->rx_std_cons_idx = 0;
7921 tpr->rx_std_prod_idx = 0;
7922 tpr->rx_jmb_cons_idx = 0;
7923 tpr->rx_jmb_prod_idx = 0;
7925 if (tpr != &tp->napi[0].prodring) {
7926 memset(&tpr->rx_std_buffers[0], 0,
7927 TG3_RX_STD_BUFF_RING_SIZE(tp));
7928 if (tpr->rx_jmb_buffers)
7929 memset(&tpr->rx_jmb_buffers[0], 0,
7930 TG3_RX_JMB_BUFF_RING_SIZE(tp));
7934 /* Zero out all descriptors. */
7935 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7937 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7938 if (tg3_flag(tp, 5780_CLASS) &&
7939 tp->dev->mtu > ETH_DATA_LEN)
7940 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7941 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7943 /* Initialize invariants of the rings, we only set this
7944 * stuff once. This works because the card does not
7945 * write into the rx buffer posting rings.
7947 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7948 struct tg3_rx_buffer_desc *rxd;
7950 rxd = &tpr->rx_std[i];
7951 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7952 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7953 rxd->opaque = (RXD_OPAQUE_RING_STD |
7954 (i << RXD_OPAQUE_INDEX_SHIFT));
7957 /* Now allocate fresh SKBs for each rx ring. */
7958 for (i = 0; i < tp->rx_pending; i++) {
7959 unsigned int frag_size;
7961 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
7963 netdev_warn(tp->dev,
7964 "Using a smaller RX standard ring. Only "
7965 "%d out of %d buffers were allocated "
7966 "successfully\n", i, tp->rx_pending);
7974 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7977 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7979 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7982 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7983 struct tg3_rx_buffer_desc *rxd;
7985 rxd = &tpr->rx_jmb[i].std;
7986 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7987 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7989 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7990 (i << RXD_OPAQUE_INDEX_SHIFT));
7993 for (i = 0; i < tp->rx_jumbo_pending; i++) {
7994 unsigned int frag_size;
7996 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
7998 netdev_warn(tp->dev,
7999 "Using a smaller RX jumbo ring. Only %d "
8000 "out of %d buffers were allocated "
8001 "successfully\n", i, tp->rx_jumbo_pending);
8004 tp->rx_jumbo_pending = i;
8013 tg3_rx_prodring_free(tp, tpr);
8017 static void tg3_rx_prodring_fini(struct tg3 *tp,
8018 struct tg3_rx_prodring_set *tpr)
8020 kfree(tpr->rx_std_buffers);
8021 tpr->rx_std_buffers = NULL;
8022 kfree(tpr->rx_jmb_buffers);
8023 tpr->rx_jmb_buffers = NULL;
8025 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8026 tpr->rx_std, tpr->rx_std_mapping);
8030 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8031 tpr->rx_jmb, tpr->rx_jmb_mapping);
8036 static int tg3_rx_prodring_init(struct tg3 *tp,
8037 struct tg3_rx_prodring_set *tpr)
8039 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8041 if (!tpr->rx_std_buffers)
8044 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8045 TG3_RX_STD_RING_BYTES(tp),
8046 &tpr->rx_std_mapping,
8051 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8052 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8054 if (!tpr->rx_jmb_buffers)
8057 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8058 TG3_RX_JMB_RING_BYTES(tp),
8059 &tpr->rx_jmb_mapping,
8068 tg3_rx_prodring_fini(tp, tpr);
8072 /* Free up pending packets in all rx/tx rings.
8074 * The chip has been shut down and the driver detached from
8075 * the networking, so no interrupts or new tx packets will
8076 * end up in the driver. tp->{tx,}lock is not held and we are not
8077 * in an interrupt context and thus may sleep.
8079 static void tg3_free_rings(struct tg3 *tp)
8083 for (j = 0; j < tp->irq_cnt; j++) {
8084 struct tg3_napi *tnapi = &tp->napi[j];
8086 tg3_rx_prodring_free(tp, &tnapi->prodring);
8088 if (!tnapi->tx_buffers)
8091 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8092 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8097 tg3_tx_skb_unmap(tnapi, i,
8098 skb_shinfo(skb)->nr_frags - 1);
8100 dev_kfree_skb_any(skb);
8102 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8106 /* Initialize tx/rx rings for packet processing.
8108 * The chip has been shut down and the driver detached from
8109 * the networking, so no interrupts or new tx packets will
8110 * end up in the driver. tp->{tx,}lock are held and thus
8113 static int tg3_init_rings(struct tg3 *tp)
8117 /* Free up all the SKBs. */
8120 for (i = 0; i < tp->irq_cnt; i++) {
8121 struct tg3_napi *tnapi = &tp->napi[i];
8123 tnapi->last_tag = 0;
8124 tnapi->last_irq_tag = 0;
8125 tnapi->hw_status->status = 0;
8126 tnapi->hw_status->status_tag = 0;
8127 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8132 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8134 tnapi->rx_rcb_ptr = 0;
8136 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8138 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8147 static void tg3_mem_tx_release(struct tg3 *tp)
8151 for (i = 0; i < tp->irq_max; i++) {
8152 struct tg3_napi *tnapi = &tp->napi[i];
8154 if (tnapi->tx_ring) {
8155 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8156 tnapi->tx_ring, tnapi->tx_desc_mapping);
8157 tnapi->tx_ring = NULL;
8160 kfree(tnapi->tx_buffers);
8161 tnapi->tx_buffers = NULL;
8165 static int tg3_mem_tx_acquire(struct tg3 *tp)
8168 struct tg3_napi *tnapi = &tp->napi[0];
8170 /* If multivector TSS is enabled, vector 0 does not handle
8171 * tx interrupts. Don't allocate any resources for it.
8173 if (tg3_flag(tp, ENABLE_TSS))
8176 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8177 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8178 TG3_TX_RING_SIZE, GFP_KERNEL);
8179 if (!tnapi->tx_buffers)
8182 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8184 &tnapi->tx_desc_mapping,
8186 if (!tnapi->tx_ring)
8193 tg3_mem_tx_release(tp);
8197 static void tg3_mem_rx_release(struct tg3 *tp)
8201 for (i = 0; i < tp->irq_max; i++) {
8202 struct tg3_napi *tnapi = &tp->napi[i];
8204 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8209 dma_free_coherent(&tp->pdev->dev,
8210 TG3_RX_RCB_RING_BYTES(tp),
8212 tnapi->rx_rcb_mapping);
8213 tnapi->rx_rcb = NULL;
8217 static int tg3_mem_rx_acquire(struct tg3 *tp)
8219 unsigned int i, limit;
8221 limit = tp->rxq_cnt;
8223 /* If RSS is enabled, we need a (dummy) producer ring
8224 * set on vector zero. This is the true hw prodring.
8226 if (tg3_flag(tp, ENABLE_RSS))
8229 for (i = 0; i < limit; i++) {
8230 struct tg3_napi *tnapi = &tp->napi[i];
8232 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8235 /* If multivector RSS is enabled, vector 0
8236 * does not handle rx or tx interrupts.
8237 * Don't allocate any resources for it.
8239 if (!i && tg3_flag(tp, ENABLE_RSS))
8242 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8243 TG3_RX_RCB_RING_BYTES(tp),
8244 &tnapi->rx_rcb_mapping,
8245 GFP_KERNEL | __GFP_ZERO);
8253 tg3_mem_rx_release(tp);
8258 * Must not be invoked with interrupt sources disabled and
8259 * the hardware shutdown down.
8261 static void tg3_free_consistent(struct tg3 *tp)
8265 for (i = 0; i < tp->irq_cnt; i++) {
8266 struct tg3_napi *tnapi = &tp->napi[i];
8268 if (tnapi->hw_status) {
8269 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8271 tnapi->status_mapping);
8272 tnapi->hw_status = NULL;
8276 tg3_mem_rx_release(tp);
8277 tg3_mem_tx_release(tp);
8280 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8281 tp->hw_stats, tp->stats_mapping);
8282 tp->hw_stats = NULL;
8287 * Must not be invoked with interrupt sources disabled and
8288 * the hardware shutdown down. Can sleep.
8290 static int tg3_alloc_consistent(struct tg3 *tp)
8294 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8295 sizeof(struct tg3_hw_stats),
8297 GFP_KERNEL | __GFP_ZERO);
8301 for (i = 0; i < tp->irq_cnt; i++) {
8302 struct tg3_napi *tnapi = &tp->napi[i];
8303 struct tg3_hw_status *sblk;
8305 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8307 &tnapi->status_mapping,
8308 GFP_KERNEL | __GFP_ZERO);
8309 if (!tnapi->hw_status)
8312 sblk = tnapi->hw_status;
8314 if (tg3_flag(tp, ENABLE_RSS)) {
8315 u16 *prodptr = NULL;
8318 * When RSS is enabled, the status block format changes
8319 * slightly. The "rx_jumbo_consumer", "reserved",
8320 * and "rx_mini_consumer" members get mapped to the
8321 * other three rx return ring producer indexes.
8325 prodptr = &sblk->idx[0].rx_producer;
8328 prodptr = &sblk->rx_jumbo_consumer;
8331 prodptr = &sblk->reserved;
8334 prodptr = &sblk->rx_mini_consumer;
8337 tnapi->rx_rcb_prod_idx = prodptr;
8339 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8343 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8349 tg3_free_consistent(tp);
8353 #define MAX_WAIT_CNT 1000
8355 /* To stop a block, clear the enable bit and poll till it
8356 * clears. tp->lock is held.
8358 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
8363 if (tg3_flag(tp, 5705_PLUS)) {
8370 /* We can't enable/disable these bits of the
8371 * 5705/5750, just say success.
8384 for (i = 0; i < MAX_WAIT_CNT; i++) {
8387 if ((val & enable_bit) == 0)
8391 if (i == MAX_WAIT_CNT && !silent) {
8392 dev_err(&tp->pdev->dev,
8393 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8401 /* tp->lock is held. */
8402 static int tg3_abort_hw(struct tg3 *tp, int silent)
8406 tg3_disable_ints(tp);
8408 tp->rx_mode &= ~RX_MODE_ENABLE;
8409 tw32_f(MAC_RX_MODE, tp->rx_mode);
8412 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8413 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8414 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8415 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8416 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8417 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8419 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8420 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8421 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8422 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8423 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8424 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8425 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8427 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8428 tw32_f(MAC_MODE, tp->mac_mode);
8431 tp->tx_mode &= ~TX_MODE_ENABLE;
8432 tw32_f(MAC_TX_MODE, tp->tx_mode);
8434 for (i = 0; i < MAX_WAIT_CNT; i++) {
8436 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8439 if (i >= MAX_WAIT_CNT) {
8440 dev_err(&tp->pdev->dev,
8441 "%s timed out, TX_MODE_ENABLE will not clear "
8442 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8446 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8447 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8448 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8450 tw32(FTQ_RESET, 0xffffffff);
8451 tw32(FTQ_RESET, 0x00000000);
8453 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8454 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8456 for (i = 0; i < tp->irq_cnt; i++) {
8457 struct tg3_napi *tnapi = &tp->napi[i];
8458 if (tnapi->hw_status)
8459 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8465 /* Save PCI command register before chip reset */
8466 static void tg3_save_pci_state(struct tg3 *tp)
8468 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8471 /* Restore PCI state after chip reset */
8472 static void tg3_restore_pci_state(struct tg3 *tp)
8476 /* Re-enable indirect register accesses. */
8477 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8478 tp->misc_host_ctrl);
8480 /* Set MAX PCI retry to zero. */
8481 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8482 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8483 tg3_flag(tp, PCIX_MODE))
8484 val |= PCISTATE_RETRY_SAME_DMA;
8485 /* Allow reads and writes to the APE register and memory space. */
8486 if (tg3_flag(tp, ENABLE_APE))
8487 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8488 PCISTATE_ALLOW_APE_SHMEM_WR |
8489 PCISTATE_ALLOW_APE_PSPACE_WR;
8490 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8492 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8494 if (!tg3_flag(tp, PCI_EXPRESS)) {
8495 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8496 tp->pci_cacheline_sz);
8497 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8501 /* Make sure PCI-X relaxed ordering bit is clear. */
8502 if (tg3_flag(tp, PCIX_MODE)) {
8505 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8507 pcix_cmd &= ~PCI_X_CMD_ERO;
8508 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8512 if (tg3_flag(tp, 5780_CLASS)) {
8514 /* Chip reset on 5780 will reset MSI enable bit,
8515 * so need to restore it.
8517 if (tg3_flag(tp, USING_MSI)) {
8520 pci_read_config_word(tp->pdev,
8521 tp->msi_cap + PCI_MSI_FLAGS,
8523 pci_write_config_word(tp->pdev,
8524 tp->msi_cap + PCI_MSI_FLAGS,
8525 ctrl | PCI_MSI_FLAGS_ENABLE);
8526 val = tr32(MSGINT_MODE);
8527 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8532 /* tp->lock is held. */
8533 static int tg3_chip_reset(struct tg3 *tp)
8536 void (*write_op)(struct tg3 *, u32, u32);
8541 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8543 /* No matching tg3_nvram_unlock() after this because
8544 * chip reset below will undo the nvram lock.
8546 tp->nvram_lock_cnt = 0;
8548 /* GRC_MISC_CFG core clock reset will clear the memory
8549 * enable bit in PCI register 4 and the MSI enable bit
8550 * on some chips, so we save relevant registers here.
8552 tg3_save_pci_state(tp);
8554 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8555 tg3_flag(tp, 5755_PLUS))
8556 tw32(GRC_FASTBOOT_PC, 0);
8559 * We must avoid the readl() that normally takes place.
8560 * It locks machines, causes machine checks, and other
8561 * fun things. So, temporarily disable the 5701
8562 * hardware workaround, while we do the reset.
8564 write_op = tp->write32;
8565 if (write_op == tg3_write_flush_reg32)
8566 tp->write32 = tg3_write32;
8568 /* Prevent the irq handler from reading or writing PCI registers
8569 * during chip reset when the memory enable bit in the PCI command
8570 * register may be cleared. The chip does not generate interrupt
8571 * at this time, but the irq handler may still be called due to irq
8572 * sharing or irqpoll.
8574 tg3_flag_set(tp, CHIP_RESETTING);
8575 for (i = 0; i < tp->irq_cnt; i++) {
8576 struct tg3_napi *tnapi = &tp->napi[i];
8577 if (tnapi->hw_status) {
8578 tnapi->hw_status->status = 0;
8579 tnapi->hw_status->status_tag = 0;
8581 tnapi->last_tag = 0;
8582 tnapi->last_irq_tag = 0;
8586 for (i = 0; i < tp->irq_cnt; i++)
8587 synchronize_irq(tp->napi[i].irq_vec);
8589 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8590 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8591 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8595 val = GRC_MISC_CFG_CORECLK_RESET;
8597 if (tg3_flag(tp, PCI_EXPRESS)) {
8598 /* Force PCIe 1.0a mode */
8599 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8600 !tg3_flag(tp, 57765_PLUS) &&
8601 tr32(TG3_PCIE_PHY_TSTCTL) ==
8602 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8603 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8605 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
8606 tw32(GRC_MISC_CFG, (1 << 29));
8611 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
8612 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8613 tw32(GRC_VCPU_EXT_CTRL,
8614 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8617 /* Manage gphy power for all CPMU absent PCIe devices. */
8618 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8619 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8621 tw32(GRC_MISC_CFG, val);
8623 /* restore 5701 hardware bug workaround write method */
8624 tp->write32 = write_op;
8626 /* Unfortunately, we have to delay before the PCI read back.
8627 * Some 575X chips even will not respond to a PCI cfg access
8628 * when the reset command is given to the chip.
8630 * How do these hardware designers expect things to work
8631 * properly if the PCI write is posted for a long period
8632 * of time? It is always necessary to have some method by
8633 * which a register read back can occur to push the write
8634 * out which does the reset.
8636 * For most tg3 variants the trick below was working.
8641 /* Flush PCI posted writes. The normal MMIO registers
8642 * are inaccessible at this time so this is the only
8643 * way to make this reliably (actually, this is no longer
8644 * the case, see above). I tried to use indirect
8645 * register read/write but this upset some 5701 variants.
8647 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8651 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8654 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
8658 /* Wait for link training to complete. */
8659 for (j = 0; j < 5000; j++)
8662 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8663 pci_write_config_dword(tp->pdev, 0xc4,
8664 cfg_val | (1 << 15));
8667 /* Clear the "no snoop" and "relaxed ordering" bits. */
8668 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8670 * Older PCIe devices only support the 128 byte
8671 * MPS setting. Enforce the restriction.
8673 if (!tg3_flag(tp, CPMU_PRESENT))
8674 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8675 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8677 /* Clear error status */
8678 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8679 PCI_EXP_DEVSTA_CED |
8680 PCI_EXP_DEVSTA_NFED |
8681 PCI_EXP_DEVSTA_FED |
8682 PCI_EXP_DEVSTA_URD);
8685 tg3_restore_pci_state(tp);
8687 tg3_flag_clear(tp, CHIP_RESETTING);
8688 tg3_flag_clear(tp, ERROR_PROCESSED);
8691 if (tg3_flag(tp, 5780_CLASS))
8692 val = tr32(MEMARB_MODE);
8693 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8695 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
8697 tw32(0x5000, 0x400);
8700 if (tg3_flag(tp, IS_SSB_CORE)) {
8702 * BCM4785: In order to avoid repercussions from using
8703 * potentially defective internal ROM, stop the Rx RISC CPU,
8704 * which is not required.
8707 tg3_halt_cpu(tp, RX_CPU_BASE);
8710 tw32(GRC_MODE, tp->grc_mode);
8712 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
8715 tw32(0xc4, val | (1 << 15));
8718 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8719 tg3_asic_rev(tp) == ASIC_REV_5705) {
8720 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8721 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
8722 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8723 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8726 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8727 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8729 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8730 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8735 tw32_f(MAC_MODE, val);
8738 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8740 err = tg3_poll_fw(tp);
8746 if (tg3_flag(tp, PCI_EXPRESS) &&
8747 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
8748 tg3_asic_rev(tp) != ASIC_REV_5785 &&
8749 !tg3_flag(tp, 57765_PLUS)) {
8752 tw32(0x7c00, val | (1 << 25));
8755 if (tg3_asic_rev(tp) == ASIC_REV_5720) {
8756 val = tr32(TG3_CPMU_CLCK_ORIDE);
8757 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8760 /* Reprobe ASF enable state. */
8761 tg3_flag_clear(tp, ENABLE_ASF);
8762 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
8763 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
8765 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8766 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8767 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8770 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8771 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8772 tg3_flag_set(tp, ENABLE_ASF);
8773 tp->last_event_jiffies = jiffies;
8774 if (tg3_flag(tp, 5750_PLUS))
8775 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8777 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
8778 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
8779 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
8780 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
8781 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
8788 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8789 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8791 /* tp->lock is held. */
8792 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8798 tg3_write_sig_pre_reset(tp, kind);
8800 tg3_abort_hw(tp, silent);
8801 err = tg3_chip_reset(tp);
8803 __tg3_set_mac_addr(tp, 0);
8805 tg3_write_sig_legacy(tp, kind);
8806 tg3_write_sig_post_reset(tp, kind);
8809 /* Save the stats across chip resets... */
8810 tg3_get_nstats(tp, &tp->net_stats_prev);
8811 tg3_get_estats(tp, &tp->estats_prev);
8813 /* And make sure the next sample is new data */
8814 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8823 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8825 struct tg3 *tp = netdev_priv(dev);
8826 struct sockaddr *addr = p;
8827 int err = 0, skip_mac_1 = 0;
8829 if (!is_valid_ether_addr(addr->sa_data))
8830 return -EADDRNOTAVAIL;
8832 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8834 if (!netif_running(dev))
8837 if (tg3_flag(tp, ENABLE_ASF)) {
8838 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8840 addr0_high = tr32(MAC_ADDR_0_HIGH);
8841 addr0_low = tr32(MAC_ADDR_0_LOW);
8842 addr1_high = tr32(MAC_ADDR_1_HIGH);
8843 addr1_low = tr32(MAC_ADDR_1_LOW);
8845 /* Skip MAC addr 1 if ASF is using it. */
8846 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8847 !(addr1_high == 0 && addr1_low == 0))
8850 spin_lock_bh(&tp->lock);
8851 __tg3_set_mac_addr(tp, skip_mac_1);
8852 spin_unlock_bh(&tp->lock);
8857 /* tp->lock is held. */
8858 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8859 dma_addr_t mapping, u32 maxlen_flags,
8863 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8864 ((u64) mapping >> 32));
8866 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8867 ((u64) mapping & 0xffffffff));
8869 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8872 if (!tg3_flag(tp, 5705_PLUS))
8874 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8879 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8883 if (!tg3_flag(tp, ENABLE_TSS)) {
8884 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8885 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8886 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8888 tw32(HOSTCC_TXCOL_TICKS, 0);
8889 tw32(HOSTCC_TXMAX_FRAMES, 0);
8890 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8892 for (; i < tp->txq_cnt; i++) {
8895 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8896 tw32(reg, ec->tx_coalesce_usecs);
8897 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8898 tw32(reg, ec->tx_max_coalesced_frames);
8899 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8900 tw32(reg, ec->tx_max_coalesced_frames_irq);
8904 for (; i < tp->irq_max - 1; i++) {
8905 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8906 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8907 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8911 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8914 u32 limit = tp->rxq_cnt;
8916 if (!tg3_flag(tp, ENABLE_RSS)) {
8917 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8918 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8919 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8922 tw32(HOSTCC_RXCOL_TICKS, 0);
8923 tw32(HOSTCC_RXMAX_FRAMES, 0);
8924 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8927 for (; i < limit; i++) {
8930 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8931 tw32(reg, ec->rx_coalesce_usecs);
8932 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8933 tw32(reg, ec->rx_max_coalesced_frames);
8934 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8935 tw32(reg, ec->rx_max_coalesced_frames_irq);
8938 for (; i < tp->irq_max - 1; i++) {
8939 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8940 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8941 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8945 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8947 tg3_coal_tx_init(tp, ec);
8948 tg3_coal_rx_init(tp, ec);
8950 if (!tg3_flag(tp, 5705_PLUS)) {
8951 u32 val = ec->stats_block_coalesce_usecs;
8953 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8954 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8959 tw32(HOSTCC_STAT_COAL_TICKS, val);
8963 /* tp->lock is held. */
8964 static void tg3_rings_reset(struct tg3 *tp)
8967 u32 stblk, txrcb, rxrcb, limit;
8968 struct tg3_napi *tnapi = &tp->napi[0];
8970 /* Disable all transmit rings but the first. */
8971 if (!tg3_flag(tp, 5705_PLUS))
8972 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8973 else if (tg3_flag(tp, 5717_PLUS))
8974 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8975 else if (tg3_flag(tp, 57765_CLASS) ||
8976 tg3_asic_rev(tp) == ASIC_REV_5762)
8977 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8979 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8981 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8982 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8983 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8984 BDINFO_FLAGS_DISABLED);
8987 /* Disable all receive return rings but the first. */
8988 if (tg3_flag(tp, 5717_PLUS))
8989 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8990 else if (!tg3_flag(tp, 5705_PLUS))
8991 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8992 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
8993 tg3_asic_rev(tp) == ASIC_REV_5762 ||
8994 tg3_flag(tp, 57765_CLASS))
8995 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8997 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8999 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9000 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9001 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9002 BDINFO_FLAGS_DISABLED);
9004 /* Disable interrupts */
9005 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9006 tp->napi[0].chk_msi_cnt = 0;
9007 tp->napi[0].last_rx_cons = 0;
9008 tp->napi[0].last_tx_cons = 0;
9010 /* Zero mailbox registers. */
9011 if (tg3_flag(tp, SUPPORT_MSIX)) {
9012 for (i = 1; i < tp->irq_max; i++) {
9013 tp->napi[i].tx_prod = 0;
9014 tp->napi[i].tx_cons = 0;
9015 if (tg3_flag(tp, ENABLE_TSS))
9016 tw32_mailbox(tp->napi[i].prodmbox, 0);
9017 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9018 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9019 tp->napi[i].chk_msi_cnt = 0;
9020 tp->napi[i].last_rx_cons = 0;
9021 tp->napi[i].last_tx_cons = 0;
9023 if (!tg3_flag(tp, ENABLE_TSS))
9024 tw32_mailbox(tp->napi[0].prodmbox, 0);
9026 tp->napi[0].tx_prod = 0;
9027 tp->napi[0].tx_cons = 0;
9028 tw32_mailbox(tp->napi[0].prodmbox, 0);
9029 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9032 /* Make sure the NIC-based send BD rings are disabled. */
9033 if (!tg3_flag(tp, 5705_PLUS)) {
9034 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9035 for (i = 0; i < 16; i++)
9036 tw32_tx_mbox(mbox + i * 8, 0);
9039 txrcb = NIC_SRAM_SEND_RCB;
9040 rxrcb = NIC_SRAM_RCV_RET_RCB;
9042 /* Clear status block in ram. */
9043 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9045 /* Set status block DMA address */
9046 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9047 ((u64) tnapi->status_mapping >> 32));
9048 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9049 ((u64) tnapi->status_mapping & 0xffffffff));
9051 if (tnapi->tx_ring) {
9052 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9053 (TG3_TX_RING_SIZE <<
9054 BDINFO_FLAGS_MAXLEN_SHIFT),
9055 NIC_SRAM_TX_BUFFER_DESC);
9056 txrcb += TG3_BDINFO_SIZE;
9059 if (tnapi->rx_rcb) {
9060 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9061 (tp->rx_ret_ring_mask + 1) <<
9062 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9063 rxrcb += TG3_BDINFO_SIZE;
9066 stblk = HOSTCC_STATBLCK_RING1;
9068 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9069 u64 mapping = (u64)tnapi->status_mapping;
9070 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9071 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9073 /* Clear status block in ram. */
9074 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9076 if (tnapi->tx_ring) {
9077 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9078 (TG3_TX_RING_SIZE <<
9079 BDINFO_FLAGS_MAXLEN_SHIFT),
9080 NIC_SRAM_TX_BUFFER_DESC);
9081 txrcb += TG3_BDINFO_SIZE;
9084 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9085 ((tp->rx_ret_ring_mask + 1) <<
9086 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
9089 rxrcb += TG3_BDINFO_SIZE;
9093 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9095 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9097 if (!tg3_flag(tp, 5750_PLUS) ||
9098 tg3_flag(tp, 5780_CLASS) ||
9099 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9100 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9101 tg3_flag(tp, 57765_PLUS))
9102 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9103 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9104 tg3_asic_rev(tp) == ASIC_REV_5787)
9105 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9107 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9109 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9110 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9112 val = min(nic_rep_thresh, host_rep_thresh);
9113 tw32(RCVBDI_STD_THRESH, val);
9115 if (tg3_flag(tp, 57765_PLUS))
9116 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9118 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9121 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9123 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9125 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9126 tw32(RCVBDI_JUMBO_THRESH, val);
9128 if (tg3_flag(tp, 57765_PLUS))
9129 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9132 static inline u32 calc_crc(unsigned char *buf, int len)
9140 for (j = 0; j < len; j++) {
9143 for (k = 0; k < 8; k++) {
9156 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9158 /* accept or reject all multicast frames */
9159 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9160 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9161 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9162 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9165 static void __tg3_set_rx_mode(struct net_device *dev)
9167 struct tg3 *tp = netdev_priv(dev);
9170 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9171 RX_MODE_KEEP_VLAN_TAG);
9173 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9174 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9177 if (!tg3_flag(tp, ENABLE_ASF))
9178 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9181 if (dev->flags & IFF_PROMISC) {
9182 /* Promiscuous mode. */
9183 rx_mode |= RX_MODE_PROMISC;
9184 } else if (dev->flags & IFF_ALLMULTI) {
9185 /* Accept all multicast. */
9186 tg3_set_multi(tp, 1);
9187 } else if (netdev_mc_empty(dev)) {
9188 /* Reject all multicast. */
9189 tg3_set_multi(tp, 0);
9191 /* Accept one or more multicast(s). */
9192 struct netdev_hw_addr *ha;
9193 u32 mc_filter[4] = { 0, };
9198 netdev_for_each_mc_addr(ha, dev) {
9199 crc = calc_crc(ha->addr, ETH_ALEN);
9201 regidx = (bit & 0x60) >> 5;
9203 mc_filter[regidx] |= (1 << bit);
9206 tw32(MAC_HASH_REG_0, mc_filter[0]);
9207 tw32(MAC_HASH_REG_1, mc_filter[1]);
9208 tw32(MAC_HASH_REG_2, mc_filter[2]);
9209 tw32(MAC_HASH_REG_3, mc_filter[3]);
9212 if (rx_mode != tp->rx_mode) {
9213 tp->rx_mode = rx_mode;
9214 tw32_f(MAC_RX_MODE, rx_mode);
9219 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9223 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9224 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9227 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9231 if (!tg3_flag(tp, SUPPORT_MSIX))
9234 if (tp->rxq_cnt == 1) {
9235 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9239 /* Validate table against current IRQ count */
9240 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9241 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9245 if (i != TG3_RSS_INDIR_TBL_SIZE)
9246 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9249 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9252 u32 reg = MAC_RSS_INDIR_TBL_0;
9254 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9255 u32 val = tp->rss_ind_tbl[i];
9257 for (; i % 8; i++) {
9259 val |= tp->rss_ind_tbl[i];
9266 /* tp->lock is held. */
9267 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
9269 u32 val, rdmac_mode;
9271 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9273 tg3_disable_ints(tp);
9277 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9279 if (tg3_flag(tp, INIT_COMPLETE))
9280 tg3_abort_hw(tp, 1);
9282 /* Enable MAC control of LPI */
9283 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
9284 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
9285 TG3_CPMU_EEE_LNKIDL_UART_IDL;
9286 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9287 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
9289 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
9291 tw32_f(TG3_CPMU_EEE_CTRL,
9292 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9294 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9295 TG3_CPMU_EEEMD_LPI_IN_TX |
9296 TG3_CPMU_EEEMD_LPI_IN_RX |
9297 TG3_CPMU_EEEMD_EEE_ENABLE;
9299 if (tg3_asic_rev(tp) != ASIC_REV_5717)
9300 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9302 if (tg3_flag(tp, ENABLE_APE))
9303 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9305 tw32_f(TG3_CPMU_EEE_MODE, val);
9307 tw32_f(TG3_CPMU_EEE_DBTMR1,
9308 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9309 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9311 tw32_f(TG3_CPMU_EEE_DBTMR2,
9312 TG3_CPMU_DBTMR2_APE_TX_2047US |
9313 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9319 err = tg3_chip_reset(tp);
9323 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9325 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9326 val = tr32(TG3_CPMU_CTRL);
9327 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9328 tw32(TG3_CPMU_CTRL, val);
9330 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9331 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9332 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9333 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9335 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9336 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9337 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9338 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9340 val = tr32(TG3_CPMU_HST_ACC);
9341 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9342 val |= CPMU_HST_ACC_MACCLK_6_25;
9343 tw32(TG3_CPMU_HST_ACC, val);
9346 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9347 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9348 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9349 PCIE_PWR_MGMT_L1_THRESH_4MS;
9350 tw32(PCIE_PWR_MGMT_THRESH, val);
9352 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9353 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9355 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9357 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9358 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9361 if (tg3_flag(tp, L1PLLPD_EN)) {
9362 u32 grc_mode = tr32(GRC_MODE);
9364 /* Access the lower 1K of PL PCIE block registers. */
9365 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9366 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9368 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9369 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9370 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9372 tw32(GRC_MODE, grc_mode);
9375 if (tg3_flag(tp, 57765_CLASS)) {
9376 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9377 u32 grc_mode = tr32(GRC_MODE);
9379 /* Access the lower 1K of PL PCIE block registers. */
9380 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9381 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9383 val = tr32(TG3_PCIE_TLDLPL_PORT +
9384 TG3_PCIE_PL_LO_PHYCTL5);
9385 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9386 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9388 tw32(GRC_MODE, grc_mode);
9391 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9394 /* Fix transmit hangs */
9395 val = tr32(TG3_CPMU_PADRNG_CTL);
9396 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9397 tw32(TG3_CPMU_PADRNG_CTL, val);
9399 grc_mode = tr32(GRC_MODE);
9401 /* Access the lower 1K of DL PCIE block registers. */
9402 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9403 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9405 val = tr32(TG3_PCIE_TLDLPL_PORT +
9406 TG3_PCIE_DL_LO_FTSMAX);
9407 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9408 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9409 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9411 tw32(GRC_MODE, grc_mode);
9414 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9415 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9416 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9417 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9420 /* This works around an issue with Athlon chipsets on
9421 * B3 tigon3 silicon. This bit has no effect on any
9422 * other revision. But do not set this on PCI Express
9423 * chips and don't even touch the clocks if the CPMU is present.
9425 if (!tg3_flag(tp, CPMU_PRESENT)) {
9426 if (!tg3_flag(tp, PCI_EXPRESS))
9427 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9428 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9431 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9432 tg3_flag(tp, PCIX_MODE)) {
9433 val = tr32(TG3PCI_PCISTATE);
9434 val |= PCISTATE_RETRY_SAME_DMA;
9435 tw32(TG3PCI_PCISTATE, val);
9438 if (tg3_flag(tp, ENABLE_APE)) {
9439 /* Allow reads and writes to the
9440 * APE register and memory space.
9442 val = tr32(TG3PCI_PCISTATE);
9443 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9444 PCISTATE_ALLOW_APE_SHMEM_WR |
9445 PCISTATE_ALLOW_APE_PSPACE_WR;
9446 tw32(TG3PCI_PCISTATE, val);
9449 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9450 /* Enable some hw fixes. */
9451 val = tr32(TG3PCI_MSI_DATA);
9452 val |= (1 << 26) | (1 << 28) | (1 << 29);
9453 tw32(TG3PCI_MSI_DATA, val);
9456 /* Descriptor ring init may make accesses to the
9457 * NIC SRAM area to setup the TX descriptors, so we
9458 * can only do this after the hardware has been
9459 * successfully reset.
9461 err = tg3_init_rings(tp);
9465 if (tg3_flag(tp, 57765_PLUS)) {
9466 val = tr32(TG3PCI_DMA_RW_CTRL) &
9467 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9468 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9469 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9470 if (!tg3_flag(tp, 57765_CLASS) &&
9471 tg3_asic_rev(tp) != ASIC_REV_5717 &&
9472 tg3_asic_rev(tp) != ASIC_REV_5762)
9473 val |= DMA_RWCTRL_TAGGED_STAT_WA;
9474 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9475 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9476 tg3_asic_rev(tp) != ASIC_REV_5761) {
9477 /* This value is determined during the probe time DMA
9478 * engine test, tg3_test_dma.
9480 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9483 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9484 GRC_MODE_4X_NIC_SEND_RINGS |
9485 GRC_MODE_NO_TX_PHDR_CSUM |
9486 GRC_MODE_NO_RX_PHDR_CSUM);
9487 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9489 /* Pseudo-header checksum is done by hardware logic and not
9490 * the offload processers, so make the chip do the pseudo-
9491 * header checksums on receive. For transmit it is more
9492 * convenient to do the pseudo-header checksum in software
9493 * as Linux does that on transmit for us in all cases.
9495 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9497 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9499 tw32(TG3_RX_PTP_CTL,
9500 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9502 if (tg3_flag(tp, PTP_CAPABLE))
9503 val |= GRC_MODE_TIME_SYNC_ENABLE;
9505 tw32(GRC_MODE, tp->grc_mode | val);
9507 /* Setup the timer prescalar register. Clock is always 66Mhz. */
9508 val = tr32(GRC_MISC_CFG);
9510 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9511 tw32(GRC_MISC_CFG, val);
9513 /* Initialize MBUF/DESC pool. */
9514 if (tg3_flag(tp, 5750_PLUS)) {
9516 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9517 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9518 if (tg3_asic_rev(tp) == ASIC_REV_5704)
9519 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9521 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9522 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9523 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9524 } else if (tg3_flag(tp, TSO_CAPABLE)) {
9527 fw_len = tp->fw_len;
9528 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9529 tw32(BUFMGR_MB_POOL_ADDR,
9530 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9531 tw32(BUFMGR_MB_POOL_SIZE,
9532 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9535 if (tp->dev->mtu <= ETH_DATA_LEN) {
9536 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9537 tp->bufmgr_config.mbuf_read_dma_low_water);
9538 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9539 tp->bufmgr_config.mbuf_mac_rx_low_water);
9540 tw32(BUFMGR_MB_HIGH_WATER,
9541 tp->bufmgr_config.mbuf_high_water);
9543 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9544 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9545 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9546 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9547 tw32(BUFMGR_MB_HIGH_WATER,
9548 tp->bufmgr_config.mbuf_high_water_jumbo);
9550 tw32(BUFMGR_DMA_LOW_WATER,
9551 tp->bufmgr_config.dma_low_water);
9552 tw32(BUFMGR_DMA_HIGH_WATER,
9553 tp->bufmgr_config.dma_high_water);
9555 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9556 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9557 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9558 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9559 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9560 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9561 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9562 tw32(BUFMGR_MODE, val);
9563 for (i = 0; i < 2000; i++) {
9564 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9569 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9573 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9574 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9576 tg3_setup_rxbd_thresholds(tp);
9578 /* Initialize TG3_BDINFO's at:
9579 * RCVDBDI_STD_BD: standard eth size rx ring
9580 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
9581 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
9584 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
9585 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
9586 * ring attribute flags
9587 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
9589 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9590 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9592 * The size of each ring is fixed in the firmware, but the location is
9595 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9596 ((u64) tpr->rx_std_mapping >> 32));
9597 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9598 ((u64) tpr->rx_std_mapping & 0xffffffff));
9599 if (!tg3_flag(tp, 5717_PLUS))
9600 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9601 NIC_SRAM_RX_BUFFER_DESC);
9603 /* Disable the mini ring */
9604 if (!tg3_flag(tp, 5705_PLUS))
9605 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9606 BDINFO_FLAGS_DISABLED);
9608 /* Program the jumbo buffer descriptor ring control
9609 * blocks on those devices that have them.
9611 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9612 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9614 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9615 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9616 ((u64) tpr->rx_jmb_mapping >> 32));
9617 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9618 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9619 val = TG3_RX_JMB_RING_SIZE(tp) <<
9620 BDINFO_FLAGS_MAXLEN_SHIFT;
9621 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9622 val | BDINFO_FLAGS_USE_EXT_RECV);
9623 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9624 tg3_flag(tp, 57765_CLASS) ||
9625 tg3_asic_rev(tp) == ASIC_REV_5762)
9626 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9627 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9629 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9630 BDINFO_FLAGS_DISABLED);
9633 if (tg3_flag(tp, 57765_PLUS)) {
9634 val = TG3_RX_STD_RING_SIZE(tp);
9635 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9636 val |= (TG3_RX_STD_DMA_SZ << 2);
9638 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9640 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9642 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9644 tpr->rx_std_prod_idx = tp->rx_pending;
9645 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9647 tpr->rx_jmb_prod_idx =
9648 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9649 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9651 tg3_rings_reset(tp);
9653 /* Initialize MAC address and backoff seed. */
9654 __tg3_set_mac_addr(tp, 0);
9656 /* MTU + ethernet header + FCS + optional VLAN tag */
9657 tw32(MAC_RX_MTU_SIZE,
9658 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9660 /* The slot time is changed by tg3_setup_phy if we
9661 * run at gigabit with half duplex.
9663 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9664 (6 << TX_LENGTHS_IPG_SHIFT) |
9665 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9667 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9668 tg3_asic_rev(tp) == ASIC_REV_5762)
9669 val |= tr32(MAC_TX_LENGTHS) &
9670 (TX_LENGTHS_JMB_FRM_LEN_MSK |
9671 TX_LENGTHS_CNT_DWN_VAL_MSK);
9673 tw32(MAC_TX_LENGTHS, val);
9675 /* Receive rules. */
9676 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9677 tw32(RCVLPC_CONFIG, 0x0181);
9679 /* Calculate RDMAC_MODE setting early, we need it to determine
9680 * the RCVLPC_STATE_ENABLE mask.
9682 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9683 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9684 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9685 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9686 RDMAC_MODE_LNGREAD_ENAB);
9688 if (tg3_asic_rev(tp) == ASIC_REV_5717)
9689 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9691 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
9692 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9693 tg3_asic_rev(tp) == ASIC_REV_57780)
9694 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9695 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9696 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9698 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9699 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9700 if (tg3_flag(tp, TSO_CAPABLE) &&
9701 tg3_asic_rev(tp) == ASIC_REV_5705) {
9702 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9703 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9704 !tg3_flag(tp, IS_5788)) {
9705 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9709 if (tg3_flag(tp, PCI_EXPRESS))
9710 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9712 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9714 if (tp->dev->mtu <= ETH_DATA_LEN) {
9715 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
9716 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
9720 if (tg3_flag(tp, HW_TSO_1) ||
9721 tg3_flag(tp, HW_TSO_2) ||
9722 tg3_flag(tp, HW_TSO_3))
9723 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9725 if (tg3_flag(tp, 57765_PLUS) ||
9726 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9727 tg3_asic_rev(tp) == ASIC_REV_57780)
9728 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9730 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9731 tg3_asic_rev(tp) == ASIC_REV_5762)
9732 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9734 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
9735 tg3_asic_rev(tp) == ASIC_REV_5784 ||
9736 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9737 tg3_asic_rev(tp) == ASIC_REV_57780 ||
9738 tg3_flag(tp, 57765_PLUS)) {
9741 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9742 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
9744 tgtreg = TG3_RDMA_RSRVCTRL_REG;
9747 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9748 tg3_asic_rev(tp) == ASIC_REV_5762) {
9749 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9750 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9751 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9752 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9753 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9754 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9756 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9759 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
9760 tg3_asic_rev(tp) == ASIC_REV_5720 ||
9761 tg3_asic_rev(tp) == ASIC_REV_5762) {
9764 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9765 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
9767 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
9771 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9772 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9775 /* Receive/send statistics. */
9776 if (tg3_flag(tp, 5750_PLUS)) {
9777 val = tr32(RCVLPC_STATS_ENABLE);
9778 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9779 tw32(RCVLPC_STATS_ENABLE, val);
9780 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9781 tg3_flag(tp, TSO_CAPABLE)) {
9782 val = tr32(RCVLPC_STATS_ENABLE);
9783 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9784 tw32(RCVLPC_STATS_ENABLE, val);
9786 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9788 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9789 tw32(SNDDATAI_STATSENAB, 0xffffff);
9790 tw32(SNDDATAI_STATSCTRL,
9791 (SNDDATAI_SCTRL_ENABLE |
9792 SNDDATAI_SCTRL_FASTUPD));
9794 /* Setup host coalescing engine. */
9795 tw32(HOSTCC_MODE, 0);
9796 for (i = 0; i < 2000; i++) {
9797 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9802 __tg3_set_coalesce(tp, &tp->coal);
9804 if (!tg3_flag(tp, 5705_PLUS)) {
9805 /* Status/statistics block address. See tg3_timer,
9806 * the tg3_periodic_fetch_stats call there, and
9807 * tg3_get_stats to see how this works for 5705/5750 chips.
9809 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9810 ((u64) tp->stats_mapping >> 32));
9811 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9812 ((u64) tp->stats_mapping & 0xffffffff));
9813 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9815 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9817 /* Clear statistics and status block memory areas */
9818 for (i = NIC_SRAM_STATS_BLK;
9819 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9821 tg3_write_mem(tp, i, 0);
9826 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9828 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9829 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9830 if (!tg3_flag(tp, 5705_PLUS))
9831 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9833 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9834 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9835 /* reset to prevent losing 1st rx packet intermittently */
9836 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9840 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9841 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9842 MAC_MODE_FHDE_ENABLE;
9843 if (tg3_flag(tp, ENABLE_APE))
9844 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9845 if (!tg3_flag(tp, 5705_PLUS) &&
9846 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9847 tg3_asic_rev(tp) != ASIC_REV_5700)
9848 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9849 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9852 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9853 * If TG3_FLAG_IS_NIC is zero, we should read the
9854 * register to preserve the GPIO settings for LOMs. The GPIOs,
9855 * whether used as inputs or outputs, are set by boot code after
9858 if (!tg3_flag(tp, IS_NIC)) {
9861 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9862 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9863 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9865 if (tg3_asic_rev(tp) == ASIC_REV_5752)
9866 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9867 GRC_LCLCTRL_GPIO_OUTPUT3;
9869 if (tg3_asic_rev(tp) == ASIC_REV_5755)
9870 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9872 tp->grc_local_ctrl &= ~gpio_mask;
9873 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9875 /* GPIO1 must be driven high for eeprom write protect */
9876 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9877 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9878 GRC_LCLCTRL_GPIO_OUTPUT1);
9880 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9883 if (tg3_flag(tp, USING_MSIX)) {
9884 val = tr32(MSGINT_MODE);
9885 val |= MSGINT_MODE_ENABLE;
9886 if (tp->irq_cnt > 1)
9887 val |= MSGINT_MODE_MULTIVEC_EN;
9888 if (!tg3_flag(tp, 1SHOT_MSI))
9889 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9890 tw32(MSGINT_MODE, val);
9893 if (!tg3_flag(tp, 5705_PLUS)) {
9894 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9898 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9899 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9900 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9901 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9902 WDMAC_MODE_LNGREAD_ENAB);
9904 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9905 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9906 if (tg3_flag(tp, TSO_CAPABLE) &&
9907 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
9908 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
9910 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9911 !tg3_flag(tp, IS_5788)) {
9912 val |= WDMAC_MODE_RX_ACCEL;
9916 /* Enable host coalescing bug fix */
9917 if (tg3_flag(tp, 5755_PLUS))
9918 val |= WDMAC_MODE_STATUS_TAG_FIX;
9920 if (tg3_asic_rev(tp) == ASIC_REV_5785)
9921 val |= WDMAC_MODE_BURST_ALL_DATA;
9923 tw32_f(WDMAC_MODE, val);
9926 if (tg3_flag(tp, PCIX_MODE)) {
9929 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9931 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
9932 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9933 pcix_cmd |= PCI_X_CMD_READ_2K;
9934 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
9935 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9936 pcix_cmd |= PCI_X_CMD_READ_2K;
9938 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9942 tw32_f(RDMAC_MODE, rdmac_mode);
9945 if (tg3_asic_rev(tp) == ASIC_REV_5719) {
9946 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9947 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9950 if (i < TG3_NUM_RDMA_CHANNELS) {
9951 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9952 val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9953 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9954 tg3_flag_set(tp, 5719_RDMA_BUG);
9958 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9959 if (!tg3_flag(tp, 5705_PLUS))
9960 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9962 if (tg3_asic_rev(tp) == ASIC_REV_5761)
9964 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9966 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9968 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9969 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9970 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9971 if (tg3_flag(tp, LRG_PROD_RING_CAP))
9972 val |= RCVDBDI_MODE_LRG_RING_SZ;
9973 tw32(RCVDBDI_MODE, val);
9974 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9975 if (tg3_flag(tp, HW_TSO_1) ||
9976 tg3_flag(tp, HW_TSO_2) ||
9977 tg3_flag(tp, HW_TSO_3))
9978 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9979 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9980 if (tg3_flag(tp, ENABLE_TSS))
9981 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9982 tw32(SNDBDI_MODE, val);
9983 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9985 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
9986 err = tg3_load_5701_a0_firmware_fix(tp);
9991 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9992 /* Ignore any errors for the firmware download. If download
9993 * fails, the device will operate with EEE disabled
9995 tg3_load_57766_firmware(tp);
9998 if (tg3_flag(tp, TSO_CAPABLE)) {
9999 err = tg3_load_tso_firmware(tp);
10004 tp->tx_mode = TX_MODE_ENABLE;
10006 if (tg3_flag(tp, 5755_PLUS) ||
10007 tg3_asic_rev(tp) == ASIC_REV_5906)
10008 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10010 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10011 tg3_asic_rev(tp) == ASIC_REV_5762) {
10012 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10013 tp->tx_mode &= ~val;
10014 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10017 tw32_f(MAC_TX_MODE, tp->tx_mode);
10020 if (tg3_flag(tp, ENABLE_RSS)) {
10021 tg3_rss_write_indir_tbl(tp);
10023 /* Setup the "secret" hash key. */
10024 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
10025 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
10026 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
10027 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
10028 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
10029 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
10030 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
10031 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
10032 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
10033 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
10036 tp->rx_mode = RX_MODE_ENABLE;
10037 if (tg3_flag(tp, 5755_PLUS))
10038 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10040 if (tg3_flag(tp, ENABLE_RSS))
10041 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10042 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10043 RX_MODE_RSS_IPV6_HASH_EN |
10044 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10045 RX_MODE_RSS_IPV4_HASH_EN |
10046 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10048 tw32_f(MAC_RX_MODE, tp->rx_mode);
10051 tw32(MAC_LED_CTRL, tp->led_ctrl);
10053 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10054 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10055 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10058 tw32_f(MAC_RX_MODE, tp->rx_mode);
10061 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10062 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10063 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10064 /* Set drive transmission level to 1.2V */
10065 /* only if the signal pre-emphasis bit is not set */
10066 val = tr32(MAC_SERDES_CFG);
10069 tw32(MAC_SERDES_CFG, val);
10071 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10072 tw32(MAC_SERDES_CFG, 0x616000);
10075 /* Prevent chip from dropping frames when flow control
10078 if (tg3_flag(tp, 57765_CLASS))
10082 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10084 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10085 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10086 /* Use hardware link auto-negotiation */
10087 tg3_flag_set(tp, HW_AUTONEG);
10090 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10091 tg3_asic_rev(tp) == ASIC_REV_5714) {
10094 tmp = tr32(SERDES_RX_CTRL);
10095 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10096 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10097 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10098 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10101 if (!tg3_flag(tp, USE_PHYLIB)) {
10102 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10103 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10105 err = tg3_setup_phy(tp, 0);
10109 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10110 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10113 /* Clear CRC stats. */
10114 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10115 tg3_writephy(tp, MII_TG3_TEST1,
10116 tmp | MII_TG3_TEST1_CRC_EN);
10117 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10122 __tg3_set_rx_mode(tp->dev);
10124 /* Initialize receive rules. */
10125 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10126 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10127 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10128 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10130 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10134 if (tg3_flag(tp, ENABLE_ASF))
10138 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10140 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10142 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10144 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10146 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10148 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10150 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10152 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10154 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10156 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10158 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10160 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10162 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10164 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10172 if (tg3_flag(tp, ENABLE_APE))
10173 /* Write our heartbeat update interval to APE. */
10174 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10175 APE_HOST_HEARTBEAT_INT_DISABLE);
10177 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10182 /* Called at device open time to get the chip ready for
10183 * packet processing. Invoked with tp->lock held.
10185 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
10187 tg3_switch_clocks(tp);
10189 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10191 return tg3_reset_hw(tp, reset_phy);
10194 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10198 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10199 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10201 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10204 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10205 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10206 memset(ocir, 0, TG3_OCIR_LEN);
10210 /* sysfs attributes for hwmon */
10211 static ssize_t tg3_show_temp(struct device *dev,
10212 struct device_attribute *devattr, char *buf)
10214 struct pci_dev *pdev = to_pci_dev(dev);
10215 struct net_device *netdev = pci_get_drvdata(pdev);
10216 struct tg3 *tp = netdev_priv(netdev);
10217 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10220 spin_lock_bh(&tp->lock);
10221 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10222 sizeof(temperature));
10223 spin_unlock_bh(&tp->lock);
10224 return sprintf(buf, "%u\n", temperature);
10228 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10229 TG3_TEMP_SENSOR_OFFSET);
10230 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10231 TG3_TEMP_CAUTION_OFFSET);
10232 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10233 TG3_TEMP_MAX_OFFSET);
10235 static struct attribute *tg3_attributes[] = {
10236 &sensor_dev_attr_temp1_input.dev_attr.attr,
10237 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10238 &sensor_dev_attr_temp1_max.dev_attr.attr,
10242 static const struct attribute_group tg3_group = {
10243 .attrs = tg3_attributes,
10246 static void tg3_hwmon_close(struct tg3 *tp)
10248 if (tp->hwmon_dev) {
10249 hwmon_device_unregister(tp->hwmon_dev);
10250 tp->hwmon_dev = NULL;
10251 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10255 static void tg3_hwmon_open(struct tg3 *tp)
10259 struct pci_dev *pdev = tp->pdev;
10260 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10262 tg3_sd_scan_scratchpad(tp, ocirs);
10264 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10265 if (!ocirs[i].src_data_length)
10268 size += ocirs[i].src_hdr_length;
10269 size += ocirs[i].src_data_length;
10275 /* Register hwmon sysfs hooks */
10276 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10278 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10282 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10283 if (IS_ERR(tp->hwmon_dev)) {
10284 tp->hwmon_dev = NULL;
10285 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10286 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10291 #define TG3_STAT_ADD32(PSTAT, REG) \
10292 do { u32 __val = tr32(REG); \
10293 (PSTAT)->low += __val; \
10294 if ((PSTAT)->low < __val) \
10295 (PSTAT)->high += 1; \
10298 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10300 struct tg3_hw_stats *sp = tp->hw_stats;
10305 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10306 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10307 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10308 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10309 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10310 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10311 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10312 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10313 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10314 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10315 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10316 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10317 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10318 if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
10319 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10320 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10323 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10324 val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
10325 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10326 tg3_flag_clear(tp, 5719_RDMA_BUG);
10329 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10330 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10331 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10332 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10333 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10334 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10335 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10336 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10337 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10338 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10339 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10340 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10341 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10342 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10344 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10345 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10346 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10347 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10348 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10350 u32 val = tr32(HOSTCC_FLOW_ATTN);
10351 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10353 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10354 sp->rx_discards.low += val;
10355 if (sp->rx_discards.low < val)
10356 sp->rx_discards.high += 1;
10358 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10360 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10363 static void tg3_chk_missed_msi(struct tg3 *tp)
10367 for (i = 0; i < tp->irq_cnt; i++) {
10368 struct tg3_napi *tnapi = &tp->napi[i];
10370 if (tg3_has_work(tnapi)) {
10371 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10372 tnapi->last_tx_cons == tnapi->tx_cons) {
10373 if (tnapi->chk_msi_cnt < 1) {
10374 tnapi->chk_msi_cnt++;
10380 tnapi->chk_msi_cnt = 0;
10381 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10382 tnapi->last_tx_cons = tnapi->tx_cons;
10386 static void tg3_timer(unsigned long __opaque)
10388 struct tg3 *tp = (struct tg3 *) __opaque;
10390 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10391 goto restart_timer;
10393 spin_lock(&tp->lock);
10395 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10396 tg3_flag(tp, 57765_CLASS))
10397 tg3_chk_missed_msi(tp);
10399 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10400 /* BCM4785: Flush posted writes from GbE to host memory. */
10404 if (!tg3_flag(tp, TAGGED_STATUS)) {
10405 /* All of this garbage is because when using non-tagged
10406 * IRQ status the mailbox/status_block protocol the chip
10407 * uses with the cpu is race prone.
10409 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10410 tw32(GRC_LOCAL_CTRL,
10411 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10413 tw32(HOSTCC_MODE, tp->coalesce_mode |
10414 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10417 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10418 spin_unlock(&tp->lock);
10419 tg3_reset_task_schedule(tp);
10420 goto restart_timer;
10424 /* This part only runs once per second. */
10425 if (!--tp->timer_counter) {
10426 if (tg3_flag(tp, 5705_PLUS))
10427 tg3_periodic_fetch_stats(tp);
10429 if (tp->setlpicnt && !--tp->setlpicnt)
10430 tg3_phy_eee_enable(tp);
10432 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10436 mac_stat = tr32(MAC_STATUS);
10439 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10440 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10442 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10446 tg3_setup_phy(tp, 0);
10447 } else if (tg3_flag(tp, POLL_SERDES)) {
10448 u32 mac_stat = tr32(MAC_STATUS);
10449 int need_setup = 0;
10452 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10455 if (!tp->link_up &&
10456 (mac_stat & (MAC_STATUS_PCS_SYNCED |
10457 MAC_STATUS_SIGNAL_DET))) {
10461 if (!tp->serdes_counter) {
10464 ~MAC_MODE_PORT_MODE_MASK));
10466 tw32_f(MAC_MODE, tp->mac_mode);
10469 tg3_setup_phy(tp, 0);
10471 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10472 tg3_flag(tp, 5780_CLASS)) {
10473 tg3_serdes_parallel_detect(tp);
10476 tp->timer_counter = tp->timer_multiplier;
10479 /* Heartbeat is only sent once every 2 seconds.
10481 * The heartbeat is to tell the ASF firmware that the host
10482 * driver is still alive. In the event that the OS crashes,
10483 * ASF needs to reset the hardware to free up the FIFO space
10484 * that may be filled with rx packets destined for the host.
10485 * If the FIFO is full, ASF will no longer function properly.
10487 * Unintended resets have been reported on real time kernels
10488 * where the timer doesn't run on time. Netpoll will also have
10491 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10492 * to check the ring condition when the heartbeat is expiring
10493 * before doing the reset. This will prevent most unintended
10496 if (!--tp->asf_counter) {
10497 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10498 tg3_wait_for_event_ack(tp);
10500 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10501 FWCMD_NICDRV_ALIVE3);
10502 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10503 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10504 TG3_FW_UPDATE_TIMEOUT_SEC);
10506 tg3_generate_fw_event(tp);
10508 tp->asf_counter = tp->asf_multiplier;
10511 spin_unlock(&tp->lock);
10514 tp->timer.expires = jiffies + tp->timer_offset;
10515 add_timer(&tp->timer);
10518 static void tg3_timer_init(struct tg3 *tp)
10520 if (tg3_flag(tp, TAGGED_STATUS) &&
10521 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10522 !tg3_flag(tp, 57765_CLASS))
10523 tp->timer_offset = HZ;
10525 tp->timer_offset = HZ / 10;
10527 BUG_ON(tp->timer_offset > HZ);
10529 tp->timer_multiplier = (HZ / tp->timer_offset);
10530 tp->asf_multiplier = (HZ / tp->timer_offset) *
10531 TG3_FW_UPDATE_FREQ_SEC;
10533 init_timer(&tp->timer);
10534 tp->timer.data = (unsigned long) tp;
10535 tp->timer.function = tg3_timer;
10538 static void tg3_timer_start(struct tg3 *tp)
10540 tp->asf_counter = tp->asf_multiplier;
10541 tp->timer_counter = tp->timer_multiplier;
10543 tp->timer.expires = jiffies + tp->timer_offset;
10544 add_timer(&tp->timer);
10547 static void tg3_timer_stop(struct tg3 *tp)
10549 del_timer_sync(&tp->timer);
10552 /* Restart hardware after configuration changes, self-test, etc.
10553 * Invoked with tp->lock held.
10555 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
10556 __releases(tp->lock)
10557 __acquires(tp->lock)
10561 err = tg3_init_hw(tp, reset_phy);
10563 netdev_err(tp->dev,
10564 "Failed to re-initialize device, aborting\n");
10565 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10566 tg3_full_unlock(tp);
10567 tg3_timer_stop(tp);
10569 tg3_napi_enable(tp);
10570 dev_close(tp->dev);
10571 tg3_full_lock(tp, 0);
10576 static void tg3_reset_task(struct work_struct *work)
10578 struct tg3 *tp = container_of(work, struct tg3, reset_task);
10581 tg3_full_lock(tp, 0);
10583 if (!netif_running(tp->dev)) {
10584 tg3_flag_clear(tp, RESET_TASK_PENDING);
10585 tg3_full_unlock(tp);
10589 tg3_full_unlock(tp);
10593 tg3_netif_stop(tp);
10595 tg3_full_lock(tp, 1);
10597 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10598 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10599 tp->write32_rx_mbox = tg3_write_flush_reg32;
10600 tg3_flag_set(tp, MBOX_WRITE_REORDER);
10601 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10604 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10605 err = tg3_init_hw(tp, 1);
10609 tg3_netif_start(tp);
10612 tg3_full_unlock(tp);
10617 tg3_flag_clear(tp, RESET_TASK_PENDING);
10620 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10623 unsigned long flags;
10625 struct tg3_napi *tnapi = &tp->napi[irq_num];
10627 if (tp->irq_cnt == 1)
10628 name = tp->dev->name;
10630 name = &tnapi->irq_lbl[0];
10631 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10632 name[IFNAMSIZ-1] = 0;
10635 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10637 if (tg3_flag(tp, 1SHOT_MSI))
10638 fn = tg3_msi_1shot;
10641 fn = tg3_interrupt;
10642 if (tg3_flag(tp, TAGGED_STATUS))
10643 fn = tg3_interrupt_tagged;
10644 flags = IRQF_SHARED;
10647 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10650 static int tg3_test_interrupt(struct tg3 *tp)
10652 struct tg3_napi *tnapi = &tp->napi[0];
10653 struct net_device *dev = tp->dev;
10654 int err, i, intr_ok = 0;
10657 if (!netif_running(dev))
10660 tg3_disable_ints(tp);
10662 free_irq(tnapi->irq_vec, tnapi);
10665 * Turn off MSI one shot mode. Otherwise this test has no
10666 * observable way to know whether the interrupt was delivered.
10668 if (tg3_flag(tp, 57765_PLUS)) {
10669 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10670 tw32(MSGINT_MODE, val);
10673 err = request_irq(tnapi->irq_vec, tg3_test_isr,
10674 IRQF_SHARED, dev->name, tnapi);
10678 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10679 tg3_enable_ints(tp);
10681 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10684 for (i = 0; i < 5; i++) {
10685 u32 int_mbox, misc_host_ctrl;
10687 int_mbox = tr32_mailbox(tnapi->int_mbox);
10688 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10690 if ((int_mbox != 0) ||
10691 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10696 if (tg3_flag(tp, 57765_PLUS) &&
10697 tnapi->hw_status->status_tag != tnapi->last_tag)
10698 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10703 tg3_disable_ints(tp);
10705 free_irq(tnapi->irq_vec, tnapi);
10707 err = tg3_request_irq(tp, 0);
10713 /* Reenable MSI one shot mode. */
10714 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10715 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10716 tw32(MSGINT_MODE, val);
10724 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
10725 * successfully restored
10727 static int tg3_test_msi(struct tg3 *tp)
10732 if (!tg3_flag(tp, USING_MSI))
10735 /* Turn off SERR reporting in case MSI terminates with Master
10738 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10739 pci_write_config_word(tp->pdev, PCI_COMMAND,
10740 pci_cmd & ~PCI_COMMAND_SERR);
10742 err = tg3_test_interrupt(tp);
10744 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10749 /* other failures */
10753 /* MSI test failed, go back to INTx mode */
10754 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10755 "to INTx mode. Please report this failure to the PCI "
10756 "maintainer and include system chipset information\n");
10758 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10760 pci_disable_msi(tp->pdev);
10762 tg3_flag_clear(tp, USING_MSI);
10763 tp->napi[0].irq_vec = tp->pdev->irq;
10765 err = tg3_request_irq(tp, 0);
10769 /* Need to reset the chip because the MSI cycle may have terminated
10770 * with Master Abort.
10772 tg3_full_lock(tp, 1);
10774 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10775 err = tg3_init_hw(tp, 1);
10777 tg3_full_unlock(tp);
10780 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10785 static int tg3_request_firmware(struct tg3 *tp)
10787 const struct tg3_firmware_hdr *fw_hdr;
10789 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10790 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10795 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
10797 /* Firmware blob starts with version numbers, followed by
10798 * start address and _full_ length including BSS sections
10799 * (which must be longer than the actual data, of course
10802 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
10803 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
10804 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10805 tp->fw_len, tp->fw_needed);
10806 release_firmware(tp->fw);
10811 /* We no longer need firmware; we have it. */
10812 tp->fw_needed = NULL;
10816 static u32 tg3_irq_count(struct tg3 *tp)
10818 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10821 /* We want as many rx rings enabled as there are cpus.
10822 * In multiqueue MSI-X mode, the first MSI-X vector
10823 * only deals with link interrupts, etc, so we add
10824 * one to the number of vectors we are requesting.
10826 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
10832 static bool tg3_enable_msix(struct tg3 *tp)
10835 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
10837 tp->txq_cnt = tp->txq_req;
10838 tp->rxq_cnt = tp->rxq_req;
10840 tp->rxq_cnt = netif_get_num_default_rss_queues();
10841 if (tp->rxq_cnt > tp->rxq_max)
10842 tp->rxq_cnt = tp->rxq_max;
10844 /* Disable multiple TX rings by default. Simple round-robin hardware
10845 * scheduling of the TX rings can cause starvation of rings with
10846 * small packets when other rings have TSO or jumbo packets.
10851 tp->irq_cnt = tg3_irq_count(tp);
10853 for (i = 0; i < tp->irq_max; i++) {
10854 msix_ent[i].entry = i;
10855 msix_ent[i].vector = 0;
10858 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
10861 } else if (rc != 0) {
10862 if (pci_enable_msix(tp->pdev, msix_ent, rc))
10864 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
10867 tp->rxq_cnt = max(rc - 1, 1);
10869 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
10872 for (i = 0; i < tp->irq_max; i++)
10873 tp->napi[i].irq_vec = msix_ent[i].vector;
10875 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
10876 pci_disable_msix(tp->pdev);
10880 if (tp->irq_cnt == 1)
10883 tg3_flag_set(tp, ENABLE_RSS);
10885 if (tp->txq_cnt > 1)
10886 tg3_flag_set(tp, ENABLE_TSS);
10888 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
10893 static void tg3_ints_init(struct tg3 *tp)
10895 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
10896 !tg3_flag(tp, TAGGED_STATUS)) {
10897 /* All MSI supporting chips should support tagged
10898 * status. Assert that this is the case.
10900 netdev_warn(tp->dev,
10901 "MSI without TAGGED_STATUS? Not using MSI\n");
10905 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
10906 tg3_flag_set(tp, USING_MSIX);
10907 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
10908 tg3_flag_set(tp, USING_MSI);
10910 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10911 u32 msi_mode = tr32(MSGINT_MODE);
10912 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
10913 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
10914 if (!tg3_flag(tp, 1SHOT_MSI))
10915 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
10916 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
10919 if (!tg3_flag(tp, USING_MSIX)) {
10921 tp->napi[0].irq_vec = tp->pdev->irq;
10924 if (tp->irq_cnt == 1) {
10927 netif_set_real_num_tx_queues(tp->dev, 1);
10928 netif_set_real_num_rx_queues(tp->dev, 1);
10932 static void tg3_ints_fini(struct tg3 *tp)
10934 if (tg3_flag(tp, USING_MSIX))
10935 pci_disable_msix(tp->pdev);
10936 else if (tg3_flag(tp, USING_MSI))
10937 pci_disable_msi(tp->pdev);
10938 tg3_flag_clear(tp, USING_MSI);
10939 tg3_flag_clear(tp, USING_MSIX);
10940 tg3_flag_clear(tp, ENABLE_RSS);
10941 tg3_flag_clear(tp, ENABLE_TSS);
10944 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
10947 struct net_device *dev = tp->dev;
10951 * Setup interrupts first so we know how
10952 * many NAPI resources to allocate
10956 tg3_rss_check_indir_tbl(tp);
10958 /* The placement of this call is tied
10959 * to the setup and use of Host TX descriptors.
10961 err = tg3_alloc_consistent(tp);
10967 tg3_napi_enable(tp);
10969 for (i = 0; i < tp->irq_cnt; i++) {
10970 struct tg3_napi *tnapi = &tp->napi[i];
10971 err = tg3_request_irq(tp, i);
10973 for (i--; i >= 0; i--) {
10974 tnapi = &tp->napi[i];
10975 free_irq(tnapi->irq_vec, tnapi);
10981 tg3_full_lock(tp, 0);
10983 err = tg3_init_hw(tp, reset_phy);
10985 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10986 tg3_free_rings(tp);
10989 tg3_full_unlock(tp);
10994 if (test_irq && tg3_flag(tp, USING_MSI)) {
10995 err = tg3_test_msi(tp);
10998 tg3_full_lock(tp, 0);
10999 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11000 tg3_free_rings(tp);
11001 tg3_full_unlock(tp);
11006 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11007 u32 val = tr32(PCIE_TRANSACTION_CFG);
11009 tw32(PCIE_TRANSACTION_CFG,
11010 val | PCIE_TRANS_CFG_1SHOT_MSI);
11016 tg3_hwmon_open(tp);
11018 tg3_full_lock(tp, 0);
11020 tg3_timer_start(tp);
11021 tg3_flag_set(tp, INIT_COMPLETE);
11022 tg3_enable_ints(tp);
11027 tg3_ptp_resume(tp);
11030 tg3_full_unlock(tp);
11032 netif_tx_start_all_queues(dev);
11035 * Reset loopback feature if it was turned on while the device was down
11036 * make sure that it's installed properly now.
11038 if (dev->features & NETIF_F_LOOPBACK)
11039 tg3_set_loopback(dev, dev->features);
11044 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11045 struct tg3_napi *tnapi = &tp->napi[i];
11046 free_irq(tnapi->irq_vec, tnapi);
11050 tg3_napi_disable(tp);
11052 tg3_free_consistent(tp);
11060 static void tg3_stop(struct tg3 *tp)
11064 tg3_reset_task_cancel(tp);
11065 tg3_netif_stop(tp);
11067 tg3_timer_stop(tp);
11069 tg3_hwmon_close(tp);
11073 tg3_full_lock(tp, 1);
11075 tg3_disable_ints(tp);
11077 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11078 tg3_free_rings(tp);
11079 tg3_flag_clear(tp, INIT_COMPLETE);
11081 tg3_full_unlock(tp);
11083 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11084 struct tg3_napi *tnapi = &tp->napi[i];
11085 free_irq(tnapi->irq_vec, tnapi);
11092 tg3_free_consistent(tp);
11095 static int tg3_open(struct net_device *dev)
11097 struct tg3 *tp = netdev_priv(dev);
11100 if (tp->fw_needed) {
11101 err = tg3_request_firmware(tp);
11102 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11104 netdev_warn(tp->dev, "EEE capability disabled\n");
11105 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11106 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11107 netdev_warn(tp->dev, "EEE capability restored\n");
11108 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11110 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11114 netdev_warn(tp->dev, "TSO capability disabled\n");
11115 tg3_flag_clear(tp, TSO_CAPABLE);
11116 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11117 netdev_notice(tp->dev, "TSO capability restored\n");
11118 tg3_flag_set(tp, TSO_CAPABLE);
11122 tg3_carrier_off(tp);
11124 err = tg3_power_up(tp);
11128 tg3_full_lock(tp, 0);
11130 tg3_disable_ints(tp);
11131 tg3_flag_clear(tp, INIT_COMPLETE);
11133 tg3_full_unlock(tp);
11135 err = tg3_start(tp,
11136 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11139 tg3_frob_aux_power(tp, false);
11140 pci_set_power_state(tp->pdev, PCI_D3hot);
11143 if (tg3_flag(tp, PTP_CAPABLE)) {
11144 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
11146 if (IS_ERR(tp->ptp_clock))
11147 tp->ptp_clock = NULL;
11153 static int tg3_close(struct net_device *dev)
11155 struct tg3 *tp = netdev_priv(dev);
11161 /* Clear stats across close / open calls */
11162 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11163 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11165 tg3_power_down(tp);
11167 tg3_carrier_off(tp);
11172 static inline u64 get_stat64(tg3_stat64_t *val)
11174 return ((u64)val->high << 32) | ((u64)val->low);
11177 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11179 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11181 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11182 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11183 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11186 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11187 tg3_writephy(tp, MII_TG3_TEST1,
11188 val | MII_TG3_TEST1_CRC_EN);
11189 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11193 tp->phy_crc_errors += val;
11195 return tp->phy_crc_errors;
11198 return get_stat64(&hw_stats->rx_fcs_errors);
11201 #define ESTAT_ADD(member) \
11202 estats->member = old_estats->member + \
11203 get_stat64(&hw_stats->member)
11205 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11207 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11208 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11210 ESTAT_ADD(rx_octets);
11211 ESTAT_ADD(rx_fragments);
11212 ESTAT_ADD(rx_ucast_packets);
11213 ESTAT_ADD(rx_mcast_packets);
11214 ESTAT_ADD(rx_bcast_packets);
11215 ESTAT_ADD(rx_fcs_errors);
11216 ESTAT_ADD(rx_align_errors);
11217 ESTAT_ADD(rx_xon_pause_rcvd);
11218 ESTAT_ADD(rx_xoff_pause_rcvd);
11219 ESTAT_ADD(rx_mac_ctrl_rcvd);
11220 ESTAT_ADD(rx_xoff_entered);
11221 ESTAT_ADD(rx_frame_too_long_errors);
11222 ESTAT_ADD(rx_jabbers);
11223 ESTAT_ADD(rx_undersize_packets);
11224 ESTAT_ADD(rx_in_length_errors);
11225 ESTAT_ADD(rx_out_length_errors);
11226 ESTAT_ADD(rx_64_or_less_octet_packets);
11227 ESTAT_ADD(rx_65_to_127_octet_packets);
11228 ESTAT_ADD(rx_128_to_255_octet_packets);
11229 ESTAT_ADD(rx_256_to_511_octet_packets);
11230 ESTAT_ADD(rx_512_to_1023_octet_packets);
11231 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11232 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11233 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11234 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11235 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11237 ESTAT_ADD(tx_octets);
11238 ESTAT_ADD(tx_collisions);
11239 ESTAT_ADD(tx_xon_sent);
11240 ESTAT_ADD(tx_xoff_sent);
11241 ESTAT_ADD(tx_flow_control);
11242 ESTAT_ADD(tx_mac_errors);
11243 ESTAT_ADD(tx_single_collisions);
11244 ESTAT_ADD(tx_mult_collisions);
11245 ESTAT_ADD(tx_deferred);
11246 ESTAT_ADD(tx_excessive_collisions);
11247 ESTAT_ADD(tx_late_collisions);
11248 ESTAT_ADD(tx_collide_2times);
11249 ESTAT_ADD(tx_collide_3times);
11250 ESTAT_ADD(tx_collide_4times);
11251 ESTAT_ADD(tx_collide_5times);
11252 ESTAT_ADD(tx_collide_6times);
11253 ESTAT_ADD(tx_collide_7times);
11254 ESTAT_ADD(tx_collide_8times);
11255 ESTAT_ADD(tx_collide_9times);
11256 ESTAT_ADD(tx_collide_10times);
11257 ESTAT_ADD(tx_collide_11times);
11258 ESTAT_ADD(tx_collide_12times);
11259 ESTAT_ADD(tx_collide_13times);
11260 ESTAT_ADD(tx_collide_14times);
11261 ESTAT_ADD(tx_collide_15times);
11262 ESTAT_ADD(tx_ucast_packets);
11263 ESTAT_ADD(tx_mcast_packets);
11264 ESTAT_ADD(tx_bcast_packets);
11265 ESTAT_ADD(tx_carrier_sense_errors);
11266 ESTAT_ADD(tx_discards);
11267 ESTAT_ADD(tx_errors);
11269 ESTAT_ADD(dma_writeq_full);
11270 ESTAT_ADD(dma_write_prioq_full);
11271 ESTAT_ADD(rxbds_empty);
11272 ESTAT_ADD(rx_discards);
11273 ESTAT_ADD(rx_errors);
11274 ESTAT_ADD(rx_threshold_hit);
11276 ESTAT_ADD(dma_readq_full);
11277 ESTAT_ADD(dma_read_prioq_full);
11278 ESTAT_ADD(tx_comp_queue_full);
11280 ESTAT_ADD(ring_set_send_prod_index);
11281 ESTAT_ADD(ring_status_update);
11282 ESTAT_ADD(nic_irqs);
11283 ESTAT_ADD(nic_avoided_irqs);
11284 ESTAT_ADD(nic_tx_threshold_hit);
11286 ESTAT_ADD(mbuf_lwm_thresh_hit);
11289 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11291 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11292 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11294 stats->rx_packets = old_stats->rx_packets +
11295 get_stat64(&hw_stats->rx_ucast_packets) +
11296 get_stat64(&hw_stats->rx_mcast_packets) +
11297 get_stat64(&hw_stats->rx_bcast_packets);
11299 stats->tx_packets = old_stats->tx_packets +
11300 get_stat64(&hw_stats->tx_ucast_packets) +
11301 get_stat64(&hw_stats->tx_mcast_packets) +
11302 get_stat64(&hw_stats->tx_bcast_packets);
11304 stats->rx_bytes = old_stats->rx_bytes +
11305 get_stat64(&hw_stats->rx_octets);
11306 stats->tx_bytes = old_stats->tx_bytes +
11307 get_stat64(&hw_stats->tx_octets);
11309 stats->rx_errors = old_stats->rx_errors +
11310 get_stat64(&hw_stats->rx_errors);
11311 stats->tx_errors = old_stats->tx_errors +
11312 get_stat64(&hw_stats->tx_errors) +
11313 get_stat64(&hw_stats->tx_mac_errors) +
11314 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11315 get_stat64(&hw_stats->tx_discards);
11317 stats->multicast = old_stats->multicast +
11318 get_stat64(&hw_stats->rx_mcast_packets);
11319 stats->collisions = old_stats->collisions +
11320 get_stat64(&hw_stats->tx_collisions);
11322 stats->rx_length_errors = old_stats->rx_length_errors +
11323 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11324 get_stat64(&hw_stats->rx_undersize_packets);
11326 stats->rx_over_errors = old_stats->rx_over_errors +
11327 get_stat64(&hw_stats->rxbds_empty);
11328 stats->rx_frame_errors = old_stats->rx_frame_errors +
11329 get_stat64(&hw_stats->rx_align_errors);
11330 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11331 get_stat64(&hw_stats->tx_discards);
11332 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11333 get_stat64(&hw_stats->tx_carrier_sense_errors);
11335 stats->rx_crc_errors = old_stats->rx_crc_errors +
11336 tg3_calc_crc_errors(tp);
11338 stats->rx_missed_errors = old_stats->rx_missed_errors +
11339 get_stat64(&hw_stats->rx_discards);
11341 stats->rx_dropped = tp->rx_dropped;
11342 stats->tx_dropped = tp->tx_dropped;
11345 static int tg3_get_regs_len(struct net_device *dev)
11347 return TG3_REG_BLK_SIZE;
11350 static void tg3_get_regs(struct net_device *dev,
11351 struct ethtool_regs *regs, void *_p)
11353 struct tg3 *tp = netdev_priv(dev);
11357 memset(_p, 0, TG3_REG_BLK_SIZE);
11359 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11362 tg3_full_lock(tp, 0);
11364 tg3_dump_legacy_regs(tp, (u32 *)_p);
11366 tg3_full_unlock(tp);
11369 static int tg3_get_eeprom_len(struct net_device *dev)
11371 struct tg3 *tp = netdev_priv(dev);
11373 return tp->nvram_size;
11376 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11378 struct tg3 *tp = netdev_priv(dev);
11381 u32 i, offset, len, b_offset, b_count;
11384 if (tg3_flag(tp, NO_NVRAM))
11387 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11390 offset = eeprom->offset;
11394 eeprom->magic = TG3_EEPROM_MAGIC;
11397 /* adjustments to start on required 4 byte boundary */
11398 b_offset = offset & 3;
11399 b_count = 4 - b_offset;
11400 if (b_count > len) {
11401 /* i.e. offset=1 len=2 */
11404 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11407 memcpy(data, ((char *)&val) + b_offset, b_count);
11410 eeprom->len += b_count;
11413 /* read bytes up to the last 4 byte boundary */
11414 pd = &data[eeprom->len];
11415 for (i = 0; i < (len - (len & 3)); i += 4) {
11416 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11421 memcpy(pd + i, &val, 4);
11426 /* read last bytes not ending on 4 byte boundary */
11427 pd = &data[eeprom->len];
11429 b_offset = offset + len - b_count;
11430 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11433 memcpy(pd, &val, b_count);
11434 eeprom->len += b_count;
11439 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11441 struct tg3 *tp = netdev_priv(dev);
11443 u32 offset, len, b_offset, odd_len;
11447 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11450 if (tg3_flag(tp, NO_NVRAM) ||
11451 eeprom->magic != TG3_EEPROM_MAGIC)
11454 offset = eeprom->offset;
11457 if ((b_offset = (offset & 3))) {
11458 /* adjustments to start on required 4 byte boundary */
11459 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11470 /* adjustments to end on required 4 byte boundary */
11472 len = (len + 3) & ~3;
11473 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11479 if (b_offset || odd_len) {
11480 buf = kmalloc(len, GFP_KERNEL);
11484 memcpy(buf, &start, 4);
11486 memcpy(buf+len-4, &end, 4);
11487 memcpy(buf + b_offset, data, eeprom->len);
11490 ret = tg3_nvram_write_block(tp, offset, len, buf);
11498 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11500 struct tg3 *tp = netdev_priv(dev);
11502 if (tg3_flag(tp, USE_PHYLIB)) {
11503 struct phy_device *phydev;
11504 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11506 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11507 return phy_ethtool_gset(phydev, cmd);
11510 cmd->supported = (SUPPORTED_Autoneg);
11512 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11513 cmd->supported |= (SUPPORTED_1000baseT_Half |
11514 SUPPORTED_1000baseT_Full);
11516 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11517 cmd->supported |= (SUPPORTED_100baseT_Half |
11518 SUPPORTED_100baseT_Full |
11519 SUPPORTED_10baseT_Half |
11520 SUPPORTED_10baseT_Full |
11522 cmd->port = PORT_TP;
11524 cmd->supported |= SUPPORTED_FIBRE;
11525 cmd->port = PORT_FIBRE;
11528 cmd->advertising = tp->link_config.advertising;
11529 if (tg3_flag(tp, PAUSE_AUTONEG)) {
11530 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11531 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11532 cmd->advertising |= ADVERTISED_Pause;
11534 cmd->advertising |= ADVERTISED_Pause |
11535 ADVERTISED_Asym_Pause;
11537 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11538 cmd->advertising |= ADVERTISED_Asym_Pause;
11541 if (netif_running(dev) && tp->link_up) {
11542 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11543 cmd->duplex = tp->link_config.active_duplex;
11544 cmd->lp_advertising = tp->link_config.rmt_adv;
11545 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11546 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11547 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11549 cmd->eth_tp_mdix = ETH_TP_MDI;
11552 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11553 cmd->duplex = DUPLEX_UNKNOWN;
11554 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11556 cmd->phy_address = tp->phy_addr;
11557 cmd->transceiver = XCVR_INTERNAL;
11558 cmd->autoneg = tp->link_config.autoneg;
11564 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11566 struct tg3 *tp = netdev_priv(dev);
11567 u32 speed = ethtool_cmd_speed(cmd);
11569 if (tg3_flag(tp, USE_PHYLIB)) {
11570 struct phy_device *phydev;
11571 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11573 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11574 return phy_ethtool_sset(phydev, cmd);
11577 if (cmd->autoneg != AUTONEG_ENABLE &&
11578 cmd->autoneg != AUTONEG_DISABLE)
11581 if (cmd->autoneg == AUTONEG_DISABLE &&
11582 cmd->duplex != DUPLEX_FULL &&
11583 cmd->duplex != DUPLEX_HALF)
11586 if (cmd->autoneg == AUTONEG_ENABLE) {
11587 u32 mask = ADVERTISED_Autoneg |
11589 ADVERTISED_Asym_Pause;
11591 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11592 mask |= ADVERTISED_1000baseT_Half |
11593 ADVERTISED_1000baseT_Full;
11595 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11596 mask |= ADVERTISED_100baseT_Half |
11597 ADVERTISED_100baseT_Full |
11598 ADVERTISED_10baseT_Half |
11599 ADVERTISED_10baseT_Full |
11602 mask |= ADVERTISED_FIBRE;
11604 if (cmd->advertising & ~mask)
11607 mask &= (ADVERTISED_1000baseT_Half |
11608 ADVERTISED_1000baseT_Full |
11609 ADVERTISED_100baseT_Half |
11610 ADVERTISED_100baseT_Full |
11611 ADVERTISED_10baseT_Half |
11612 ADVERTISED_10baseT_Full);
11614 cmd->advertising &= mask;
11616 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
11617 if (speed != SPEED_1000)
11620 if (cmd->duplex != DUPLEX_FULL)
11623 if (speed != SPEED_100 &&
11629 tg3_full_lock(tp, 0);
11631 tp->link_config.autoneg = cmd->autoneg;
11632 if (cmd->autoneg == AUTONEG_ENABLE) {
11633 tp->link_config.advertising = (cmd->advertising |
11634 ADVERTISED_Autoneg);
11635 tp->link_config.speed = SPEED_UNKNOWN;
11636 tp->link_config.duplex = DUPLEX_UNKNOWN;
11638 tp->link_config.advertising = 0;
11639 tp->link_config.speed = speed;
11640 tp->link_config.duplex = cmd->duplex;
11643 tg3_warn_mgmt_link_flap(tp);
11645 if (netif_running(dev))
11646 tg3_setup_phy(tp, 1);
11648 tg3_full_unlock(tp);
11653 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11655 struct tg3 *tp = netdev_priv(dev);
11657 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11658 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11659 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11660 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11663 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11665 struct tg3 *tp = netdev_priv(dev);
11667 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11668 wol->supported = WAKE_MAGIC;
11670 wol->supported = 0;
11672 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11673 wol->wolopts = WAKE_MAGIC;
11674 memset(&wol->sopass, 0, sizeof(wol->sopass));
11677 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11679 struct tg3 *tp = netdev_priv(dev);
11680 struct device *dp = &tp->pdev->dev;
11682 if (wol->wolopts & ~WAKE_MAGIC)
11684 if ((wol->wolopts & WAKE_MAGIC) &&
11685 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11688 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11690 spin_lock_bh(&tp->lock);
11691 if (device_may_wakeup(dp))
11692 tg3_flag_set(tp, WOL_ENABLE);
11694 tg3_flag_clear(tp, WOL_ENABLE);
11695 spin_unlock_bh(&tp->lock);
11700 static u32 tg3_get_msglevel(struct net_device *dev)
11702 struct tg3 *tp = netdev_priv(dev);
11703 return tp->msg_enable;
11706 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11708 struct tg3 *tp = netdev_priv(dev);
11709 tp->msg_enable = value;
11712 static int tg3_nway_reset(struct net_device *dev)
11714 struct tg3 *tp = netdev_priv(dev);
11717 if (!netif_running(dev))
11720 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11723 tg3_warn_mgmt_link_flap(tp);
11725 if (tg3_flag(tp, USE_PHYLIB)) {
11726 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11728 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
11732 spin_lock_bh(&tp->lock);
11734 tg3_readphy(tp, MII_BMCR, &bmcr);
11735 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11736 ((bmcr & BMCR_ANENABLE) ||
11737 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
11738 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
11742 spin_unlock_bh(&tp->lock);
11748 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11750 struct tg3 *tp = netdev_priv(dev);
11752 ering->rx_max_pending = tp->rx_std_ring_mask;
11753 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11754 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11756 ering->rx_jumbo_max_pending = 0;
11758 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11760 ering->rx_pending = tp->rx_pending;
11761 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11762 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11764 ering->rx_jumbo_pending = 0;
11766 ering->tx_pending = tp->napi[0].tx_pending;
11769 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11771 struct tg3 *tp = netdev_priv(dev);
11772 int i, irq_sync = 0, err = 0;
11774 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11775 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
11776 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11777 (ering->tx_pending <= MAX_SKB_FRAGS) ||
11778 (tg3_flag(tp, TSO_BUG) &&
11779 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
11782 if (netif_running(dev)) {
11784 tg3_netif_stop(tp);
11788 tg3_full_lock(tp, irq_sync);
11790 tp->rx_pending = ering->rx_pending;
11792 if (tg3_flag(tp, MAX_RXPEND_64) &&
11793 tp->rx_pending > 63)
11794 tp->rx_pending = 63;
11795 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
11797 for (i = 0; i < tp->irq_max; i++)
11798 tp->napi[i].tx_pending = ering->tx_pending;
11800 if (netif_running(dev)) {
11801 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11802 err = tg3_restart_hw(tp, 0);
11804 tg3_netif_start(tp);
11807 tg3_full_unlock(tp);
11809 if (irq_sync && !err)
11815 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11817 struct tg3 *tp = netdev_priv(dev);
11819 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11821 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11822 epause->rx_pause = 1;
11824 epause->rx_pause = 0;
11826 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11827 epause->tx_pause = 1;
11829 epause->tx_pause = 0;
11832 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11834 struct tg3 *tp = netdev_priv(dev);
11837 if (tp->link_config.autoneg == AUTONEG_ENABLE)
11838 tg3_warn_mgmt_link_flap(tp);
11840 if (tg3_flag(tp, USE_PHYLIB)) {
11842 struct phy_device *phydev;
11844 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11846 if (!(phydev->supported & SUPPORTED_Pause) ||
11847 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
11848 (epause->rx_pause != epause->tx_pause)))
11851 tp->link_config.flowctrl = 0;
11852 if (epause->rx_pause) {
11853 tp->link_config.flowctrl |= FLOW_CTRL_RX;
11855 if (epause->tx_pause) {
11856 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11857 newadv = ADVERTISED_Pause;
11859 newadv = ADVERTISED_Pause |
11860 ADVERTISED_Asym_Pause;
11861 } else if (epause->tx_pause) {
11862 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11863 newadv = ADVERTISED_Asym_Pause;
11867 if (epause->autoneg)
11868 tg3_flag_set(tp, PAUSE_AUTONEG);
11870 tg3_flag_clear(tp, PAUSE_AUTONEG);
11872 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
11873 u32 oldadv = phydev->advertising &
11874 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
11875 if (oldadv != newadv) {
11876 phydev->advertising &=
11877 ~(ADVERTISED_Pause |
11878 ADVERTISED_Asym_Pause);
11879 phydev->advertising |= newadv;
11880 if (phydev->autoneg) {
11882 * Always renegotiate the link to
11883 * inform our link partner of our
11884 * flow control settings, even if the
11885 * flow control is forced. Let
11886 * tg3_adjust_link() do the final
11887 * flow control setup.
11889 return phy_start_aneg(phydev);
11893 if (!epause->autoneg)
11894 tg3_setup_flow_control(tp, 0, 0);
11896 tp->link_config.advertising &=
11897 ~(ADVERTISED_Pause |
11898 ADVERTISED_Asym_Pause);
11899 tp->link_config.advertising |= newadv;
11904 if (netif_running(dev)) {
11905 tg3_netif_stop(tp);
11909 tg3_full_lock(tp, irq_sync);
11911 if (epause->autoneg)
11912 tg3_flag_set(tp, PAUSE_AUTONEG);
11914 tg3_flag_clear(tp, PAUSE_AUTONEG);
11915 if (epause->rx_pause)
11916 tp->link_config.flowctrl |= FLOW_CTRL_RX;
11918 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
11919 if (epause->tx_pause)
11920 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11922 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
11924 if (netif_running(dev)) {
11925 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11926 err = tg3_restart_hw(tp, 0);
11928 tg3_netif_start(tp);
11931 tg3_full_unlock(tp);
11937 static int tg3_get_sset_count(struct net_device *dev, int sset)
11941 return TG3_NUM_TEST;
11943 return TG3_NUM_STATS;
11945 return -EOPNOTSUPP;
11949 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11950 u32 *rules __always_unused)
11952 struct tg3 *tp = netdev_priv(dev);
11954 if (!tg3_flag(tp, SUPPORT_MSIX))
11955 return -EOPNOTSUPP;
11957 switch (info->cmd) {
11958 case ETHTOOL_GRXRINGS:
11959 if (netif_running(tp->dev))
11960 info->data = tp->rxq_cnt;
11962 info->data = num_online_cpus();
11963 if (info->data > TG3_RSS_MAX_NUM_QS)
11964 info->data = TG3_RSS_MAX_NUM_QS;
11967 /* The first interrupt vector only
11968 * handles link interrupts.
11974 return -EOPNOTSUPP;
11978 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11981 struct tg3 *tp = netdev_priv(dev);
11983 if (tg3_flag(tp, SUPPORT_MSIX))
11984 size = TG3_RSS_INDIR_TBL_SIZE;
11989 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11991 struct tg3 *tp = netdev_priv(dev);
11994 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11995 indir[i] = tp->rss_ind_tbl[i];
12000 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
12002 struct tg3 *tp = netdev_priv(dev);
12005 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12006 tp->rss_ind_tbl[i] = indir[i];
12008 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12011 /* It is legal to write the indirection
12012 * table while the device is running.
12014 tg3_full_lock(tp, 0);
12015 tg3_rss_write_indir_tbl(tp);
12016 tg3_full_unlock(tp);
12021 static void tg3_get_channels(struct net_device *dev,
12022 struct ethtool_channels *channel)
12024 struct tg3 *tp = netdev_priv(dev);
12025 u32 deflt_qs = netif_get_num_default_rss_queues();
12027 channel->max_rx = tp->rxq_max;
12028 channel->max_tx = tp->txq_max;
12030 if (netif_running(dev)) {
12031 channel->rx_count = tp->rxq_cnt;
12032 channel->tx_count = tp->txq_cnt;
12035 channel->rx_count = tp->rxq_req;
12037 channel->rx_count = min(deflt_qs, tp->rxq_max);
12040 channel->tx_count = tp->txq_req;
12042 channel->tx_count = min(deflt_qs, tp->txq_max);
12046 static int tg3_set_channels(struct net_device *dev,
12047 struct ethtool_channels *channel)
12049 struct tg3 *tp = netdev_priv(dev);
12051 if (!tg3_flag(tp, SUPPORT_MSIX))
12052 return -EOPNOTSUPP;
12054 if (channel->rx_count > tp->rxq_max ||
12055 channel->tx_count > tp->txq_max)
12058 tp->rxq_req = channel->rx_count;
12059 tp->txq_req = channel->tx_count;
12061 if (!netif_running(dev))
12066 tg3_carrier_off(tp);
12068 tg3_start(tp, true, false, false);
12073 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12075 switch (stringset) {
12077 memcpy(buf, ðtool_stats_keys, sizeof(ethtool_stats_keys));
12080 memcpy(buf, ðtool_test_keys, sizeof(ethtool_test_keys));
12083 WARN_ON(1); /* we need a WARN() */
12088 static int tg3_set_phys_id(struct net_device *dev,
12089 enum ethtool_phys_id_state state)
12091 struct tg3 *tp = netdev_priv(dev);
12093 if (!netif_running(tp->dev))
12097 case ETHTOOL_ID_ACTIVE:
12098 return 1; /* cycle on/off once per second */
12100 case ETHTOOL_ID_ON:
12101 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12102 LED_CTRL_1000MBPS_ON |
12103 LED_CTRL_100MBPS_ON |
12104 LED_CTRL_10MBPS_ON |
12105 LED_CTRL_TRAFFIC_OVERRIDE |
12106 LED_CTRL_TRAFFIC_BLINK |
12107 LED_CTRL_TRAFFIC_LED);
12110 case ETHTOOL_ID_OFF:
12111 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12112 LED_CTRL_TRAFFIC_OVERRIDE);
12115 case ETHTOOL_ID_INACTIVE:
12116 tw32(MAC_LED_CTRL, tp->led_ctrl);
12123 static void tg3_get_ethtool_stats(struct net_device *dev,
12124 struct ethtool_stats *estats, u64 *tmp_stats)
12126 struct tg3 *tp = netdev_priv(dev);
12129 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12131 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12134 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12138 u32 offset = 0, len = 0;
12141 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12144 if (magic == TG3_EEPROM_MAGIC) {
12145 for (offset = TG3_NVM_DIR_START;
12146 offset < TG3_NVM_DIR_END;
12147 offset += TG3_NVM_DIRENT_SIZE) {
12148 if (tg3_nvram_read(tp, offset, &val))
12151 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12152 TG3_NVM_DIRTYPE_EXTVPD)
12156 if (offset != TG3_NVM_DIR_END) {
12157 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12158 if (tg3_nvram_read(tp, offset + 4, &offset))
12161 offset = tg3_nvram_logical_addr(tp, offset);
12165 if (!offset || !len) {
12166 offset = TG3_NVM_VPD_OFF;
12167 len = TG3_NVM_VPD_LEN;
12170 buf = kmalloc(len, GFP_KERNEL);
12174 if (magic == TG3_EEPROM_MAGIC) {
12175 for (i = 0; i < len; i += 4) {
12176 /* The data is in little-endian format in NVRAM.
12177 * Use the big-endian read routines to preserve
12178 * the byte order as it exists in NVRAM.
12180 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12186 unsigned int pos = 0;
12188 ptr = (u8 *)&buf[0];
12189 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12190 cnt = pci_read_vpd(tp->pdev, pos,
12192 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12210 #define NVRAM_TEST_SIZE 0x100
12211 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12212 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12213 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12214 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12215 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12216 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12217 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12218 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12220 static int tg3_test_nvram(struct tg3 *tp)
12222 u32 csum, magic, len;
12224 int i, j, k, err = 0, size;
12226 if (tg3_flag(tp, NO_NVRAM))
12229 if (tg3_nvram_read(tp, 0, &magic) != 0)
12232 if (magic == TG3_EEPROM_MAGIC)
12233 size = NVRAM_TEST_SIZE;
12234 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12235 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12236 TG3_EEPROM_SB_FORMAT_1) {
12237 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12238 case TG3_EEPROM_SB_REVISION_0:
12239 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12241 case TG3_EEPROM_SB_REVISION_2:
12242 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12244 case TG3_EEPROM_SB_REVISION_3:
12245 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12247 case TG3_EEPROM_SB_REVISION_4:
12248 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12250 case TG3_EEPROM_SB_REVISION_5:
12251 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12253 case TG3_EEPROM_SB_REVISION_6:
12254 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12261 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12262 size = NVRAM_SELFBOOT_HW_SIZE;
12266 buf = kmalloc(size, GFP_KERNEL);
12271 for (i = 0, j = 0; i < size; i += 4, j++) {
12272 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12279 /* Selfboot format */
12280 magic = be32_to_cpu(buf[0]);
12281 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12282 TG3_EEPROM_MAGIC_FW) {
12283 u8 *buf8 = (u8 *) buf, csum8 = 0;
12285 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12286 TG3_EEPROM_SB_REVISION_2) {
12287 /* For rev 2, the csum doesn't include the MBA. */
12288 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12290 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12293 for (i = 0; i < size; i++)
12306 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12307 TG3_EEPROM_MAGIC_HW) {
12308 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12309 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12310 u8 *buf8 = (u8 *) buf;
12312 /* Separate the parity bits and the data bytes. */
12313 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12314 if ((i == 0) || (i == 8)) {
12318 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12319 parity[k++] = buf8[i] & msk;
12321 } else if (i == 16) {
12325 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12326 parity[k++] = buf8[i] & msk;
12329 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12330 parity[k++] = buf8[i] & msk;
12333 data[j++] = buf8[i];
12337 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12338 u8 hw8 = hweight8(data[i]);
12340 if ((hw8 & 0x1) && parity[i])
12342 else if (!(hw8 & 0x1) && !parity[i])
12351 /* Bootstrap checksum at offset 0x10 */
12352 csum = calc_crc((unsigned char *) buf, 0x10);
12353 if (csum != le32_to_cpu(buf[0x10/4]))
12356 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12357 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12358 if (csum != le32_to_cpu(buf[0xfc/4]))
12363 buf = tg3_vpd_readblock(tp, &len);
12367 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12369 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12373 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12376 i += PCI_VPD_LRDT_TAG_SIZE;
12377 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12378 PCI_VPD_RO_KEYWORD_CHKSUM);
12382 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12384 for (i = 0; i <= j; i++)
12385 csum8 += ((u8 *)buf)[i];
12399 #define TG3_SERDES_TIMEOUT_SEC 2
12400 #define TG3_COPPER_TIMEOUT_SEC 6
12402 static int tg3_test_link(struct tg3 *tp)
12406 if (!netif_running(tp->dev))
12409 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12410 max = TG3_SERDES_TIMEOUT_SEC;
12412 max = TG3_COPPER_TIMEOUT_SEC;
12414 for (i = 0; i < max; i++) {
12418 if (msleep_interruptible(1000))
12425 /* Only test the commonly used registers */
12426 static int tg3_test_registers(struct tg3 *tp)
12428 int i, is_5705, is_5750;
12429 u32 offset, read_mask, write_mask, val, save_val, read_val;
12433 #define TG3_FL_5705 0x1
12434 #define TG3_FL_NOT_5705 0x2
12435 #define TG3_FL_NOT_5788 0x4
12436 #define TG3_FL_NOT_5750 0x8
12440 /* MAC Control Registers */
12441 { MAC_MODE, TG3_FL_NOT_5705,
12442 0x00000000, 0x00ef6f8c },
12443 { MAC_MODE, TG3_FL_5705,
12444 0x00000000, 0x01ef6b8c },
12445 { MAC_STATUS, TG3_FL_NOT_5705,
12446 0x03800107, 0x00000000 },
12447 { MAC_STATUS, TG3_FL_5705,
12448 0x03800100, 0x00000000 },
12449 { MAC_ADDR_0_HIGH, 0x0000,
12450 0x00000000, 0x0000ffff },
12451 { MAC_ADDR_0_LOW, 0x0000,
12452 0x00000000, 0xffffffff },
12453 { MAC_RX_MTU_SIZE, 0x0000,
12454 0x00000000, 0x0000ffff },
12455 { MAC_TX_MODE, 0x0000,
12456 0x00000000, 0x00000070 },
12457 { MAC_TX_LENGTHS, 0x0000,
12458 0x00000000, 0x00003fff },
12459 { MAC_RX_MODE, TG3_FL_NOT_5705,
12460 0x00000000, 0x000007fc },
12461 { MAC_RX_MODE, TG3_FL_5705,
12462 0x00000000, 0x000007dc },
12463 { MAC_HASH_REG_0, 0x0000,
12464 0x00000000, 0xffffffff },
12465 { MAC_HASH_REG_1, 0x0000,
12466 0x00000000, 0xffffffff },
12467 { MAC_HASH_REG_2, 0x0000,
12468 0x00000000, 0xffffffff },
12469 { MAC_HASH_REG_3, 0x0000,
12470 0x00000000, 0xffffffff },
12472 /* Receive Data and Receive BD Initiator Control Registers. */
12473 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12474 0x00000000, 0xffffffff },
12475 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12476 0x00000000, 0xffffffff },
12477 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12478 0x00000000, 0x00000003 },
12479 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12480 0x00000000, 0xffffffff },
12481 { RCVDBDI_STD_BD+0, 0x0000,
12482 0x00000000, 0xffffffff },
12483 { RCVDBDI_STD_BD+4, 0x0000,
12484 0x00000000, 0xffffffff },
12485 { RCVDBDI_STD_BD+8, 0x0000,
12486 0x00000000, 0xffff0002 },
12487 { RCVDBDI_STD_BD+0xc, 0x0000,
12488 0x00000000, 0xffffffff },
12490 /* Receive BD Initiator Control Registers. */
12491 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12492 0x00000000, 0xffffffff },
12493 { RCVBDI_STD_THRESH, TG3_FL_5705,
12494 0x00000000, 0x000003ff },
12495 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12496 0x00000000, 0xffffffff },
12498 /* Host Coalescing Control Registers. */
12499 { HOSTCC_MODE, TG3_FL_NOT_5705,
12500 0x00000000, 0x00000004 },
12501 { HOSTCC_MODE, TG3_FL_5705,
12502 0x00000000, 0x000000f6 },
12503 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12504 0x00000000, 0xffffffff },
12505 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12506 0x00000000, 0x000003ff },
12507 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12508 0x00000000, 0xffffffff },
12509 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12510 0x00000000, 0x000003ff },
12511 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12512 0x00000000, 0xffffffff },
12513 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12514 0x00000000, 0x000000ff },
12515 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12516 0x00000000, 0xffffffff },
12517 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12518 0x00000000, 0x000000ff },
12519 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12520 0x00000000, 0xffffffff },
12521 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12522 0x00000000, 0xffffffff },
12523 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12524 0x00000000, 0xffffffff },
12525 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12526 0x00000000, 0x000000ff },
12527 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12528 0x00000000, 0xffffffff },
12529 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12530 0x00000000, 0x000000ff },
12531 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12532 0x00000000, 0xffffffff },
12533 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12534 0x00000000, 0xffffffff },
12535 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12536 0x00000000, 0xffffffff },
12537 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12538 0x00000000, 0xffffffff },
12539 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12540 0x00000000, 0xffffffff },
12541 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12542 0xffffffff, 0x00000000 },
12543 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12544 0xffffffff, 0x00000000 },
12546 /* Buffer Manager Control Registers. */
12547 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12548 0x00000000, 0x007fff80 },
12549 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12550 0x00000000, 0x007fffff },
12551 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12552 0x00000000, 0x0000003f },
12553 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12554 0x00000000, 0x000001ff },
12555 { BUFMGR_MB_HIGH_WATER, 0x0000,
12556 0x00000000, 0x000001ff },
12557 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12558 0xffffffff, 0x00000000 },
12559 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12560 0xffffffff, 0x00000000 },
12562 /* Mailbox Registers */
12563 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12564 0x00000000, 0x000001ff },
12565 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12566 0x00000000, 0x000001ff },
12567 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12568 0x00000000, 0x000007ff },
12569 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12570 0x00000000, 0x000001ff },
12572 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12575 is_5705 = is_5750 = 0;
12576 if (tg3_flag(tp, 5705_PLUS)) {
12578 if (tg3_flag(tp, 5750_PLUS))
12582 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12583 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12586 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12589 if (tg3_flag(tp, IS_5788) &&
12590 (reg_tbl[i].flags & TG3_FL_NOT_5788))
12593 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12596 offset = (u32) reg_tbl[i].offset;
12597 read_mask = reg_tbl[i].read_mask;
12598 write_mask = reg_tbl[i].write_mask;
12600 /* Save the original register content */
12601 save_val = tr32(offset);
12603 /* Determine the read-only value. */
12604 read_val = save_val & read_mask;
12606 /* Write zero to the register, then make sure the read-only bits
12607 * are not changed and the read/write bits are all zeros.
12611 val = tr32(offset);
12613 /* Test the read-only and read/write bits. */
12614 if (((val & read_mask) != read_val) || (val & write_mask))
12617 /* Write ones to all the bits defined by RdMask and WrMask, then
12618 * make sure the read-only bits are not changed and the
12619 * read/write bits are all ones.
12621 tw32(offset, read_mask | write_mask);
12623 val = tr32(offset);
12625 /* Test the read-only bits. */
12626 if ((val & read_mask) != read_val)
12629 /* Test the read/write bits. */
12630 if ((val & write_mask) != write_mask)
12633 tw32(offset, save_val);
12639 if (netif_msg_hw(tp))
12640 netdev_err(tp->dev,
12641 "Register test failed at offset %x\n", offset);
12642 tw32(offset, save_val);
12646 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12648 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12652 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12653 for (j = 0; j < len; j += 4) {
12656 tg3_write_mem(tp, offset + j, test_pattern[i]);
12657 tg3_read_mem(tp, offset + j, &val);
12658 if (val != test_pattern[i])
12665 static int tg3_test_memory(struct tg3 *tp)
12667 static struct mem_entry {
12670 } mem_tbl_570x[] = {
12671 { 0x00000000, 0x00b50},
12672 { 0x00002000, 0x1c000},
12673 { 0xffffffff, 0x00000}
12674 }, mem_tbl_5705[] = {
12675 { 0x00000100, 0x0000c},
12676 { 0x00000200, 0x00008},
12677 { 0x00004000, 0x00800},
12678 { 0x00006000, 0x01000},
12679 { 0x00008000, 0x02000},
12680 { 0x00010000, 0x0e000},
12681 { 0xffffffff, 0x00000}
12682 }, mem_tbl_5755[] = {
12683 { 0x00000200, 0x00008},
12684 { 0x00004000, 0x00800},
12685 { 0x00006000, 0x00800},
12686 { 0x00008000, 0x02000},
12687 { 0x00010000, 0x0c000},
12688 { 0xffffffff, 0x00000}
12689 }, mem_tbl_5906[] = {
12690 { 0x00000200, 0x00008},
12691 { 0x00004000, 0x00400},
12692 { 0x00006000, 0x00400},
12693 { 0x00008000, 0x01000},
12694 { 0x00010000, 0x01000},
12695 { 0xffffffff, 0x00000}
12696 }, mem_tbl_5717[] = {
12697 { 0x00000200, 0x00008},
12698 { 0x00010000, 0x0a000},
12699 { 0x00020000, 0x13c00},
12700 { 0xffffffff, 0x00000}
12701 }, mem_tbl_57765[] = {
12702 { 0x00000200, 0x00008},
12703 { 0x00004000, 0x00800},
12704 { 0x00006000, 0x09800},
12705 { 0x00010000, 0x0a000},
12706 { 0xffffffff, 0x00000}
12708 struct mem_entry *mem_tbl;
12712 if (tg3_flag(tp, 5717_PLUS))
12713 mem_tbl = mem_tbl_5717;
12714 else if (tg3_flag(tp, 57765_CLASS) ||
12715 tg3_asic_rev(tp) == ASIC_REV_5762)
12716 mem_tbl = mem_tbl_57765;
12717 else if (tg3_flag(tp, 5755_PLUS))
12718 mem_tbl = mem_tbl_5755;
12719 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
12720 mem_tbl = mem_tbl_5906;
12721 else if (tg3_flag(tp, 5705_PLUS))
12722 mem_tbl = mem_tbl_5705;
12724 mem_tbl = mem_tbl_570x;
12726 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
12727 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12735 #define TG3_TSO_MSS 500
12737 #define TG3_TSO_IP_HDR_LEN 20
12738 #define TG3_TSO_TCP_HDR_LEN 20
12739 #define TG3_TSO_TCP_OPT_LEN 12
12741 static const u8 tg3_tso_header[] = {
12743 0x45, 0x00, 0x00, 0x00,
12744 0x00, 0x00, 0x40, 0x00,
12745 0x40, 0x06, 0x00, 0x00,
12746 0x0a, 0x00, 0x00, 0x01,
12747 0x0a, 0x00, 0x00, 0x02,
12748 0x0d, 0x00, 0xe0, 0x00,
12749 0x00, 0x00, 0x01, 0x00,
12750 0x00, 0x00, 0x02, 0x00,
12751 0x80, 0x10, 0x10, 0x00,
12752 0x14, 0x09, 0x00, 0x00,
12753 0x01, 0x01, 0x08, 0x0a,
12754 0x11, 0x11, 0x11, 0x11,
12755 0x11, 0x11, 0x11, 0x11,
12758 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
12760 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
12761 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
12763 struct sk_buff *skb;
12764 u8 *tx_data, *rx_data;
12766 int num_pkts, tx_len, rx_len, i, err;
12767 struct tg3_rx_buffer_desc *desc;
12768 struct tg3_napi *tnapi, *rnapi;
12769 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
12771 tnapi = &tp->napi[0];
12772 rnapi = &tp->napi[0];
12773 if (tp->irq_cnt > 1) {
12774 if (tg3_flag(tp, ENABLE_RSS))
12775 rnapi = &tp->napi[1];
12776 if (tg3_flag(tp, ENABLE_TSS))
12777 tnapi = &tp->napi[1];
12779 coal_now = tnapi->coal_now | rnapi->coal_now;
12784 skb = netdev_alloc_skb(tp->dev, tx_len);
12788 tx_data = skb_put(skb, tx_len);
12789 memcpy(tx_data, tp->dev->dev_addr, 6);
12790 memset(tx_data + 6, 0x0, 8);
12792 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
12794 if (tso_loopback) {
12795 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12797 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12798 TG3_TSO_TCP_OPT_LEN;
12800 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12801 sizeof(tg3_tso_header));
12804 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12805 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12807 /* Set the total length field in the IP header */
12808 iph->tot_len = htons((u16)(mss + hdr_len));
12810 base_flags = (TXD_FLAG_CPU_PRE_DMA |
12811 TXD_FLAG_CPU_POST_DMA);
12813 if (tg3_flag(tp, HW_TSO_1) ||
12814 tg3_flag(tp, HW_TSO_2) ||
12815 tg3_flag(tp, HW_TSO_3)) {
12817 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
12818 th = (struct tcphdr *)&tx_data[val];
12821 base_flags |= TXD_FLAG_TCPUDP_CSUM;
12823 if (tg3_flag(tp, HW_TSO_3)) {
12824 mss |= (hdr_len & 0xc) << 12;
12825 if (hdr_len & 0x10)
12826 base_flags |= 0x00000010;
12827 base_flags |= (hdr_len & 0x3e0) << 5;
12828 } else if (tg3_flag(tp, HW_TSO_2))
12829 mss |= hdr_len << 9;
12830 else if (tg3_flag(tp, HW_TSO_1) ||
12831 tg3_asic_rev(tp) == ASIC_REV_5705) {
12832 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
12834 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
12837 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
12840 data_off = ETH_HLEN;
12842 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12843 tx_len > VLAN_ETH_FRAME_LEN)
12844 base_flags |= TXD_FLAG_JMB_PKT;
12847 for (i = data_off; i < tx_len; i++)
12848 tx_data[i] = (u8) (i & 0xff);
12850 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12851 if (pci_dma_mapping_error(tp->pdev, map)) {
12852 dev_kfree_skb(skb);
12856 val = tnapi->tx_prod;
12857 tnapi->tx_buffers[val].skb = skb;
12858 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12860 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12865 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
12867 budget = tg3_tx_avail(tnapi);
12868 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
12869 base_flags | TXD_FLAG_END, mss, 0)) {
12870 tnapi->tx_buffers[val].skb = NULL;
12871 dev_kfree_skb(skb);
12877 /* Sync BD data before updating mailbox */
12880 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
12881 tr32_mailbox(tnapi->prodmbox);
12885 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
12886 for (i = 0; i < 35; i++) {
12887 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12892 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12893 rx_idx = rnapi->hw_status->idx[0].rx_producer;
12894 if ((tx_idx == tnapi->tx_prod) &&
12895 (rx_idx == (rx_start_idx + num_pkts)))
12899 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
12900 dev_kfree_skb(skb);
12902 if (tx_idx != tnapi->tx_prod)
12905 if (rx_idx != rx_start_idx + num_pkts)
12909 while (rx_idx != rx_start_idx) {
12910 desc = &rnapi->rx_rcb[rx_start_idx++];
12911 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12912 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
12914 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
12915 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
12918 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
12921 if (!tso_loopback) {
12922 if (rx_len != tx_len)
12925 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
12926 if (opaque_key != RXD_OPAQUE_RING_STD)
12929 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
12932 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
12933 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
12934 >> RXD_TCPCSUM_SHIFT != 0xffff) {
12938 if (opaque_key == RXD_OPAQUE_RING_STD) {
12939 rx_data = tpr->rx_std_buffers[desc_idx].data;
12940 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
12942 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
12943 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
12944 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
12949 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12950 PCI_DMA_FROMDEVICE);
12952 rx_data += TG3_RX_OFFSET(tp);
12953 for (i = data_off; i < rx_len; i++, val++) {
12954 if (*(rx_data + i) != (u8) (val & 0xff))
12961 /* tg3_free_rings will unmap and free the rx_data */
12966 #define TG3_STD_LOOPBACK_FAILED 1
12967 #define TG3_JMB_LOOPBACK_FAILED 2
12968 #define TG3_TSO_LOOPBACK_FAILED 4
12969 #define TG3_LOOPBACK_FAILED \
12970 (TG3_STD_LOOPBACK_FAILED | \
12971 TG3_JMB_LOOPBACK_FAILED | \
12972 TG3_TSO_LOOPBACK_FAILED)
12974 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
12978 u32 jmb_pkt_sz = 9000;
12981 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
12983 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
12984 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
12986 if (!netif_running(tp->dev)) {
12987 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12988 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12990 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12994 err = tg3_reset_hw(tp, 1);
12996 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12997 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12999 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13003 if (tg3_flag(tp, ENABLE_RSS)) {
13006 /* Reroute all rx packets to the 1st queue */
13007 for (i = MAC_RSS_INDIR_TBL_0;
13008 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13012 /* HW errata - mac loopback fails in some cases on 5780.
13013 * Normal traffic and PHY loopback are not affected by
13014 * errata. Also, the MAC loopback test is deprecated for
13015 * all newer ASIC revisions.
13017 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13018 !tg3_flag(tp, CPMU_PRESENT)) {
13019 tg3_mac_loopback(tp, true);
13021 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13022 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13024 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13025 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13026 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13028 tg3_mac_loopback(tp, false);
13031 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13032 !tg3_flag(tp, USE_PHYLIB)) {
13035 tg3_phy_lpbk_set(tp, 0, false);
13037 /* Wait for link */
13038 for (i = 0; i < 100; i++) {
13039 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13044 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13045 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13046 if (tg3_flag(tp, TSO_CAPABLE) &&
13047 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13048 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13049 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13050 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13051 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13054 tg3_phy_lpbk_set(tp, 0, true);
13056 /* All link indications report up, but the hardware
13057 * isn't really ready for about 20 msec. Double it
13062 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13063 data[TG3_EXT_LOOPB_TEST] |=
13064 TG3_STD_LOOPBACK_FAILED;
13065 if (tg3_flag(tp, TSO_CAPABLE) &&
13066 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13067 data[TG3_EXT_LOOPB_TEST] |=
13068 TG3_TSO_LOOPBACK_FAILED;
13069 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13070 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13071 data[TG3_EXT_LOOPB_TEST] |=
13072 TG3_JMB_LOOPBACK_FAILED;
13075 /* Re-enable gphy autopowerdown. */
13076 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13077 tg3_phy_toggle_apd(tp, true);
13080 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13081 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13084 tp->phy_flags |= eee_cap;
13089 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13092 struct tg3 *tp = netdev_priv(dev);
13093 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13095 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
13096 tg3_power_up(tp)) {
13097 etest->flags |= ETH_TEST_FL_FAILED;
13098 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13102 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13104 if (tg3_test_nvram(tp) != 0) {
13105 etest->flags |= ETH_TEST_FL_FAILED;
13106 data[TG3_NVRAM_TEST] = 1;
13108 if (!doextlpbk && tg3_test_link(tp)) {
13109 etest->flags |= ETH_TEST_FL_FAILED;
13110 data[TG3_LINK_TEST] = 1;
13112 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13113 int err, err2 = 0, irq_sync = 0;
13115 if (netif_running(dev)) {
13117 tg3_netif_stop(tp);
13121 tg3_full_lock(tp, irq_sync);
13122 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13123 err = tg3_nvram_lock(tp);
13124 tg3_halt_cpu(tp, RX_CPU_BASE);
13125 if (!tg3_flag(tp, 5705_PLUS))
13126 tg3_halt_cpu(tp, TX_CPU_BASE);
13128 tg3_nvram_unlock(tp);
13130 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13133 if (tg3_test_registers(tp) != 0) {
13134 etest->flags |= ETH_TEST_FL_FAILED;
13135 data[TG3_REGISTER_TEST] = 1;
13138 if (tg3_test_memory(tp) != 0) {
13139 etest->flags |= ETH_TEST_FL_FAILED;
13140 data[TG3_MEMORY_TEST] = 1;
13144 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13146 if (tg3_test_loopback(tp, data, doextlpbk))
13147 etest->flags |= ETH_TEST_FL_FAILED;
13149 tg3_full_unlock(tp);
13151 if (tg3_test_interrupt(tp) != 0) {
13152 etest->flags |= ETH_TEST_FL_FAILED;
13153 data[TG3_INTERRUPT_TEST] = 1;
13156 tg3_full_lock(tp, 0);
13158 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13159 if (netif_running(dev)) {
13160 tg3_flag_set(tp, INIT_COMPLETE);
13161 err2 = tg3_restart_hw(tp, 1);
13163 tg3_netif_start(tp);
13166 tg3_full_unlock(tp);
13168 if (irq_sync && !err2)
13171 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13172 tg3_power_down(tp);
13176 static int tg3_hwtstamp_ioctl(struct net_device *dev,
13177 struct ifreq *ifr, int cmd)
13179 struct tg3 *tp = netdev_priv(dev);
13180 struct hwtstamp_config stmpconf;
13182 if (!tg3_flag(tp, PTP_CAPABLE))
13185 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13188 if (stmpconf.flags)
13191 switch (stmpconf.tx_type) {
13192 case HWTSTAMP_TX_ON:
13193 tg3_flag_set(tp, TX_TSTAMP_EN);
13195 case HWTSTAMP_TX_OFF:
13196 tg3_flag_clear(tp, TX_TSTAMP_EN);
13202 switch (stmpconf.rx_filter) {
13203 case HWTSTAMP_FILTER_NONE:
13206 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13207 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13208 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13210 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13211 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13212 TG3_RX_PTP_CTL_SYNC_EVNT;
13214 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13215 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13216 TG3_RX_PTP_CTL_DELAY_REQ;
13218 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13219 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13220 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13222 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13223 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13224 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13226 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13227 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13228 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13230 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13231 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13232 TG3_RX_PTP_CTL_SYNC_EVNT;
13234 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13235 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13236 TG3_RX_PTP_CTL_SYNC_EVNT;
13238 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13239 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13240 TG3_RX_PTP_CTL_SYNC_EVNT;
13242 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13243 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13244 TG3_RX_PTP_CTL_DELAY_REQ;
13246 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13247 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13248 TG3_RX_PTP_CTL_DELAY_REQ;
13250 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13251 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13252 TG3_RX_PTP_CTL_DELAY_REQ;
13258 if (netif_running(dev) && tp->rxptpctl)
13259 tw32(TG3_RX_PTP_CTL,
13260 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13262 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13266 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13268 struct mii_ioctl_data *data = if_mii(ifr);
13269 struct tg3 *tp = netdev_priv(dev);
13272 if (tg3_flag(tp, USE_PHYLIB)) {
13273 struct phy_device *phydev;
13274 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13276 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
13277 return phy_mii_ioctl(phydev, ifr, cmd);
13282 data->phy_id = tp->phy_addr;
13285 case SIOCGMIIREG: {
13288 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13289 break; /* We have no PHY */
13291 if (!netif_running(dev))
13294 spin_lock_bh(&tp->lock);
13295 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13296 data->reg_num & 0x1f, &mii_regval);
13297 spin_unlock_bh(&tp->lock);
13299 data->val_out = mii_regval;
13305 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13306 break; /* We have no PHY */
13308 if (!netif_running(dev))
13311 spin_lock_bh(&tp->lock);
13312 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13313 data->reg_num & 0x1f, data->val_in);
13314 spin_unlock_bh(&tp->lock);
13318 case SIOCSHWTSTAMP:
13319 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13325 return -EOPNOTSUPP;
13328 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13330 struct tg3 *tp = netdev_priv(dev);
13332 memcpy(ec, &tp->coal, sizeof(*ec));
13336 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13338 struct tg3 *tp = netdev_priv(dev);
13339 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13340 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13342 if (!tg3_flag(tp, 5705_PLUS)) {
13343 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13344 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13345 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13346 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13349 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13350 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13351 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13352 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13353 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13354 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13355 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13356 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13357 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13358 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13361 /* No rx interrupts will be generated if both are zero */
13362 if ((ec->rx_coalesce_usecs == 0) &&
13363 (ec->rx_max_coalesced_frames == 0))
13366 /* No tx interrupts will be generated if both are zero */
13367 if ((ec->tx_coalesce_usecs == 0) &&
13368 (ec->tx_max_coalesced_frames == 0))
13371 /* Only copy relevant parameters, ignore all others. */
13372 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13373 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13374 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13375 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13376 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13377 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13378 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13379 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13380 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13382 if (netif_running(dev)) {
13383 tg3_full_lock(tp, 0);
13384 __tg3_set_coalesce(tp, &tp->coal);
13385 tg3_full_unlock(tp);
13390 static const struct ethtool_ops tg3_ethtool_ops = {
13391 .get_settings = tg3_get_settings,
13392 .set_settings = tg3_set_settings,
13393 .get_drvinfo = tg3_get_drvinfo,
13394 .get_regs_len = tg3_get_regs_len,
13395 .get_regs = tg3_get_regs,
13396 .get_wol = tg3_get_wol,
13397 .set_wol = tg3_set_wol,
13398 .get_msglevel = tg3_get_msglevel,
13399 .set_msglevel = tg3_set_msglevel,
13400 .nway_reset = tg3_nway_reset,
13401 .get_link = ethtool_op_get_link,
13402 .get_eeprom_len = tg3_get_eeprom_len,
13403 .get_eeprom = tg3_get_eeprom,
13404 .set_eeprom = tg3_set_eeprom,
13405 .get_ringparam = tg3_get_ringparam,
13406 .set_ringparam = tg3_set_ringparam,
13407 .get_pauseparam = tg3_get_pauseparam,
13408 .set_pauseparam = tg3_set_pauseparam,
13409 .self_test = tg3_self_test,
13410 .get_strings = tg3_get_strings,
13411 .set_phys_id = tg3_set_phys_id,
13412 .get_ethtool_stats = tg3_get_ethtool_stats,
13413 .get_coalesce = tg3_get_coalesce,
13414 .set_coalesce = tg3_set_coalesce,
13415 .get_sset_count = tg3_get_sset_count,
13416 .get_rxnfc = tg3_get_rxnfc,
13417 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
13418 .get_rxfh_indir = tg3_get_rxfh_indir,
13419 .set_rxfh_indir = tg3_set_rxfh_indir,
13420 .get_channels = tg3_get_channels,
13421 .set_channels = tg3_set_channels,
13422 .get_ts_info = tg3_get_ts_info,
13425 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13426 struct rtnl_link_stats64 *stats)
13428 struct tg3 *tp = netdev_priv(dev);
13430 spin_lock_bh(&tp->lock);
13431 if (!tp->hw_stats) {
13432 spin_unlock_bh(&tp->lock);
13433 return &tp->net_stats_prev;
13436 tg3_get_nstats(tp, stats);
13437 spin_unlock_bh(&tp->lock);
13442 static void tg3_set_rx_mode(struct net_device *dev)
13444 struct tg3 *tp = netdev_priv(dev);
13446 if (!netif_running(dev))
13449 tg3_full_lock(tp, 0);
13450 __tg3_set_rx_mode(dev);
13451 tg3_full_unlock(tp);
13454 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13457 dev->mtu = new_mtu;
13459 if (new_mtu > ETH_DATA_LEN) {
13460 if (tg3_flag(tp, 5780_CLASS)) {
13461 netdev_update_features(dev);
13462 tg3_flag_clear(tp, TSO_CAPABLE);
13464 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13467 if (tg3_flag(tp, 5780_CLASS)) {
13468 tg3_flag_set(tp, TSO_CAPABLE);
13469 netdev_update_features(dev);
13471 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13475 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13477 struct tg3 *tp = netdev_priv(dev);
13478 int err, reset_phy = 0;
13480 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13483 if (!netif_running(dev)) {
13484 /* We'll just catch it later when the
13487 tg3_set_mtu(dev, tp, new_mtu);
13493 tg3_netif_stop(tp);
13495 tg3_full_lock(tp, 1);
13497 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13499 tg3_set_mtu(dev, tp, new_mtu);
13501 /* Reset PHY, otherwise the read DMA engine will be in a mode that
13502 * breaks all requests to 256 bytes.
13504 if (tg3_asic_rev(tp) == ASIC_REV_57766)
13507 err = tg3_restart_hw(tp, reset_phy);
13510 tg3_netif_start(tp);
13512 tg3_full_unlock(tp);
13520 static const struct net_device_ops tg3_netdev_ops = {
13521 .ndo_open = tg3_open,
13522 .ndo_stop = tg3_close,
13523 .ndo_start_xmit = tg3_start_xmit,
13524 .ndo_get_stats64 = tg3_get_stats64,
13525 .ndo_validate_addr = eth_validate_addr,
13526 .ndo_set_rx_mode = tg3_set_rx_mode,
13527 .ndo_set_mac_address = tg3_set_mac_addr,
13528 .ndo_do_ioctl = tg3_ioctl,
13529 .ndo_tx_timeout = tg3_tx_timeout,
13530 .ndo_change_mtu = tg3_change_mtu,
13531 .ndo_fix_features = tg3_fix_features,
13532 .ndo_set_features = tg3_set_features,
13533 #ifdef CONFIG_NET_POLL_CONTROLLER
13534 .ndo_poll_controller = tg3_poll_controller,
13538 static void tg3_get_eeprom_size(struct tg3 *tp)
13540 u32 cursize, val, magic;
13542 tp->nvram_size = EEPROM_CHIP_SIZE;
13544 if (tg3_nvram_read(tp, 0, &magic) != 0)
13547 if ((magic != TG3_EEPROM_MAGIC) &&
13548 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13549 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13553 * Size the chip by reading offsets at increasing powers of two.
13554 * When we encounter our validation signature, we know the addressing
13555 * has wrapped around, and thus have our chip size.
13559 while (cursize < tp->nvram_size) {
13560 if (tg3_nvram_read(tp, cursize, &val) != 0)
13569 tp->nvram_size = cursize;
13572 static void tg3_get_nvram_size(struct tg3 *tp)
13576 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13579 /* Selfboot format */
13580 if (val != TG3_EEPROM_MAGIC) {
13581 tg3_get_eeprom_size(tp);
13585 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13587 /* This is confusing. We want to operate on the
13588 * 16-bit value at offset 0xf2. The tg3_nvram_read()
13589 * call will read from NVRAM and byteswap the data
13590 * according to the byteswapping settings for all
13591 * other register accesses. This ensures the data we
13592 * want will always reside in the lower 16-bits.
13593 * However, the data in NVRAM is in LE format, which
13594 * means the data from the NVRAM read will always be
13595 * opposite the endianness of the CPU. The 16-bit
13596 * byteswap then brings the data to CPU endianness.
13598 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13602 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13605 static void tg3_get_nvram_info(struct tg3 *tp)
13609 nvcfg1 = tr32(NVRAM_CFG1);
13610 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
13611 tg3_flag_set(tp, FLASH);
13613 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13614 tw32(NVRAM_CFG1, nvcfg1);
13617 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
13618 tg3_flag(tp, 5780_CLASS)) {
13619 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
13620 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
13621 tp->nvram_jedecnum = JEDEC_ATMEL;
13622 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13623 tg3_flag_set(tp, NVRAM_BUFFERED);
13625 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
13626 tp->nvram_jedecnum = JEDEC_ATMEL;
13627 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
13629 case FLASH_VENDOR_ATMEL_EEPROM:
13630 tp->nvram_jedecnum = JEDEC_ATMEL;
13631 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13632 tg3_flag_set(tp, NVRAM_BUFFERED);
13634 case FLASH_VENDOR_ST:
13635 tp->nvram_jedecnum = JEDEC_ST;
13636 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
13637 tg3_flag_set(tp, NVRAM_BUFFERED);
13639 case FLASH_VENDOR_SAIFUN:
13640 tp->nvram_jedecnum = JEDEC_SAIFUN;
13641 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
13643 case FLASH_VENDOR_SST_SMALL:
13644 case FLASH_VENDOR_SST_LARGE:
13645 tp->nvram_jedecnum = JEDEC_SST;
13646 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
13650 tp->nvram_jedecnum = JEDEC_ATMEL;
13651 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13652 tg3_flag_set(tp, NVRAM_BUFFERED);
13656 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13658 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13659 case FLASH_5752PAGE_SIZE_256:
13660 tp->nvram_pagesize = 256;
13662 case FLASH_5752PAGE_SIZE_512:
13663 tp->nvram_pagesize = 512;
13665 case FLASH_5752PAGE_SIZE_1K:
13666 tp->nvram_pagesize = 1024;
13668 case FLASH_5752PAGE_SIZE_2K:
13669 tp->nvram_pagesize = 2048;
13671 case FLASH_5752PAGE_SIZE_4K:
13672 tp->nvram_pagesize = 4096;
13674 case FLASH_5752PAGE_SIZE_264:
13675 tp->nvram_pagesize = 264;
13677 case FLASH_5752PAGE_SIZE_528:
13678 tp->nvram_pagesize = 528;
13683 static void tg3_get_5752_nvram_info(struct tg3 *tp)
13687 nvcfg1 = tr32(NVRAM_CFG1);
13689 /* NVRAM protection for TPM */
13690 if (nvcfg1 & (1 << 27))
13691 tg3_flag_set(tp, PROTECTED_NVRAM);
13693 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13694 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
13695 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
13696 tp->nvram_jedecnum = JEDEC_ATMEL;
13697 tg3_flag_set(tp, NVRAM_BUFFERED);
13699 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13700 tp->nvram_jedecnum = JEDEC_ATMEL;
13701 tg3_flag_set(tp, NVRAM_BUFFERED);
13702 tg3_flag_set(tp, FLASH);
13704 case FLASH_5752VENDOR_ST_M45PE10:
13705 case FLASH_5752VENDOR_ST_M45PE20:
13706 case FLASH_5752VENDOR_ST_M45PE40:
13707 tp->nvram_jedecnum = JEDEC_ST;
13708 tg3_flag_set(tp, NVRAM_BUFFERED);
13709 tg3_flag_set(tp, FLASH);
13713 if (tg3_flag(tp, FLASH)) {
13714 tg3_nvram_get_pagesize(tp, nvcfg1);
13716 /* For eeprom, set pagesize to maximum eeprom size */
13717 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13719 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13720 tw32(NVRAM_CFG1, nvcfg1);
13724 static void tg3_get_5755_nvram_info(struct tg3 *tp)
13726 u32 nvcfg1, protect = 0;
13728 nvcfg1 = tr32(NVRAM_CFG1);
13730 /* NVRAM protection for TPM */
13731 if (nvcfg1 & (1 << 27)) {
13732 tg3_flag_set(tp, PROTECTED_NVRAM);
13736 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13738 case FLASH_5755VENDOR_ATMEL_FLASH_1:
13739 case FLASH_5755VENDOR_ATMEL_FLASH_2:
13740 case FLASH_5755VENDOR_ATMEL_FLASH_3:
13741 case FLASH_5755VENDOR_ATMEL_FLASH_5:
13742 tp->nvram_jedecnum = JEDEC_ATMEL;
13743 tg3_flag_set(tp, NVRAM_BUFFERED);
13744 tg3_flag_set(tp, FLASH);
13745 tp->nvram_pagesize = 264;
13746 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
13747 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
13748 tp->nvram_size = (protect ? 0x3e200 :
13749 TG3_NVRAM_SIZE_512KB);
13750 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
13751 tp->nvram_size = (protect ? 0x1f200 :
13752 TG3_NVRAM_SIZE_256KB);
13754 tp->nvram_size = (protect ? 0x1f200 :
13755 TG3_NVRAM_SIZE_128KB);
13757 case FLASH_5752VENDOR_ST_M45PE10:
13758 case FLASH_5752VENDOR_ST_M45PE20:
13759 case FLASH_5752VENDOR_ST_M45PE40:
13760 tp->nvram_jedecnum = JEDEC_ST;
13761 tg3_flag_set(tp, NVRAM_BUFFERED);
13762 tg3_flag_set(tp, FLASH);
13763 tp->nvram_pagesize = 256;
13764 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
13765 tp->nvram_size = (protect ?
13766 TG3_NVRAM_SIZE_64KB :
13767 TG3_NVRAM_SIZE_128KB);
13768 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
13769 tp->nvram_size = (protect ?
13770 TG3_NVRAM_SIZE_64KB :
13771 TG3_NVRAM_SIZE_256KB);
13773 tp->nvram_size = (protect ?
13774 TG3_NVRAM_SIZE_128KB :
13775 TG3_NVRAM_SIZE_512KB);
13780 static void tg3_get_5787_nvram_info(struct tg3 *tp)
13784 nvcfg1 = tr32(NVRAM_CFG1);
13786 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13787 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
13788 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13789 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
13790 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13791 tp->nvram_jedecnum = JEDEC_ATMEL;
13792 tg3_flag_set(tp, NVRAM_BUFFERED);
13793 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13795 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13796 tw32(NVRAM_CFG1, nvcfg1);
13798 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13799 case FLASH_5755VENDOR_ATMEL_FLASH_1:
13800 case FLASH_5755VENDOR_ATMEL_FLASH_2:
13801 case FLASH_5755VENDOR_ATMEL_FLASH_3:
13802 tp->nvram_jedecnum = JEDEC_ATMEL;
13803 tg3_flag_set(tp, NVRAM_BUFFERED);
13804 tg3_flag_set(tp, FLASH);
13805 tp->nvram_pagesize = 264;
13807 case FLASH_5752VENDOR_ST_M45PE10:
13808 case FLASH_5752VENDOR_ST_M45PE20:
13809 case FLASH_5752VENDOR_ST_M45PE40:
13810 tp->nvram_jedecnum = JEDEC_ST;
13811 tg3_flag_set(tp, NVRAM_BUFFERED);
13812 tg3_flag_set(tp, FLASH);
13813 tp->nvram_pagesize = 256;
13818 static void tg3_get_5761_nvram_info(struct tg3 *tp)
13820 u32 nvcfg1, protect = 0;
13822 nvcfg1 = tr32(NVRAM_CFG1);
13824 /* NVRAM protection for TPM */
13825 if (nvcfg1 & (1 << 27)) {
13826 tg3_flag_set(tp, PROTECTED_NVRAM);
13830 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13832 case FLASH_5761VENDOR_ATMEL_ADB021D:
13833 case FLASH_5761VENDOR_ATMEL_ADB041D:
13834 case FLASH_5761VENDOR_ATMEL_ADB081D:
13835 case FLASH_5761VENDOR_ATMEL_ADB161D:
13836 case FLASH_5761VENDOR_ATMEL_MDB021D:
13837 case FLASH_5761VENDOR_ATMEL_MDB041D:
13838 case FLASH_5761VENDOR_ATMEL_MDB081D:
13839 case FLASH_5761VENDOR_ATMEL_MDB161D:
13840 tp->nvram_jedecnum = JEDEC_ATMEL;
13841 tg3_flag_set(tp, NVRAM_BUFFERED);
13842 tg3_flag_set(tp, FLASH);
13843 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13844 tp->nvram_pagesize = 256;
13846 case FLASH_5761VENDOR_ST_A_M45PE20:
13847 case FLASH_5761VENDOR_ST_A_M45PE40:
13848 case FLASH_5761VENDOR_ST_A_M45PE80:
13849 case FLASH_5761VENDOR_ST_A_M45PE16:
13850 case FLASH_5761VENDOR_ST_M_M45PE20:
13851 case FLASH_5761VENDOR_ST_M_M45PE40:
13852 case FLASH_5761VENDOR_ST_M_M45PE80:
13853 case FLASH_5761VENDOR_ST_M_M45PE16:
13854 tp->nvram_jedecnum = JEDEC_ST;
13855 tg3_flag_set(tp, NVRAM_BUFFERED);
13856 tg3_flag_set(tp, FLASH);
13857 tp->nvram_pagesize = 256;
13862 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
13865 case FLASH_5761VENDOR_ATMEL_ADB161D:
13866 case FLASH_5761VENDOR_ATMEL_MDB161D:
13867 case FLASH_5761VENDOR_ST_A_M45PE16:
13868 case FLASH_5761VENDOR_ST_M_M45PE16:
13869 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
13871 case FLASH_5761VENDOR_ATMEL_ADB081D:
13872 case FLASH_5761VENDOR_ATMEL_MDB081D:
13873 case FLASH_5761VENDOR_ST_A_M45PE80:
13874 case FLASH_5761VENDOR_ST_M_M45PE80:
13875 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13877 case FLASH_5761VENDOR_ATMEL_ADB041D:
13878 case FLASH_5761VENDOR_ATMEL_MDB041D:
13879 case FLASH_5761VENDOR_ST_A_M45PE40:
13880 case FLASH_5761VENDOR_ST_M_M45PE40:
13881 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13883 case FLASH_5761VENDOR_ATMEL_ADB021D:
13884 case FLASH_5761VENDOR_ATMEL_MDB021D:
13885 case FLASH_5761VENDOR_ST_A_M45PE20:
13886 case FLASH_5761VENDOR_ST_M_M45PE20:
13887 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13893 static void tg3_get_5906_nvram_info(struct tg3 *tp)
13895 tp->nvram_jedecnum = JEDEC_ATMEL;
13896 tg3_flag_set(tp, NVRAM_BUFFERED);
13897 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13900 static void tg3_get_57780_nvram_info(struct tg3 *tp)
13904 nvcfg1 = tr32(NVRAM_CFG1);
13906 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13907 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13908 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13909 tp->nvram_jedecnum = JEDEC_ATMEL;
13910 tg3_flag_set(tp, NVRAM_BUFFERED);
13911 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13913 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13914 tw32(NVRAM_CFG1, nvcfg1);
13916 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13917 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13918 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13919 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13920 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13921 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13922 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13923 tp->nvram_jedecnum = JEDEC_ATMEL;
13924 tg3_flag_set(tp, NVRAM_BUFFERED);
13925 tg3_flag_set(tp, FLASH);
13927 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13928 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13929 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13930 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13931 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13933 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13934 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13935 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13937 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13938 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13939 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13943 case FLASH_5752VENDOR_ST_M45PE10:
13944 case FLASH_5752VENDOR_ST_M45PE20:
13945 case FLASH_5752VENDOR_ST_M45PE40:
13946 tp->nvram_jedecnum = JEDEC_ST;
13947 tg3_flag_set(tp, NVRAM_BUFFERED);
13948 tg3_flag_set(tp, FLASH);
13950 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13951 case FLASH_5752VENDOR_ST_M45PE10:
13952 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13954 case FLASH_5752VENDOR_ST_M45PE20:
13955 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13957 case FLASH_5752VENDOR_ST_M45PE40:
13958 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13963 tg3_flag_set(tp, NO_NVRAM);
13967 tg3_nvram_get_pagesize(tp, nvcfg1);
13968 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13969 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13973 static void tg3_get_5717_nvram_info(struct tg3 *tp)
13977 nvcfg1 = tr32(NVRAM_CFG1);
13979 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13980 case FLASH_5717VENDOR_ATMEL_EEPROM:
13981 case FLASH_5717VENDOR_MICRO_EEPROM:
13982 tp->nvram_jedecnum = JEDEC_ATMEL;
13983 tg3_flag_set(tp, NVRAM_BUFFERED);
13984 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13986 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13987 tw32(NVRAM_CFG1, nvcfg1);
13989 case FLASH_5717VENDOR_ATMEL_MDB011D:
13990 case FLASH_5717VENDOR_ATMEL_ADB011B:
13991 case FLASH_5717VENDOR_ATMEL_ADB011D:
13992 case FLASH_5717VENDOR_ATMEL_MDB021D:
13993 case FLASH_5717VENDOR_ATMEL_ADB021B:
13994 case FLASH_5717VENDOR_ATMEL_ADB021D:
13995 case FLASH_5717VENDOR_ATMEL_45USPT:
13996 tp->nvram_jedecnum = JEDEC_ATMEL;
13997 tg3_flag_set(tp, NVRAM_BUFFERED);
13998 tg3_flag_set(tp, FLASH);
14000 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14001 case FLASH_5717VENDOR_ATMEL_MDB021D:
14002 /* Detect size with tg3_nvram_get_size() */
14004 case FLASH_5717VENDOR_ATMEL_ADB021B:
14005 case FLASH_5717VENDOR_ATMEL_ADB021D:
14006 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14009 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14013 case FLASH_5717VENDOR_ST_M_M25PE10:
14014 case FLASH_5717VENDOR_ST_A_M25PE10:
14015 case FLASH_5717VENDOR_ST_M_M45PE10:
14016 case FLASH_5717VENDOR_ST_A_M45PE10:
14017 case FLASH_5717VENDOR_ST_M_M25PE20:
14018 case FLASH_5717VENDOR_ST_A_M25PE20:
14019 case FLASH_5717VENDOR_ST_M_M45PE20:
14020 case FLASH_5717VENDOR_ST_A_M45PE20:
14021 case FLASH_5717VENDOR_ST_25USPT:
14022 case FLASH_5717VENDOR_ST_45USPT:
14023 tp->nvram_jedecnum = JEDEC_ST;
14024 tg3_flag_set(tp, NVRAM_BUFFERED);
14025 tg3_flag_set(tp, FLASH);
14027 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14028 case FLASH_5717VENDOR_ST_M_M25PE20:
14029 case FLASH_5717VENDOR_ST_M_M45PE20:
14030 /* Detect size with tg3_nvram_get_size() */
14032 case FLASH_5717VENDOR_ST_A_M25PE20:
14033 case FLASH_5717VENDOR_ST_A_M45PE20:
14034 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14037 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14042 tg3_flag_set(tp, NO_NVRAM);
14046 tg3_nvram_get_pagesize(tp, nvcfg1);
14047 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14048 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14051 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14053 u32 nvcfg1, nvmpinstrp;
14055 nvcfg1 = tr32(NVRAM_CFG1);
14056 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14058 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14059 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14060 tg3_flag_set(tp, NO_NVRAM);
14064 switch (nvmpinstrp) {
14065 case FLASH_5762_EEPROM_HD:
14066 nvmpinstrp = FLASH_5720_EEPROM_HD;
14068 case FLASH_5762_EEPROM_LD:
14069 nvmpinstrp = FLASH_5720_EEPROM_LD;
14071 case FLASH_5720VENDOR_M_ST_M45PE20:
14072 /* This pinstrap supports multiple sizes, so force it
14073 * to read the actual size from location 0xf0.
14075 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14080 switch (nvmpinstrp) {
14081 case FLASH_5720_EEPROM_HD:
14082 case FLASH_5720_EEPROM_LD:
14083 tp->nvram_jedecnum = JEDEC_ATMEL;
14084 tg3_flag_set(tp, NVRAM_BUFFERED);
14086 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14087 tw32(NVRAM_CFG1, nvcfg1);
14088 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14089 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14091 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14093 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14094 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14095 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14096 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14097 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14098 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14099 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14100 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14101 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14102 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14103 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14104 case FLASH_5720VENDOR_ATMEL_45USPT:
14105 tp->nvram_jedecnum = JEDEC_ATMEL;
14106 tg3_flag_set(tp, NVRAM_BUFFERED);
14107 tg3_flag_set(tp, FLASH);
14109 switch (nvmpinstrp) {
14110 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14111 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14112 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14113 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14115 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14116 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14117 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14118 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14120 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14121 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14122 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14125 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14126 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14130 case FLASH_5720VENDOR_M_ST_M25PE10:
14131 case FLASH_5720VENDOR_M_ST_M45PE10:
14132 case FLASH_5720VENDOR_A_ST_M25PE10:
14133 case FLASH_5720VENDOR_A_ST_M45PE10:
14134 case FLASH_5720VENDOR_M_ST_M25PE20:
14135 case FLASH_5720VENDOR_M_ST_M45PE20:
14136 case FLASH_5720VENDOR_A_ST_M25PE20:
14137 case FLASH_5720VENDOR_A_ST_M45PE20:
14138 case FLASH_5720VENDOR_M_ST_M25PE40:
14139 case FLASH_5720VENDOR_M_ST_M45PE40:
14140 case FLASH_5720VENDOR_A_ST_M25PE40:
14141 case FLASH_5720VENDOR_A_ST_M45PE40:
14142 case FLASH_5720VENDOR_M_ST_M25PE80:
14143 case FLASH_5720VENDOR_M_ST_M45PE80:
14144 case FLASH_5720VENDOR_A_ST_M25PE80:
14145 case FLASH_5720VENDOR_A_ST_M45PE80:
14146 case FLASH_5720VENDOR_ST_25USPT:
14147 case FLASH_5720VENDOR_ST_45USPT:
14148 tp->nvram_jedecnum = JEDEC_ST;
14149 tg3_flag_set(tp, NVRAM_BUFFERED);
14150 tg3_flag_set(tp, FLASH);
14152 switch (nvmpinstrp) {
14153 case FLASH_5720VENDOR_M_ST_M25PE20:
14154 case FLASH_5720VENDOR_M_ST_M45PE20:
14155 case FLASH_5720VENDOR_A_ST_M25PE20:
14156 case FLASH_5720VENDOR_A_ST_M45PE20:
14157 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14159 case FLASH_5720VENDOR_M_ST_M25PE40:
14160 case FLASH_5720VENDOR_M_ST_M45PE40:
14161 case FLASH_5720VENDOR_A_ST_M25PE40:
14162 case FLASH_5720VENDOR_A_ST_M45PE40:
14163 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14165 case FLASH_5720VENDOR_M_ST_M25PE80:
14166 case FLASH_5720VENDOR_M_ST_M45PE80:
14167 case FLASH_5720VENDOR_A_ST_M25PE80:
14168 case FLASH_5720VENDOR_A_ST_M45PE80:
14169 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14172 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14173 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14178 tg3_flag_set(tp, NO_NVRAM);
14182 tg3_nvram_get_pagesize(tp, nvcfg1);
14183 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14184 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14186 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14189 if (tg3_nvram_read(tp, 0, &val))
14192 if (val != TG3_EEPROM_MAGIC &&
14193 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14194 tg3_flag_set(tp, NO_NVRAM);
14198 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14199 static void tg3_nvram_init(struct tg3 *tp)
14201 if (tg3_flag(tp, IS_SSB_CORE)) {
14202 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14203 tg3_flag_clear(tp, NVRAM);
14204 tg3_flag_clear(tp, NVRAM_BUFFERED);
14205 tg3_flag_set(tp, NO_NVRAM);
14209 tw32_f(GRC_EEPROM_ADDR,
14210 (EEPROM_ADDR_FSM_RESET |
14211 (EEPROM_DEFAULT_CLOCK_PERIOD <<
14212 EEPROM_ADDR_CLKPERD_SHIFT)));
14216 /* Enable seeprom accesses. */
14217 tw32_f(GRC_LOCAL_CTRL,
14218 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14221 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14222 tg3_asic_rev(tp) != ASIC_REV_5701) {
14223 tg3_flag_set(tp, NVRAM);
14225 if (tg3_nvram_lock(tp)) {
14226 netdev_warn(tp->dev,
14227 "Cannot get nvram lock, %s failed\n",
14231 tg3_enable_nvram_access(tp);
14233 tp->nvram_size = 0;
14235 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14236 tg3_get_5752_nvram_info(tp);
14237 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14238 tg3_get_5755_nvram_info(tp);
14239 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14240 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14241 tg3_asic_rev(tp) == ASIC_REV_5785)
14242 tg3_get_5787_nvram_info(tp);
14243 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14244 tg3_get_5761_nvram_info(tp);
14245 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14246 tg3_get_5906_nvram_info(tp);
14247 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14248 tg3_flag(tp, 57765_CLASS))
14249 tg3_get_57780_nvram_info(tp);
14250 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14251 tg3_asic_rev(tp) == ASIC_REV_5719)
14252 tg3_get_5717_nvram_info(tp);
14253 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14254 tg3_asic_rev(tp) == ASIC_REV_5762)
14255 tg3_get_5720_nvram_info(tp);
14257 tg3_get_nvram_info(tp);
14259 if (tp->nvram_size == 0)
14260 tg3_get_nvram_size(tp);
14262 tg3_disable_nvram_access(tp);
14263 tg3_nvram_unlock(tp);
14266 tg3_flag_clear(tp, NVRAM);
14267 tg3_flag_clear(tp, NVRAM_BUFFERED);
14269 tg3_get_eeprom_size(tp);
14273 struct subsys_tbl_ent {
14274 u16 subsys_vendor, subsys_devid;
14278 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14279 /* Broadcom boards. */
14280 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14281 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14282 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14283 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14284 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14285 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14286 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14287 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14288 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14289 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14290 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14291 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14292 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14293 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14294 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14295 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14296 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14297 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14298 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14299 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14300 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14301 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
14304 { TG3PCI_SUBVENDOR_ID_3COM,
14305 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14306 { TG3PCI_SUBVENDOR_ID_3COM,
14307 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14308 { TG3PCI_SUBVENDOR_ID_3COM,
14309 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14310 { TG3PCI_SUBVENDOR_ID_3COM,
14311 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14312 { TG3PCI_SUBVENDOR_ID_3COM,
14313 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
14316 { TG3PCI_SUBVENDOR_ID_DELL,
14317 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14318 { TG3PCI_SUBVENDOR_ID_DELL,
14319 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14320 { TG3PCI_SUBVENDOR_ID_DELL,
14321 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14322 { TG3PCI_SUBVENDOR_ID_DELL,
14323 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14325 /* Compaq boards. */
14326 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14327 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14328 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14329 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14330 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14331 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14332 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14333 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14334 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14335 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
14338 { TG3PCI_SUBVENDOR_ID_IBM,
14339 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
14342 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14346 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14347 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14348 tp->pdev->subsystem_vendor) &&
14349 (subsys_id_to_phy_id[i].subsys_devid ==
14350 tp->pdev->subsystem_device))
14351 return &subsys_id_to_phy_id[i];
14356 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14360 tp->phy_id = TG3_PHY_ID_INVALID;
14361 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14363 /* Assume an onboard device and WOL capable by default. */
14364 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14365 tg3_flag_set(tp, WOL_CAP);
14367 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14368 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14369 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14370 tg3_flag_set(tp, IS_NIC);
14372 val = tr32(VCPU_CFGSHDW);
14373 if (val & VCPU_CFGSHDW_ASPM_DBNC)
14374 tg3_flag_set(tp, ASPM_WORKAROUND);
14375 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14376 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14377 tg3_flag_set(tp, WOL_ENABLE);
14378 device_set_wakeup_enable(&tp->pdev->dev, true);
14383 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14384 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14385 u32 nic_cfg, led_cfg;
14386 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
14387 int eeprom_phy_serdes = 0;
14389 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14390 tp->nic_sram_data_cfg = nic_cfg;
14392 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14393 ver >>= NIC_SRAM_DATA_VER_SHIFT;
14394 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14395 tg3_asic_rev(tp) != ASIC_REV_5701 &&
14396 tg3_asic_rev(tp) != ASIC_REV_5703 &&
14397 (ver > 0) && (ver < 0x100))
14398 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14400 if (tg3_asic_rev(tp) == ASIC_REV_5785)
14401 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14403 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14404 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14405 eeprom_phy_serdes = 1;
14407 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14408 if (nic_phy_id != 0) {
14409 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14410 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14412 eeprom_phy_id = (id1 >> 16) << 10;
14413 eeprom_phy_id |= (id2 & 0xfc00) << 16;
14414 eeprom_phy_id |= (id2 & 0x03ff) << 0;
14418 tp->phy_id = eeprom_phy_id;
14419 if (eeprom_phy_serdes) {
14420 if (!tg3_flag(tp, 5705_PLUS))
14421 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14423 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14426 if (tg3_flag(tp, 5750_PLUS))
14427 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14428 SHASTA_EXT_LED_MODE_MASK);
14430 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14434 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14435 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14438 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14439 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14442 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14443 tp->led_ctrl = LED_CTRL_MODE_MAC;
14445 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
14446 * read on some older 5700/5701 bootcode.
14448 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
14449 tg3_asic_rev(tp) == ASIC_REV_5701)
14450 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14454 case SHASTA_EXT_LED_SHARED:
14455 tp->led_ctrl = LED_CTRL_MODE_SHARED;
14456 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
14457 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
14458 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14459 LED_CTRL_MODE_PHY_2);
14462 case SHASTA_EXT_LED_MAC:
14463 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14466 case SHASTA_EXT_LED_COMBO:
14467 tp->led_ctrl = LED_CTRL_MODE_COMBO;
14468 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
14469 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14470 LED_CTRL_MODE_PHY_2);
14475 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
14476 tg3_asic_rev(tp) == ASIC_REV_5701) &&
14477 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14478 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14480 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
14481 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14483 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14484 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14485 if ((tp->pdev->subsystem_vendor ==
14486 PCI_VENDOR_ID_ARIMA) &&
14487 (tp->pdev->subsystem_device == 0x205a ||
14488 tp->pdev->subsystem_device == 0x2063))
14489 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14491 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14492 tg3_flag_set(tp, IS_NIC);
14495 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14496 tg3_flag_set(tp, ENABLE_ASF);
14497 if (tg3_flag(tp, 5750_PLUS))
14498 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14501 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14502 tg3_flag(tp, 5750_PLUS))
14503 tg3_flag_set(tp, ENABLE_APE);
14505 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14506 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14507 tg3_flag_clear(tp, WOL_CAP);
14509 if (tg3_flag(tp, WOL_CAP) &&
14510 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14511 tg3_flag_set(tp, WOL_ENABLE);
14512 device_set_wakeup_enable(&tp->pdev->dev, true);
14515 if (cfg2 & (1 << 17))
14516 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14518 /* serdes signal pre-emphasis in register 0x590 set by */
14519 /* bootcode if bit 18 is set */
14520 if (cfg2 & (1 << 18))
14521 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14523 if ((tg3_flag(tp, 57765_PLUS) ||
14524 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
14525 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
14526 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14527 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
14529 if (tg3_flag(tp, PCI_EXPRESS)) {
14532 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14533 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
14534 !tg3_flag(tp, 57765_PLUS) &&
14535 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
14536 tg3_flag_set(tp, ASPM_WORKAROUND);
14537 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
14538 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
14539 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
14540 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
14543 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
14544 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
14545 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
14546 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
14547 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
14548 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
14551 if (tg3_flag(tp, WOL_CAP))
14552 device_set_wakeup_enable(&tp->pdev->dev,
14553 tg3_flag(tp, WOL_ENABLE));
14555 device_set_wakeup_capable(&tp->pdev->dev, false);
14558 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
14561 u32 val2, off = offset * 8;
14563 err = tg3_nvram_lock(tp);
14567 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
14568 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
14569 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
14570 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
14573 for (i = 0; i < 100; i++) {
14574 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
14575 if (val2 & APE_OTP_STATUS_CMD_DONE) {
14576 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
14582 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
14584 tg3_nvram_unlock(tp);
14585 if (val2 & APE_OTP_STATUS_CMD_DONE)
14591 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14596 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14597 tw32(OTP_CTRL, cmd);
14599 /* Wait for up to 1 ms for command to execute. */
14600 for (i = 0; i < 100; i++) {
14601 val = tr32(OTP_STATUS);
14602 if (val & OTP_STATUS_CMD_DONE)
14607 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14610 /* Read the gphy configuration from the OTP region of the chip. The gphy
14611 * configuration is a 32-bit value that straddles the alignment boundary.
14612 * We do two 32-bit reads and then shift and merge the results.
14614 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
14616 u32 bhalf_otp, thalf_otp;
14618 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
14620 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
14623 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
14625 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14628 thalf_otp = tr32(OTP_READ_DATA);
14630 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
14632 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14635 bhalf_otp = tr32(OTP_READ_DATA);
14637 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
14640 static void tg3_phy_init_link_config(struct tg3 *tp)
14642 u32 adv = ADVERTISED_Autoneg;
14644 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14645 adv |= ADVERTISED_1000baseT_Half |
14646 ADVERTISED_1000baseT_Full;
14648 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14649 adv |= ADVERTISED_100baseT_Half |
14650 ADVERTISED_100baseT_Full |
14651 ADVERTISED_10baseT_Half |
14652 ADVERTISED_10baseT_Full |
14655 adv |= ADVERTISED_FIBRE;
14657 tp->link_config.advertising = adv;
14658 tp->link_config.speed = SPEED_UNKNOWN;
14659 tp->link_config.duplex = DUPLEX_UNKNOWN;
14660 tp->link_config.autoneg = AUTONEG_ENABLE;
14661 tp->link_config.active_speed = SPEED_UNKNOWN;
14662 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
14667 static int tg3_phy_probe(struct tg3 *tp)
14669 u32 hw_phy_id_1, hw_phy_id_2;
14670 u32 hw_phy_id, hw_phy_id_masked;
14673 /* flow control autonegotiation is default behavior */
14674 tg3_flag_set(tp, PAUSE_AUTONEG);
14675 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14677 if (tg3_flag(tp, ENABLE_APE)) {
14678 switch (tp->pci_fn) {
14680 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
14683 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
14686 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
14689 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
14694 if (!tg3_flag(tp, ENABLE_ASF) &&
14695 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14696 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14697 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
14698 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
14700 if (tg3_flag(tp, USE_PHYLIB))
14701 return tg3_phy_init(tp);
14703 /* Reading the PHY ID register can conflict with ASF
14704 * firmware access to the PHY hardware.
14707 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
14708 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
14710 /* Now read the physical PHY_ID from the chip and verify
14711 * that it is sane. If it doesn't look good, we fall back
14712 * to either the hard-coded table based PHY_ID and failing
14713 * that the value found in the eeprom area.
14715 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
14716 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
14718 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
14719 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
14720 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
14722 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
14725 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
14726 tp->phy_id = hw_phy_id;
14727 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
14728 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14730 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
14732 if (tp->phy_id != TG3_PHY_ID_INVALID) {
14733 /* Do nothing, phy ID already set up in
14734 * tg3_get_eeprom_hw_cfg().
14737 struct subsys_tbl_ent *p;
14739 /* No eeprom signature? Try the hardcoded
14740 * subsys device table.
14742 p = tg3_lookup_by_subsys(tp);
14744 tp->phy_id = p->phy_id;
14745 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
14746 /* For now we saw the IDs 0xbc050cd0,
14747 * 0xbc050f80 and 0xbc050c30 on devices
14748 * connected to an BCM4785 and there are
14749 * probably more. Just assume that the phy is
14750 * supported when it is connected to a SSB core
14757 tp->phy_id == TG3_PHY_ID_BCM8002)
14758 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14762 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14763 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
14764 tg3_asic_rev(tp) == ASIC_REV_5720 ||
14765 tg3_asic_rev(tp) == ASIC_REV_57766 ||
14766 tg3_asic_rev(tp) == ASIC_REV_5762 ||
14767 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
14768 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
14769 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
14770 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
14771 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
14773 tg3_phy_init_link_config(tp);
14775 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
14776 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14777 !tg3_flag(tp, ENABLE_APE) &&
14778 !tg3_flag(tp, ENABLE_ASF)) {
14781 tg3_readphy(tp, MII_BMSR, &bmsr);
14782 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
14783 (bmsr & BMSR_LSTATUS))
14784 goto skip_phy_reset;
14786 err = tg3_phy_reset(tp);
14790 tg3_phy_set_wirespeed(tp);
14792 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
14793 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
14794 tp->link_config.flowctrl);
14796 tg3_writephy(tp, MII_BMCR,
14797 BMCR_ANENABLE | BMCR_ANRESTART);
14802 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
14803 err = tg3_init_5401phy_dsp(tp);
14807 err = tg3_init_5401phy_dsp(tp);
14813 static void tg3_read_vpd(struct tg3 *tp)
14816 unsigned int block_end, rosize, len;
14820 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
14824 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
14826 goto out_not_found;
14828 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
14829 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
14830 i += PCI_VPD_LRDT_TAG_SIZE;
14832 if (block_end > vpdlen)
14833 goto out_not_found;
14835 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14836 PCI_VPD_RO_KEYWORD_MFR_ID);
14838 len = pci_vpd_info_field_size(&vpd_data[j]);
14840 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14841 if (j + len > block_end || len != 4 ||
14842 memcmp(&vpd_data[j], "1028", 4))
14845 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14846 PCI_VPD_RO_KEYWORD_VENDOR0);
14850 len = pci_vpd_info_field_size(&vpd_data[j]);
14852 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14853 if (j + len > block_end)
14856 if (len >= sizeof(tp->fw_ver))
14857 len = sizeof(tp->fw_ver) - 1;
14858 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
14859 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
14864 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14865 PCI_VPD_RO_KEYWORD_PARTNO);
14867 goto out_not_found;
14869 len = pci_vpd_info_field_size(&vpd_data[i]);
14871 i += PCI_VPD_INFO_FLD_HDR_SIZE;
14872 if (len > TG3_BPN_SIZE ||
14873 (len + i) > vpdlen)
14874 goto out_not_found;
14876 memcpy(tp->board_part_number, &vpd_data[i], len);
14880 if (tp->board_part_number[0])
14884 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
14885 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14886 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
14887 strcpy(tp->board_part_number, "BCM5717");
14888 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
14889 strcpy(tp->board_part_number, "BCM5718");
14892 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
14893 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
14894 strcpy(tp->board_part_number, "BCM57780");
14895 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
14896 strcpy(tp->board_part_number, "BCM57760");
14897 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
14898 strcpy(tp->board_part_number, "BCM57790");
14899 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
14900 strcpy(tp->board_part_number, "BCM57788");
14903 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
14904 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
14905 strcpy(tp->board_part_number, "BCM57761");
14906 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
14907 strcpy(tp->board_part_number, "BCM57765");
14908 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
14909 strcpy(tp->board_part_number, "BCM57781");
14910 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
14911 strcpy(tp->board_part_number, "BCM57785");
14912 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
14913 strcpy(tp->board_part_number, "BCM57791");
14914 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
14915 strcpy(tp->board_part_number, "BCM57795");
14918 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
14919 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
14920 strcpy(tp->board_part_number, "BCM57762");
14921 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
14922 strcpy(tp->board_part_number, "BCM57766");
14923 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
14924 strcpy(tp->board_part_number, "BCM57782");
14925 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14926 strcpy(tp->board_part_number, "BCM57786");
14929 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14930 strcpy(tp->board_part_number, "BCM95906");
14933 strcpy(tp->board_part_number, "none");
14937 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
14941 if (tg3_nvram_read(tp, offset, &val) ||
14942 (val & 0xfc000000) != 0x0c000000 ||
14943 tg3_nvram_read(tp, offset + 4, &val) ||
14950 static void tg3_read_bc_ver(struct tg3 *tp)
14952 u32 val, offset, start, ver_offset;
14954 bool newver = false;
14956 if (tg3_nvram_read(tp, 0xc, &offset) ||
14957 tg3_nvram_read(tp, 0x4, &start))
14960 offset = tg3_nvram_logical_addr(tp, offset);
14962 if (tg3_nvram_read(tp, offset, &val))
14965 if ((val & 0xfc000000) == 0x0c000000) {
14966 if (tg3_nvram_read(tp, offset + 4, &val))
14973 dst_off = strlen(tp->fw_ver);
14976 if (TG3_VER_SIZE - dst_off < 16 ||
14977 tg3_nvram_read(tp, offset + 8, &ver_offset))
14980 offset = offset + ver_offset - start;
14981 for (i = 0; i < 16; i += 4) {
14983 if (tg3_nvram_read_be32(tp, offset + i, &v))
14986 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
14991 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
14994 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
14995 TG3_NVM_BCVER_MAJSFT;
14996 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
14997 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
14998 "v%d.%02d", major, minor);
15002 static void tg3_read_hwsb_ver(struct tg3 *tp)
15004 u32 val, major, minor;
15006 /* Use native endian representation */
15007 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15010 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15011 TG3_NVM_HWSB_CFG1_MAJSFT;
15012 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15013 TG3_NVM_HWSB_CFG1_MINSFT;
15015 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15018 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15020 u32 offset, major, minor, build;
15022 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15024 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15027 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15028 case TG3_EEPROM_SB_REVISION_0:
15029 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15031 case TG3_EEPROM_SB_REVISION_2:
15032 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15034 case TG3_EEPROM_SB_REVISION_3:
15035 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15037 case TG3_EEPROM_SB_REVISION_4:
15038 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15040 case TG3_EEPROM_SB_REVISION_5:
15041 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15043 case TG3_EEPROM_SB_REVISION_6:
15044 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15050 if (tg3_nvram_read(tp, offset, &val))
15053 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15054 TG3_EEPROM_SB_EDH_BLD_SHFT;
15055 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15056 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15057 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
15059 if (minor > 99 || build > 26)
15062 offset = strlen(tp->fw_ver);
15063 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15064 " v%d.%02d", major, minor);
15067 offset = strlen(tp->fw_ver);
15068 if (offset < TG3_VER_SIZE - 1)
15069 tp->fw_ver[offset] = 'a' + build - 1;
15073 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15075 u32 val, offset, start;
15078 for (offset = TG3_NVM_DIR_START;
15079 offset < TG3_NVM_DIR_END;
15080 offset += TG3_NVM_DIRENT_SIZE) {
15081 if (tg3_nvram_read(tp, offset, &val))
15084 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15088 if (offset == TG3_NVM_DIR_END)
15091 if (!tg3_flag(tp, 5705_PLUS))
15092 start = 0x08000000;
15093 else if (tg3_nvram_read(tp, offset - 4, &start))
15096 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15097 !tg3_fw_img_is_valid(tp, offset) ||
15098 tg3_nvram_read(tp, offset + 8, &val))
15101 offset += val - start;
15103 vlen = strlen(tp->fw_ver);
15105 tp->fw_ver[vlen++] = ',';
15106 tp->fw_ver[vlen++] = ' ';
15108 for (i = 0; i < 4; i++) {
15110 if (tg3_nvram_read_be32(tp, offset, &v))
15113 offset += sizeof(v);
15115 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15116 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15120 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15125 static void tg3_probe_ncsi(struct tg3 *tp)
15129 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15130 if (apedata != APE_SEG_SIG_MAGIC)
15133 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15134 if (!(apedata & APE_FW_STATUS_READY))
15137 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15138 tg3_flag_set(tp, APE_HAS_NCSI);
15141 static void tg3_read_dash_ver(struct tg3 *tp)
15147 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15149 if (tg3_flag(tp, APE_HAS_NCSI))
15151 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15156 vlen = strlen(tp->fw_ver);
15158 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15160 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15161 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15162 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15163 (apedata & APE_FW_VERSION_BLDMSK));
15166 static void tg3_read_otp_ver(struct tg3 *tp)
15170 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15173 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15174 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15175 TG3_OTP_MAGIC0_VALID(val)) {
15176 u64 val64 = (u64) val << 32 | val2;
15180 for (i = 0; i < 7; i++) {
15181 if ((val64 & 0xff) == 0)
15183 ver = val64 & 0xff;
15186 vlen = strlen(tp->fw_ver);
15187 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15191 static void tg3_read_fw_ver(struct tg3 *tp)
15194 bool vpd_vers = false;
15196 if (tp->fw_ver[0] != 0)
15199 if (tg3_flag(tp, NO_NVRAM)) {
15200 strcat(tp->fw_ver, "sb");
15201 tg3_read_otp_ver(tp);
15205 if (tg3_nvram_read(tp, 0, &val))
15208 if (val == TG3_EEPROM_MAGIC)
15209 tg3_read_bc_ver(tp);
15210 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15211 tg3_read_sb_ver(tp, val);
15212 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15213 tg3_read_hwsb_ver(tp);
15215 if (tg3_flag(tp, ENABLE_ASF)) {
15216 if (tg3_flag(tp, ENABLE_APE)) {
15217 tg3_probe_ncsi(tp);
15219 tg3_read_dash_ver(tp);
15220 } else if (!vpd_vers) {
15221 tg3_read_mgmtfw_ver(tp);
15225 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15228 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15230 if (tg3_flag(tp, LRG_PROD_RING_CAP))
15231 return TG3_RX_RET_MAX_SIZE_5717;
15232 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15233 return TG3_RX_RET_MAX_SIZE_5700;
15235 return TG3_RX_RET_MAX_SIZE_5705;
15238 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15239 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15240 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15241 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15245 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15247 struct pci_dev *peer;
15248 unsigned int func, devnr = tp->pdev->devfn & ~7;
15250 for (func = 0; func < 8; func++) {
15251 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15252 if (peer && peer != tp->pdev)
15256 /* 5704 can be configured in single-port mode, set peer to
15257 * tp->pdev in that case.
15265 * We don't need to keep the refcount elevated; there's no way
15266 * to remove one half of this device without removing the other
15273 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15275 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15276 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15279 /* All devices that use the alternate
15280 * ASIC REV location have a CPMU.
15282 tg3_flag_set(tp, CPMU_PRESENT);
15284 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15285 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15286 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15287 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15288 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15289 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15290 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15291 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
15292 reg = TG3PCI_GEN2_PRODID_ASICREV;
15293 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15294 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15295 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15296 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15297 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15298 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15299 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15300 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15301 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15302 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15303 reg = TG3PCI_GEN15_PRODID_ASICREV;
15305 reg = TG3PCI_PRODID_ASICREV;
15307 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15310 /* Wrong chip ID in 5752 A0. This code can be removed later
15311 * as A0 is not in production.
15313 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15314 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15316 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15317 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15319 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15320 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15321 tg3_asic_rev(tp) == ASIC_REV_5720)
15322 tg3_flag_set(tp, 5717_PLUS);
15324 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15325 tg3_asic_rev(tp) == ASIC_REV_57766)
15326 tg3_flag_set(tp, 57765_CLASS);
15328 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15329 tg3_asic_rev(tp) == ASIC_REV_5762)
15330 tg3_flag_set(tp, 57765_PLUS);
15332 /* Intentionally exclude ASIC_REV_5906 */
15333 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15334 tg3_asic_rev(tp) == ASIC_REV_5787 ||
15335 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15336 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15337 tg3_asic_rev(tp) == ASIC_REV_5785 ||
15338 tg3_asic_rev(tp) == ASIC_REV_57780 ||
15339 tg3_flag(tp, 57765_PLUS))
15340 tg3_flag_set(tp, 5755_PLUS);
15342 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15343 tg3_asic_rev(tp) == ASIC_REV_5714)
15344 tg3_flag_set(tp, 5780_CLASS);
15346 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15347 tg3_asic_rev(tp) == ASIC_REV_5752 ||
15348 tg3_asic_rev(tp) == ASIC_REV_5906 ||
15349 tg3_flag(tp, 5755_PLUS) ||
15350 tg3_flag(tp, 5780_CLASS))
15351 tg3_flag_set(tp, 5750_PLUS);
15353 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15354 tg3_flag(tp, 5750_PLUS))
15355 tg3_flag_set(tp, 5705_PLUS);
15358 static bool tg3_10_100_only_device(struct tg3 *tp,
15359 const struct pci_device_id *ent)
15361 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15363 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15364 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15365 (tp->phy_flags & TG3_PHYFLG_IS_FET))
15368 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15369 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15370 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15380 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15383 u32 pci_state_reg, grc_misc_cfg;
15388 /* Force memory write invalidate off. If we leave it on,
15389 * then on 5700_BX chips we have to enable a workaround.
15390 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15391 * to match the cacheline size. The Broadcom driver have this
15392 * workaround but turns MWI off all the times so never uses
15393 * it. This seems to suggest that the workaround is insufficient.
15395 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15396 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15397 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15399 /* Important! -- Make sure register accesses are byteswapped
15400 * correctly. Also, for those chips that require it, make
15401 * sure that indirect register accesses are enabled before
15402 * the first operation.
15404 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15406 tp->misc_host_ctrl |= (misc_ctrl_reg &
15407 MISC_HOST_CTRL_CHIPREV);
15408 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15409 tp->misc_host_ctrl);
15411 tg3_detect_asic_rev(tp, misc_ctrl_reg);
15413 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15414 * we need to disable memory and use config. cycles
15415 * only to access all registers. The 5702/03 chips
15416 * can mistakenly decode the special cycles from the
15417 * ICH chipsets as memory write cycles, causing corruption
15418 * of register and memory space. Only certain ICH bridges
15419 * will drive special cycles with non-zero data during the
15420 * address phase which can fall within the 5703's address
15421 * range. This is not an ICH bug as the PCI spec allows
15422 * non-zero address during special cycles. However, only
15423 * these ICH bridges are known to drive non-zero addresses
15424 * during special cycles.
15426 * Since special cycles do not cross PCI bridges, we only
15427 * enable this workaround if the 5703 is on the secondary
15428 * bus of these ICH bridges.
15430 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15431 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15432 static struct tg3_dev_id {
15436 } ich_chipsets[] = {
15437 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15439 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15441 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15443 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15447 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15448 struct pci_dev *bridge = NULL;
15450 while (pci_id->vendor != 0) {
15451 bridge = pci_get_device(pci_id->vendor, pci_id->device,
15457 if (pci_id->rev != PCI_ANY_ID) {
15458 if (bridge->revision > pci_id->rev)
15461 if (bridge->subordinate &&
15462 (bridge->subordinate->number ==
15463 tp->pdev->bus->number)) {
15464 tg3_flag_set(tp, ICH_WORKAROUND);
15465 pci_dev_put(bridge);
15471 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15472 static struct tg3_dev_id {
15475 } bridge_chipsets[] = {
15476 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15477 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15480 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15481 struct pci_dev *bridge = NULL;
15483 while (pci_id->vendor != 0) {
15484 bridge = pci_get_device(pci_id->vendor,
15491 if (bridge->subordinate &&
15492 (bridge->subordinate->number <=
15493 tp->pdev->bus->number) &&
15494 (bridge->subordinate->busn_res.end >=
15495 tp->pdev->bus->number)) {
15496 tg3_flag_set(tp, 5701_DMA_BUG);
15497 pci_dev_put(bridge);
15503 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15504 * DMA addresses > 40-bit. This bridge may have other additional
15505 * 57xx devices behind it in some 4-port NIC designs for example.
15506 * Any tg3 device found behind the bridge will also need the 40-bit
15509 if (tg3_flag(tp, 5780_CLASS)) {
15510 tg3_flag_set(tp, 40BIT_DMA_BUG);
15511 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15513 struct pci_dev *bridge = NULL;
15516 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15517 PCI_DEVICE_ID_SERVERWORKS_EPB,
15519 if (bridge && bridge->subordinate &&
15520 (bridge->subordinate->number <=
15521 tp->pdev->bus->number) &&
15522 (bridge->subordinate->busn_res.end >=
15523 tp->pdev->bus->number)) {
15524 tg3_flag_set(tp, 40BIT_DMA_BUG);
15525 pci_dev_put(bridge);
15531 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15532 tg3_asic_rev(tp) == ASIC_REV_5714)
15533 tp->pdev_peer = tg3_find_peer(tp);
15535 /* Determine TSO capabilities */
15536 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
15537 ; /* Do nothing. HW bug. */
15538 else if (tg3_flag(tp, 57765_PLUS))
15539 tg3_flag_set(tp, HW_TSO_3);
15540 else if (tg3_flag(tp, 5755_PLUS) ||
15541 tg3_asic_rev(tp) == ASIC_REV_5906)
15542 tg3_flag_set(tp, HW_TSO_2);
15543 else if (tg3_flag(tp, 5750_PLUS)) {
15544 tg3_flag_set(tp, HW_TSO_1);
15545 tg3_flag_set(tp, TSO_BUG);
15546 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
15547 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
15548 tg3_flag_clear(tp, TSO_BUG);
15549 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15550 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15551 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
15552 tg3_flag_set(tp, FW_TSO);
15553 tg3_flag_set(tp, TSO_BUG);
15554 if (tg3_asic_rev(tp) == ASIC_REV_5705)
15555 tp->fw_needed = FIRMWARE_TG3TSO5;
15557 tp->fw_needed = FIRMWARE_TG3TSO;
15560 /* Selectively allow TSO based on operating conditions */
15561 if (tg3_flag(tp, HW_TSO_1) ||
15562 tg3_flag(tp, HW_TSO_2) ||
15563 tg3_flag(tp, HW_TSO_3) ||
15564 tg3_flag(tp, FW_TSO)) {
15565 /* For firmware TSO, assume ASF is disabled.
15566 * We'll disable TSO later if we discover ASF
15567 * is enabled in tg3_get_eeprom_hw_cfg().
15569 tg3_flag_set(tp, TSO_CAPABLE);
15571 tg3_flag_clear(tp, TSO_CAPABLE);
15572 tg3_flag_clear(tp, TSO_BUG);
15573 tp->fw_needed = NULL;
15576 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
15577 tp->fw_needed = FIRMWARE_TG3;
15579 if (tg3_asic_rev(tp) == ASIC_REV_57766)
15580 tp->fw_needed = FIRMWARE_TG357766;
15584 if (tg3_flag(tp, 5750_PLUS)) {
15585 tg3_flag_set(tp, SUPPORT_MSI);
15586 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
15587 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
15588 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
15589 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
15590 tp->pdev_peer == tp->pdev))
15591 tg3_flag_clear(tp, SUPPORT_MSI);
15593 if (tg3_flag(tp, 5755_PLUS) ||
15594 tg3_asic_rev(tp) == ASIC_REV_5906) {
15595 tg3_flag_set(tp, 1SHOT_MSI);
15598 if (tg3_flag(tp, 57765_PLUS)) {
15599 tg3_flag_set(tp, SUPPORT_MSIX);
15600 tp->irq_max = TG3_IRQ_MAX_VECS;
15606 if (tp->irq_max > 1) {
15607 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15608 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15610 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15611 tg3_asic_rev(tp) == ASIC_REV_5720)
15612 tp->txq_max = tp->irq_max - 1;
15615 if (tg3_flag(tp, 5755_PLUS) ||
15616 tg3_asic_rev(tp) == ASIC_REV_5906)
15617 tg3_flag_set(tp, SHORT_DMA_BUG);
15619 if (tg3_asic_rev(tp) == ASIC_REV_5719)
15620 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15622 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15623 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15624 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15625 tg3_asic_rev(tp) == ASIC_REV_5762)
15626 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15628 if (tg3_flag(tp, 57765_PLUS) &&
15629 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
15630 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15632 if (!tg3_flag(tp, 5705_PLUS) ||
15633 tg3_flag(tp, 5780_CLASS) ||
15634 tg3_flag(tp, USE_JUMBO_BDFLAG))
15635 tg3_flag_set(tp, JUMBO_CAPABLE);
15637 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15640 if (pci_is_pcie(tp->pdev)) {
15643 tg3_flag_set(tp, PCI_EXPRESS);
15645 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15646 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15647 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15648 tg3_flag_clear(tp, HW_TSO_2);
15649 tg3_flag_clear(tp, TSO_CAPABLE);
15651 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
15652 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15653 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
15654 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
15655 tg3_flag_set(tp, CLKREQ_BUG);
15656 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
15657 tg3_flag_set(tp, L1PLLPD_EN);
15659 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
15660 /* BCM5785 devices are effectively PCIe devices, and should
15661 * follow PCIe codepaths, but do not have a PCIe capabilities
15664 tg3_flag_set(tp, PCI_EXPRESS);
15665 } else if (!tg3_flag(tp, 5705_PLUS) ||
15666 tg3_flag(tp, 5780_CLASS)) {
15667 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15668 if (!tp->pcix_cap) {
15669 dev_err(&tp->pdev->dev,
15670 "Cannot find PCI-X capability, aborting\n");
15674 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15675 tg3_flag_set(tp, PCIX_MODE);
15678 /* If we have an AMD 762 or VIA K8T800 chipset, write
15679 * reordering to the mailbox registers done by the host
15680 * controller can cause major troubles. We read back from
15681 * every mailbox register write to force the writes to be
15682 * posted to the chip in order.
15684 if (pci_dev_present(tg3_write_reorder_chipsets) &&
15685 !tg3_flag(tp, PCI_EXPRESS))
15686 tg3_flag_set(tp, MBOX_WRITE_REORDER);
15688 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15689 &tp->pci_cacheline_sz);
15690 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15691 &tp->pci_lat_timer);
15692 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
15693 tp->pci_lat_timer < 64) {
15694 tp->pci_lat_timer = 64;
15695 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15696 tp->pci_lat_timer);
15699 /* Important! -- It is critical that the PCI-X hw workaround
15700 * situation is decided before the first MMIO register access.
15702 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
15703 /* 5700 BX chips need to have their TX producer index
15704 * mailboxes written twice to workaround a bug.
15706 tg3_flag_set(tp, TXD_MBOX_HWBUG);
15708 /* If we are in PCI-X mode, enable register write workaround.
15710 * The workaround is to use indirect register accesses
15711 * for all chip writes not to mailbox registers.
15713 if (tg3_flag(tp, PCIX_MODE)) {
15716 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15718 /* The chip can have it's power management PCI config
15719 * space registers clobbered due to this bug.
15720 * So explicitly force the chip into D0 here.
15722 pci_read_config_dword(tp->pdev,
15723 tp->pm_cap + PCI_PM_CTRL,
15725 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
15726 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
15727 pci_write_config_dword(tp->pdev,
15728 tp->pm_cap + PCI_PM_CTRL,
15731 /* Also, force SERR#/PERR# in PCI command. */
15732 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15733 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
15734 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15738 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
15739 tg3_flag_set(tp, PCI_HIGH_SPEED);
15740 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
15741 tg3_flag_set(tp, PCI_32BIT);
15743 /* Chip-specific fixup from Broadcom driver */
15744 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
15745 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
15746 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
15747 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
15750 /* Default fast path register access methods */
15751 tp->read32 = tg3_read32;
15752 tp->write32 = tg3_write32;
15753 tp->read32_mbox = tg3_read32;
15754 tp->write32_mbox = tg3_write32;
15755 tp->write32_tx_mbox = tg3_write32;
15756 tp->write32_rx_mbox = tg3_write32;
15758 /* Various workaround register access methods */
15759 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
15760 tp->write32 = tg3_write_indirect_reg32;
15761 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
15762 (tg3_flag(tp, PCI_EXPRESS) &&
15763 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
15765 * Back to back register writes can cause problems on these
15766 * chips, the workaround is to read back all reg writes
15767 * except those to mailbox regs.
15769 * See tg3_write_indirect_reg32().
15771 tp->write32 = tg3_write_flush_reg32;
15774 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
15775 tp->write32_tx_mbox = tg3_write32_tx_mbox;
15776 if (tg3_flag(tp, MBOX_WRITE_REORDER))
15777 tp->write32_rx_mbox = tg3_write_flush_reg32;
15780 if (tg3_flag(tp, ICH_WORKAROUND)) {
15781 tp->read32 = tg3_read_indirect_reg32;
15782 tp->write32 = tg3_write_indirect_reg32;
15783 tp->read32_mbox = tg3_read_indirect_mbox;
15784 tp->write32_mbox = tg3_write_indirect_mbox;
15785 tp->write32_tx_mbox = tg3_write_indirect_mbox;
15786 tp->write32_rx_mbox = tg3_write_indirect_mbox;
15791 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15792 pci_cmd &= ~PCI_COMMAND_MEMORY;
15793 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15795 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15796 tp->read32_mbox = tg3_read32_mbox_5906;
15797 tp->write32_mbox = tg3_write32_mbox_5906;
15798 tp->write32_tx_mbox = tg3_write32_mbox_5906;
15799 tp->write32_rx_mbox = tg3_write32_mbox_5906;
15802 if (tp->write32 == tg3_write_indirect_reg32 ||
15803 (tg3_flag(tp, PCIX_MODE) &&
15804 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15805 tg3_asic_rev(tp) == ASIC_REV_5701)))
15806 tg3_flag_set(tp, SRAM_USE_CONFIG);
15808 /* The memory arbiter has to be enabled in order for SRAM accesses
15809 * to succeed. Normally on powerup the tg3 chip firmware will make
15810 * sure it is enabled, but other entities such as system netboot
15811 * code might disable it.
15813 val = tr32(MEMARB_MODE);
15814 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
15816 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
15817 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15818 tg3_flag(tp, 5780_CLASS)) {
15819 if (tg3_flag(tp, PCIX_MODE)) {
15820 pci_read_config_dword(tp->pdev,
15821 tp->pcix_cap + PCI_X_STATUS,
15823 tp->pci_fn = val & 0x7;
15825 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15826 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15827 tg3_asic_rev(tp) == ASIC_REV_5720) {
15828 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
15829 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
15830 val = tr32(TG3_CPMU_STATUS);
15832 if (tg3_asic_rev(tp) == ASIC_REV_5717)
15833 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
15835 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
15836 TG3_CPMU_STATUS_FSHFT_5719;
15839 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
15840 tp->write32_tx_mbox = tg3_write_flush_reg32;
15841 tp->write32_rx_mbox = tg3_write_flush_reg32;
15844 /* Get eeprom hw config before calling tg3_set_power_state().
15845 * In particular, the TG3_FLAG_IS_NIC flag must be
15846 * determined before calling tg3_set_power_state() so that
15847 * we know whether or not to switch out of Vaux power.
15848 * When the flag is set, it means that GPIO1 is used for eeprom
15849 * write protect and also implies that it is a LOM where GPIOs
15850 * are not used to switch power.
15852 tg3_get_eeprom_hw_cfg(tp);
15854 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
15855 tg3_flag_clear(tp, TSO_CAPABLE);
15856 tg3_flag_clear(tp, TSO_BUG);
15857 tp->fw_needed = NULL;
15860 if (tg3_flag(tp, ENABLE_APE)) {
15861 /* Allow reads and writes to the
15862 * APE register and memory space.
15864 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
15865 PCISTATE_ALLOW_APE_SHMEM_WR |
15866 PCISTATE_ALLOW_APE_PSPACE_WR;
15867 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
15870 tg3_ape_lock_init(tp);
15873 /* Set up tp->grc_local_ctrl before calling
15874 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
15875 * will bring 5700's external PHY out of reset.
15876 * It is also used as eeprom write protect on LOMs.
15878 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
15879 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15880 tg3_flag(tp, EEPROM_WRITE_PROT))
15881 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
15882 GRC_LCLCTRL_GPIO_OUTPUT1);
15883 /* Unused GPIO3 must be driven as output on 5752 because there
15884 * are no pull-up resistors on unused GPIO pins.
15886 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
15887 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
15889 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15890 tg3_asic_rev(tp) == ASIC_REV_57780 ||
15891 tg3_flag(tp, 57765_CLASS))
15892 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15894 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15895 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
15896 /* Turn off the debug UART. */
15897 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15898 if (tg3_flag(tp, IS_NIC))
15899 /* Keep VMain power. */
15900 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
15901 GRC_LCLCTRL_GPIO_OUTPUT0;
15904 if (tg3_asic_rev(tp) == ASIC_REV_5762)
15905 tp->grc_local_ctrl |=
15906 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
15908 /* Switch out of Vaux if it is a NIC */
15909 tg3_pwrsrc_switch_to_vmain(tp);
15911 /* Derive initial jumbo mode from MTU assigned in
15912 * ether_setup() via the alloc_etherdev() call
15914 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
15915 tg3_flag_set(tp, JUMBO_RING_ENABLE);
15917 /* Determine WakeOnLan speed to use. */
15918 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15919 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
15920 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
15921 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
15922 tg3_flag_clear(tp, WOL_SPEED_100MB);
15924 tg3_flag_set(tp, WOL_SPEED_100MB);
15927 if (tg3_asic_rev(tp) == ASIC_REV_5906)
15928 tp->phy_flags |= TG3_PHYFLG_IS_FET;
15930 /* A few boards don't want Ethernet@WireSpeed phy feature */
15931 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15932 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
15933 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
15934 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
15935 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
15936 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15937 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
15939 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
15940 tg3_chip_rev(tp) == CHIPREV_5704_AX)
15941 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
15942 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
15943 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
15945 if (tg3_flag(tp, 5705_PLUS) &&
15946 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
15947 tg3_asic_rev(tp) != ASIC_REV_5785 &&
15948 tg3_asic_rev(tp) != ASIC_REV_57780 &&
15949 !tg3_flag(tp, 57765_PLUS)) {
15950 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15951 tg3_asic_rev(tp) == ASIC_REV_5787 ||
15952 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15953 tg3_asic_rev(tp) == ASIC_REV_5761) {
15954 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
15955 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
15956 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
15957 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
15958 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
15960 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
15963 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15964 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
15965 tp->phy_otp = tg3_read_otp_phycfg(tp);
15966 if (tp->phy_otp == 0)
15967 tp->phy_otp = TG3_OTP_DEFAULT;
15970 if (tg3_flag(tp, CPMU_PRESENT))
15971 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
15973 tp->mi_mode = MAC_MI_MODE_BASE;
15975 tp->coalesce_mode = 0;
15976 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
15977 tg3_chip_rev(tp) != CHIPREV_5700_BX)
15978 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
15980 /* Set these bits to enable statistics workaround. */
15981 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15982 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
15983 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
15984 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
15985 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
15988 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
15989 tg3_asic_rev(tp) == ASIC_REV_57780)
15990 tg3_flag_set(tp, USE_PHYLIB);
15992 err = tg3_mdio_init(tp);
15996 /* Initialize data/descriptor byte/word swapping. */
15997 val = tr32(GRC_MODE);
15998 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15999 tg3_asic_rev(tp) == ASIC_REV_5762)
16000 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16001 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16002 GRC_MODE_B2HRX_ENABLE |
16003 GRC_MODE_HTX2B_ENABLE |
16004 GRC_MODE_HOST_STACKUP);
16006 val &= GRC_MODE_HOST_STACKUP;
16008 tw32(GRC_MODE, val | tp->grc_mode);
16010 tg3_switch_clocks(tp);
16012 /* Clear this out for sanity. */
16013 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16015 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16017 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16018 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16019 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16020 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16021 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16022 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16023 void __iomem *sram_base;
16025 /* Write some dummy words into the SRAM status block
16026 * area, see if it reads back correctly. If the return
16027 * value is bad, force enable the PCIX workaround.
16029 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16031 writel(0x00000000, sram_base);
16032 writel(0x00000000, sram_base + 4);
16033 writel(0xffffffff, sram_base + 4);
16034 if (readl(sram_base) != 0x00000000)
16035 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16040 tg3_nvram_init(tp);
16042 /* If the device has an NVRAM, no need to load patch firmware */
16043 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16044 !tg3_flag(tp, NO_NVRAM))
16045 tp->fw_needed = NULL;
16047 grc_misc_cfg = tr32(GRC_MISC_CFG);
16048 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16050 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16051 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16052 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16053 tg3_flag_set(tp, IS_5788);
16055 if (!tg3_flag(tp, IS_5788) &&
16056 tg3_asic_rev(tp) != ASIC_REV_5700)
16057 tg3_flag_set(tp, TAGGED_STATUS);
16058 if (tg3_flag(tp, TAGGED_STATUS)) {
16059 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16060 HOSTCC_MODE_CLRTICK_TXBD);
16062 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16063 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16064 tp->misc_host_ctrl);
16067 /* Preserve the APE MAC_MODE bits */
16068 if (tg3_flag(tp, ENABLE_APE))
16069 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16073 if (tg3_10_100_only_device(tp, ent))
16074 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16076 err = tg3_phy_probe(tp);
16078 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16079 /* ... but do not return immediately ... */
16084 tg3_read_fw_ver(tp);
16086 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16087 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16089 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16090 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16092 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16095 /* 5700 {AX,BX} chips have a broken status block link
16096 * change bit implementation, so we must use the
16097 * status register in those cases.
16099 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16100 tg3_flag_set(tp, USE_LINKCHG_REG);
16102 tg3_flag_clear(tp, USE_LINKCHG_REG);
16104 /* The led_ctrl is set during tg3_phy_probe, here we might
16105 * have to force the link status polling mechanism based
16106 * upon subsystem IDs.
16108 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16109 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16110 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16111 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16112 tg3_flag_set(tp, USE_LINKCHG_REG);
16115 /* For all SERDES we poll the MAC status register. */
16116 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16117 tg3_flag_set(tp, POLL_SERDES);
16119 tg3_flag_clear(tp, POLL_SERDES);
16121 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16122 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16123 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16124 tg3_flag(tp, PCIX_MODE)) {
16125 tp->rx_offset = NET_SKB_PAD;
16126 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16127 tp->rx_copy_thresh = ~(u16)0;
16131 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16132 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16133 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16135 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16137 /* Increment the rx prod index on the rx std ring by at most
16138 * 8 for these chips to workaround hw errata.
16140 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16141 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16142 tg3_asic_rev(tp) == ASIC_REV_5755)
16143 tp->rx_std_max_post = 8;
16145 if (tg3_flag(tp, ASPM_WORKAROUND))
16146 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16147 PCIE_PWR_MGMT_L1_THRESH_MSK;
16152 #ifdef CONFIG_SPARC
16153 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16155 struct net_device *dev = tp->dev;
16156 struct pci_dev *pdev = tp->pdev;
16157 struct device_node *dp = pci_device_to_OF_node(pdev);
16158 const unsigned char *addr;
16161 addr = of_get_property(dp, "local-mac-address", &len);
16162 if (addr && len == 6) {
16163 memcpy(dev->dev_addr, addr, 6);
16169 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16171 struct net_device *dev = tp->dev;
16173 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
16178 static int tg3_get_device_address(struct tg3 *tp)
16180 struct net_device *dev = tp->dev;
16181 u32 hi, lo, mac_offset;
16185 #ifdef CONFIG_SPARC
16186 if (!tg3_get_macaddr_sparc(tp))
16190 if (tg3_flag(tp, IS_SSB_CORE)) {
16191 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16192 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16197 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16198 tg3_flag(tp, 5780_CLASS)) {
16199 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16201 if (tg3_nvram_lock(tp))
16202 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16204 tg3_nvram_unlock(tp);
16205 } else if (tg3_flag(tp, 5717_PLUS)) {
16206 if (tp->pci_fn & 1)
16208 if (tp->pci_fn > 1)
16209 mac_offset += 0x18c;
16210 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16213 /* First try to get it from MAC address mailbox. */
16214 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
16215 if ((hi >> 16) == 0x484b) {
16216 dev->dev_addr[0] = (hi >> 8) & 0xff;
16217 dev->dev_addr[1] = (hi >> 0) & 0xff;
16219 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16220 dev->dev_addr[2] = (lo >> 24) & 0xff;
16221 dev->dev_addr[3] = (lo >> 16) & 0xff;
16222 dev->dev_addr[4] = (lo >> 8) & 0xff;
16223 dev->dev_addr[5] = (lo >> 0) & 0xff;
16225 /* Some old bootcode may report a 0 MAC address in SRAM */
16226 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16229 /* Next, try NVRAM. */
16230 if (!tg3_flag(tp, NO_NVRAM) &&
16231 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16232 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16233 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16234 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16236 /* Finally just fetch it out of the MAC control regs. */
16238 hi = tr32(MAC_ADDR_0_HIGH);
16239 lo = tr32(MAC_ADDR_0_LOW);
16241 dev->dev_addr[5] = lo & 0xff;
16242 dev->dev_addr[4] = (lo >> 8) & 0xff;
16243 dev->dev_addr[3] = (lo >> 16) & 0xff;
16244 dev->dev_addr[2] = (lo >> 24) & 0xff;
16245 dev->dev_addr[1] = hi & 0xff;
16246 dev->dev_addr[0] = (hi >> 8) & 0xff;
16250 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16251 #ifdef CONFIG_SPARC
16252 if (!tg3_get_default_macaddr_sparc(tp))
16260 #define BOUNDARY_SINGLE_CACHELINE 1
16261 #define BOUNDARY_MULTI_CACHELINE 2
16263 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16265 int cacheline_size;
16269 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16271 cacheline_size = 1024;
16273 cacheline_size = (int) byte * 4;
16275 /* On 5703 and later chips, the boundary bits have no
16278 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16279 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16280 !tg3_flag(tp, PCI_EXPRESS))
16283 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16284 goal = BOUNDARY_MULTI_CACHELINE;
16286 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16287 goal = BOUNDARY_SINGLE_CACHELINE;
16293 if (tg3_flag(tp, 57765_PLUS)) {
16294 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16301 /* PCI controllers on most RISC systems tend to disconnect
16302 * when a device tries to burst across a cache-line boundary.
16303 * Therefore, letting tg3 do so just wastes PCI bandwidth.
16305 * Unfortunately, for PCI-E there are only limited
16306 * write-side controls for this, and thus for reads
16307 * we will still get the disconnects. We'll also waste
16308 * these PCI cycles for both read and write for chips
16309 * other than 5700 and 5701 which do not implement the
16312 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16313 switch (cacheline_size) {
16318 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16319 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16320 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16322 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16323 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16328 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16329 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16333 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16334 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16337 } else if (tg3_flag(tp, PCI_EXPRESS)) {
16338 switch (cacheline_size) {
16342 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16343 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16344 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16350 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16351 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16355 switch (cacheline_size) {
16357 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16358 val |= (DMA_RWCTRL_READ_BNDRY_16 |
16359 DMA_RWCTRL_WRITE_BNDRY_16);
16364 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16365 val |= (DMA_RWCTRL_READ_BNDRY_32 |
16366 DMA_RWCTRL_WRITE_BNDRY_32);
16371 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16372 val |= (DMA_RWCTRL_READ_BNDRY_64 |
16373 DMA_RWCTRL_WRITE_BNDRY_64);
16378 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16379 val |= (DMA_RWCTRL_READ_BNDRY_128 |
16380 DMA_RWCTRL_WRITE_BNDRY_128);
16385 val |= (DMA_RWCTRL_READ_BNDRY_256 |
16386 DMA_RWCTRL_WRITE_BNDRY_256);
16389 val |= (DMA_RWCTRL_READ_BNDRY_512 |
16390 DMA_RWCTRL_WRITE_BNDRY_512);
16394 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16395 DMA_RWCTRL_WRITE_BNDRY_1024);
16404 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16405 int size, int to_device)
16407 struct tg3_internal_buffer_desc test_desc;
16408 u32 sram_dma_descs;
16411 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16413 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16414 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16415 tw32(RDMAC_STATUS, 0);
16416 tw32(WDMAC_STATUS, 0);
16418 tw32(BUFMGR_MODE, 0);
16419 tw32(FTQ_RESET, 0);
16421 test_desc.addr_hi = ((u64) buf_dma) >> 32;
16422 test_desc.addr_lo = buf_dma & 0xffffffff;
16423 test_desc.nic_mbuf = 0x00002100;
16424 test_desc.len = size;
16427 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
16428 * the *second* time the tg3 driver was getting loaded after an
16431 * Broadcom tells me:
16432 * ...the DMA engine is connected to the GRC block and a DMA
16433 * reset may affect the GRC block in some unpredictable way...
16434 * The behavior of resets to individual blocks has not been tested.
16436 * Broadcom noted the GRC reset will also reset all sub-components.
16439 test_desc.cqid_sqid = (13 << 8) | 2;
16441 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16444 test_desc.cqid_sqid = (16 << 8) | 7;
16446 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16449 test_desc.flags = 0x00000005;
16451 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16454 val = *(((u32 *)&test_desc) + i);
16455 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16456 sram_dma_descs + (i * sizeof(u32)));
16457 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16459 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
16462 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
16464 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
16467 for (i = 0; i < 40; i++) {
16471 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16473 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16474 if ((val & 0xffff) == sram_dma_descs) {
16485 #define TEST_BUFFER_SIZE 0x2000
16487 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16488 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16492 static int tg3_test_dma(struct tg3 *tp)
16494 dma_addr_t buf_dma;
16495 u32 *buf, saved_dma_rwctrl;
16498 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16499 &buf_dma, GFP_KERNEL);
16505 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16506 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16508 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16510 if (tg3_flag(tp, 57765_PLUS))
16513 if (tg3_flag(tp, PCI_EXPRESS)) {
16514 /* DMA read watermark not used on PCIE */
16515 tp->dma_rwctrl |= 0x00180000;
16516 } else if (!tg3_flag(tp, PCIX_MODE)) {
16517 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16518 tg3_asic_rev(tp) == ASIC_REV_5750)
16519 tp->dma_rwctrl |= 0x003f0000;
16521 tp->dma_rwctrl |= 0x003f000f;
16523 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16524 tg3_asic_rev(tp) == ASIC_REV_5704) {
16525 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16526 u32 read_water = 0x7;
16528 /* If the 5704 is behind the EPB bridge, we can
16529 * do the less restrictive ONE_DMA workaround for
16530 * better performance.
16532 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16533 tg3_asic_rev(tp) == ASIC_REV_5704)
16534 tp->dma_rwctrl |= 0x8000;
16535 else if (ccval == 0x6 || ccval == 0x7)
16536 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16538 if (tg3_asic_rev(tp) == ASIC_REV_5703)
16540 /* Set bit 23 to enable PCIX hw bug fix */
16542 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16543 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16545 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
16546 /* 5780 always in PCIX mode */
16547 tp->dma_rwctrl |= 0x00144000;
16548 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
16549 /* 5714 always in PCIX mode */
16550 tp->dma_rwctrl |= 0x00148000;
16552 tp->dma_rwctrl |= 0x001b000f;
16555 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
16556 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16558 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16559 tg3_asic_rev(tp) == ASIC_REV_5704)
16560 tp->dma_rwctrl &= 0xfffffff0;
16562 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16563 tg3_asic_rev(tp) == ASIC_REV_5701) {
16564 /* Remove this if it causes problems for some boards. */
16565 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16567 /* On 5700/5701 chips, we need to set this bit.
16568 * Otherwise the chip will issue cacheline transactions
16569 * to streamable DMA memory with not all the byte
16570 * enables turned on. This is an error on several
16571 * RISC PCI controllers, in particular sparc64.
16573 * On 5703/5704 chips, this bit has been reassigned
16574 * a different meaning. In particular, it is used
16575 * on those chips to enable a PCI-X workaround.
16577 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16580 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16583 /* Unneeded, already done by tg3_get_invariants. */
16584 tg3_switch_clocks(tp);
16587 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16588 tg3_asic_rev(tp) != ASIC_REV_5701)
16591 /* It is best to perform DMA test with maximum write burst size
16592 * to expose the 5700/5701 write DMA bug.
16594 saved_dma_rwctrl = tp->dma_rwctrl;
16595 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16596 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16601 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16604 /* Send the buffer to the chip. */
16605 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
16607 dev_err(&tp->pdev->dev,
16608 "%s: Buffer write failed. err = %d\n",
16614 /* validate data reached card RAM correctly. */
16615 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16617 tg3_read_mem(tp, 0x2100 + (i*4), &val);
16618 if (le32_to_cpu(val) != p[i]) {
16619 dev_err(&tp->pdev->dev,
16620 "%s: Buffer corrupted on device! "
16621 "(%d != %d)\n", __func__, val, i);
16622 /* ret = -ENODEV here? */
16627 /* Now read it back. */
16628 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
16630 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16631 "err = %d\n", __func__, ret);
16636 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16640 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16641 DMA_RWCTRL_WRITE_BNDRY_16) {
16642 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16643 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16644 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16647 dev_err(&tp->pdev->dev,
16648 "%s: Buffer corrupted on read back! "
16649 "(%d != %d)\n", __func__, p[i], i);
16655 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
16661 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16662 DMA_RWCTRL_WRITE_BNDRY_16) {
16663 /* DMA test passed without adjusting DMA boundary,
16664 * now look for chipsets that are known to expose the
16665 * DMA bug without failing the test.
16667 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
16668 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16669 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16671 /* Safe to use the calculated DMA boundary. */
16672 tp->dma_rwctrl = saved_dma_rwctrl;
16675 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16679 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
16684 static void tg3_init_bufmgr_config(struct tg3 *tp)
16686 if (tg3_flag(tp, 57765_PLUS)) {
16687 tp->bufmgr_config.mbuf_read_dma_low_water =
16688 DEFAULT_MB_RDMA_LOW_WATER_5705;
16689 tp->bufmgr_config.mbuf_mac_rx_low_water =
16690 DEFAULT_MB_MACRX_LOW_WATER_57765;
16691 tp->bufmgr_config.mbuf_high_water =
16692 DEFAULT_MB_HIGH_WATER_57765;
16694 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16695 DEFAULT_MB_RDMA_LOW_WATER_5705;
16696 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16697 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16698 tp->bufmgr_config.mbuf_high_water_jumbo =
16699 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
16700 } else if (tg3_flag(tp, 5705_PLUS)) {
16701 tp->bufmgr_config.mbuf_read_dma_low_water =
16702 DEFAULT_MB_RDMA_LOW_WATER_5705;
16703 tp->bufmgr_config.mbuf_mac_rx_low_water =
16704 DEFAULT_MB_MACRX_LOW_WATER_5705;
16705 tp->bufmgr_config.mbuf_high_water =
16706 DEFAULT_MB_HIGH_WATER_5705;
16707 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16708 tp->bufmgr_config.mbuf_mac_rx_low_water =
16709 DEFAULT_MB_MACRX_LOW_WATER_5906;
16710 tp->bufmgr_config.mbuf_high_water =
16711 DEFAULT_MB_HIGH_WATER_5906;
16714 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16715 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
16716 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16717 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
16718 tp->bufmgr_config.mbuf_high_water_jumbo =
16719 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
16721 tp->bufmgr_config.mbuf_read_dma_low_water =
16722 DEFAULT_MB_RDMA_LOW_WATER;
16723 tp->bufmgr_config.mbuf_mac_rx_low_water =
16724 DEFAULT_MB_MACRX_LOW_WATER;
16725 tp->bufmgr_config.mbuf_high_water =
16726 DEFAULT_MB_HIGH_WATER;
16728 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16729 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
16730 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16731 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
16732 tp->bufmgr_config.mbuf_high_water_jumbo =
16733 DEFAULT_MB_HIGH_WATER_JUMBO;
16736 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
16737 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
16740 static char *tg3_phy_string(struct tg3 *tp)
16742 switch (tp->phy_id & TG3_PHY_ID_MASK) {
16743 case TG3_PHY_ID_BCM5400: return "5400";
16744 case TG3_PHY_ID_BCM5401: return "5401";
16745 case TG3_PHY_ID_BCM5411: return "5411";
16746 case TG3_PHY_ID_BCM5701: return "5701";
16747 case TG3_PHY_ID_BCM5703: return "5703";
16748 case TG3_PHY_ID_BCM5704: return "5704";
16749 case TG3_PHY_ID_BCM5705: return "5705";
16750 case TG3_PHY_ID_BCM5750: return "5750";
16751 case TG3_PHY_ID_BCM5752: return "5752";
16752 case TG3_PHY_ID_BCM5714: return "5714";
16753 case TG3_PHY_ID_BCM5780: return "5780";
16754 case TG3_PHY_ID_BCM5755: return "5755";
16755 case TG3_PHY_ID_BCM5787: return "5787";
16756 case TG3_PHY_ID_BCM5784: return "5784";
16757 case TG3_PHY_ID_BCM5756: return "5722/5756";
16758 case TG3_PHY_ID_BCM5906: return "5906";
16759 case TG3_PHY_ID_BCM5761: return "5761";
16760 case TG3_PHY_ID_BCM5718C: return "5718C";
16761 case TG3_PHY_ID_BCM5718S: return "5718S";
16762 case TG3_PHY_ID_BCM57765: return "57765";
16763 case TG3_PHY_ID_BCM5719C: return "5719C";
16764 case TG3_PHY_ID_BCM5720C: return "5720C";
16765 case TG3_PHY_ID_BCM5762: return "5762C";
16766 case TG3_PHY_ID_BCM8002: return "8002/serdes";
16767 case 0: return "serdes";
16768 default: return "unknown";
16772 static char *tg3_bus_string(struct tg3 *tp, char *str)
16774 if (tg3_flag(tp, PCI_EXPRESS)) {
16775 strcpy(str, "PCI Express");
16777 } else if (tg3_flag(tp, PCIX_MODE)) {
16778 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
16780 strcpy(str, "PCIX:");
16782 if ((clock_ctrl == 7) ||
16783 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
16784 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
16785 strcat(str, "133MHz");
16786 else if (clock_ctrl == 0)
16787 strcat(str, "33MHz");
16788 else if (clock_ctrl == 2)
16789 strcat(str, "50MHz");
16790 else if (clock_ctrl == 4)
16791 strcat(str, "66MHz");
16792 else if (clock_ctrl == 6)
16793 strcat(str, "100MHz");
16795 strcpy(str, "PCI:");
16796 if (tg3_flag(tp, PCI_HIGH_SPEED))
16797 strcat(str, "66MHz");
16799 strcat(str, "33MHz");
16801 if (tg3_flag(tp, PCI_32BIT))
16802 strcat(str, ":32-bit");
16804 strcat(str, ":64-bit");
16808 static void tg3_init_coal(struct tg3 *tp)
16810 struct ethtool_coalesce *ec = &tp->coal;
16812 memset(ec, 0, sizeof(*ec));
16813 ec->cmd = ETHTOOL_GCOALESCE;
16814 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
16815 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
16816 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
16817 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
16818 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
16819 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
16820 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
16821 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
16822 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
16824 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
16825 HOSTCC_MODE_CLRTICK_TXBD)) {
16826 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
16827 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
16828 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
16829 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
16832 if (tg3_flag(tp, 5705_PLUS)) {
16833 ec->rx_coalesce_usecs_irq = 0;
16834 ec->tx_coalesce_usecs_irq = 0;
16835 ec->stats_block_coalesce_usecs = 0;
16839 static int tg3_init_one(struct pci_dev *pdev,
16840 const struct pci_device_id *ent)
16842 struct net_device *dev;
16844 int i, err, pm_cap;
16845 u32 sndmbx, rcvmbx, intmbx;
16847 u64 dma_mask, persist_dma_mask;
16848 netdev_features_t features = 0;
16850 printk_once(KERN_INFO "%s\n", version);
16852 err = pci_enable_device(pdev);
16854 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
16858 err = pci_request_regions(pdev, DRV_MODULE_NAME);
16860 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
16861 goto err_out_disable_pdev;
16864 pci_set_master(pdev);
16866 /* Find power-management capability. */
16867 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
16869 dev_err(&pdev->dev,
16870 "Cannot find Power Management capability, aborting\n");
16872 goto err_out_free_res;
16875 err = pci_set_power_state(pdev, PCI_D0);
16877 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
16878 goto err_out_free_res;
16881 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
16884 goto err_out_power_down;
16887 SET_NETDEV_DEV(dev, &pdev->dev);
16889 tp = netdev_priv(dev);
16892 tp->pm_cap = pm_cap;
16893 tp->rx_mode = TG3_DEF_RX_MODE;
16894 tp->tx_mode = TG3_DEF_TX_MODE;
16898 tp->msg_enable = tg3_debug;
16900 tp->msg_enable = TG3_DEF_MSG_ENABLE;
16902 if (pdev_is_ssb_gige_core(pdev)) {
16903 tg3_flag_set(tp, IS_SSB_CORE);
16904 if (ssb_gige_must_flush_posted_writes(pdev))
16905 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
16906 if (ssb_gige_one_dma_at_once(pdev))
16907 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
16908 if (ssb_gige_have_roboswitch(pdev))
16909 tg3_flag_set(tp, ROBOSWITCH);
16910 if (ssb_gige_is_rgmii(pdev))
16911 tg3_flag_set(tp, RGMII_MODE);
16914 /* The word/byte swap controls here control register access byte
16915 * swapping. DMA data byte swapping is controlled in the GRC_MODE
16918 tp->misc_host_ctrl =
16919 MISC_HOST_CTRL_MASK_PCI_INT |
16920 MISC_HOST_CTRL_WORD_SWAP |
16921 MISC_HOST_CTRL_INDIR_ACCESS |
16922 MISC_HOST_CTRL_PCISTATE_RW;
16924 /* The NONFRM (non-frame) byte/word swap controls take effect
16925 * on descriptor entries, anything which isn't packet data.
16927 * The StrongARM chips on the board (one for tx, one for rx)
16928 * are running in big-endian mode.
16930 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
16931 GRC_MODE_WSWAP_NONFRM_DATA);
16932 #ifdef __BIG_ENDIAN
16933 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
16935 spin_lock_init(&tp->lock);
16936 spin_lock_init(&tp->indirect_lock);
16937 INIT_WORK(&tp->reset_task, tg3_reset_task);
16939 tp->regs = pci_ioremap_bar(pdev, BAR_0);
16941 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
16943 goto err_out_free_dev;
16946 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16947 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
16948 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
16949 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
16950 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16951 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16952 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16953 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16954 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16955 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16956 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16957 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
16958 tg3_flag_set(tp, ENABLE_APE);
16959 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
16960 if (!tp->aperegs) {
16961 dev_err(&pdev->dev,
16962 "Cannot map APE registers, aborting\n");
16964 goto err_out_iounmap;
16968 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
16969 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
16971 dev->ethtool_ops = &tg3_ethtool_ops;
16972 dev->watchdog_timeo = TG3_TX_TIMEOUT;
16973 dev->netdev_ops = &tg3_netdev_ops;
16974 dev->irq = pdev->irq;
16976 err = tg3_get_invariants(tp, ent);
16978 dev_err(&pdev->dev,
16979 "Problem fetching invariants of chip, aborting\n");
16980 goto err_out_apeunmap;
16983 /* The EPB bridge inside 5714, 5715, and 5780 and any
16984 * device behind the EPB cannot support DMA addresses > 40-bit.
16985 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
16986 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
16987 * do DMA address check in tg3_start_xmit().
16989 if (tg3_flag(tp, IS_5788))
16990 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
16991 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
16992 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
16993 #ifdef CONFIG_HIGHMEM
16994 dma_mask = DMA_BIT_MASK(64);
16997 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
16999 /* Configure DMA attributes. */
17000 if (dma_mask > DMA_BIT_MASK(32)) {
17001 err = pci_set_dma_mask(pdev, dma_mask);
17003 features |= NETIF_F_HIGHDMA;
17004 err = pci_set_consistent_dma_mask(pdev,
17007 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17008 "DMA for consistent allocations\n");
17009 goto err_out_apeunmap;
17013 if (err || dma_mask == DMA_BIT_MASK(32)) {
17014 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17016 dev_err(&pdev->dev,
17017 "No usable DMA configuration, aborting\n");
17018 goto err_out_apeunmap;
17022 tg3_init_bufmgr_config(tp);
17024 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
17026 /* 5700 B0 chips do not support checksumming correctly due
17027 * to hardware bugs.
17029 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17030 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17032 if (tg3_flag(tp, 5755_PLUS))
17033 features |= NETIF_F_IPV6_CSUM;
17036 /* TSO is on by default on chips that support hardware TSO.
17037 * Firmware TSO on older chips gives lower performance, so it
17038 * is off by default, but can be enabled using ethtool.
17040 if ((tg3_flag(tp, HW_TSO_1) ||
17041 tg3_flag(tp, HW_TSO_2) ||
17042 tg3_flag(tp, HW_TSO_3)) &&
17043 (features & NETIF_F_IP_CSUM))
17044 features |= NETIF_F_TSO;
17045 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17046 if (features & NETIF_F_IPV6_CSUM)
17047 features |= NETIF_F_TSO6;
17048 if (tg3_flag(tp, HW_TSO_3) ||
17049 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17050 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17051 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17052 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17053 tg3_asic_rev(tp) == ASIC_REV_57780)
17054 features |= NETIF_F_TSO_ECN;
17057 dev->features |= features;
17058 dev->vlan_features |= features;
17061 * Add loopback capability only for a subset of devices that support
17062 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
17063 * loopback for the remaining devices.
17065 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17066 !tg3_flag(tp, CPMU_PRESENT))
17067 /* Add the loopback capability */
17068 features |= NETIF_F_LOOPBACK;
17070 dev->hw_features |= features;
17072 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17073 !tg3_flag(tp, TSO_CAPABLE) &&
17074 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17075 tg3_flag_set(tp, MAX_RXPEND_64);
17076 tp->rx_pending = 63;
17079 err = tg3_get_device_address(tp);
17081 dev_err(&pdev->dev,
17082 "Could not obtain valid ethernet address, aborting\n");
17083 goto err_out_apeunmap;
17087 * Reset chip in case UNDI or EFI driver did not shutdown
17088 * DMA self test will enable WDMAC and we'll see (spurious)
17089 * pending DMA on the PCI bus at that point.
17091 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17092 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17093 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17094 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17097 err = tg3_test_dma(tp);
17099 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17100 goto err_out_apeunmap;
17103 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17104 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17105 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17106 for (i = 0; i < tp->irq_max; i++) {
17107 struct tg3_napi *tnapi = &tp->napi[i];
17110 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17112 tnapi->int_mbox = intmbx;
17118 tnapi->consmbox = rcvmbx;
17119 tnapi->prodmbox = sndmbx;
17122 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17124 tnapi->coal_now = HOSTCC_MODE_NOW;
17126 if (!tg3_flag(tp, SUPPORT_MSIX))
17130 * If we support MSIX, we'll be using RSS. If we're using
17131 * RSS, the first vector only handles link interrupts and the
17132 * remaining vectors handle rx and tx interrupts. Reuse the
17133 * mailbox values for the next iteration. The values we setup
17134 * above are still useful for the single vectored mode.
17149 pci_set_drvdata(pdev, dev);
17151 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17152 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17153 tg3_asic_rev(tp) == ASIC_REV_5762)
17154 tg3_flag_set(tp, PTP_CAPABLE);
17156 if (tg3_flag(tp, 5717_PLUS)) {
17157 /* Resume a low-power mode */
17158 tg3_frob_aux_power(tp, false);
17161 tg3_timer_init(tp);
17163 tg3_carrier_off(tp);
17165 err = register_netdev(dev);
17167 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17168 goto err_out_apeunmap;
17171 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17172 tp->board_part_number,
17173 tg3_chip_rev_id(tp),
17174 tg3_bus_string(tp, str),
17177 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17178 struct phy_device *phydev;
17179 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
17181 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17182 phydev->drv->name, dev_name(&phydev->dev));
17186 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17187 ethtype = "10/100Base-TX";
17188 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17189 ethtype = "1000Base-SX";
17191 ethtype = "10/100/1000Base-T";
17193 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17194 "(WireSpeed[%d], EEE[%d])\n",
17195 tg3_phy_string(tp), ethtype,
17196 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17197 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17200 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17201 (dev->features & NETIF_F_RXCSUM) != 0,
17202 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17203 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17204 tg3_flag(tp, ENABLE_ASF) != 0,
17205 tg3_flag(tp, TSO_CAPABLE) != 0);
17206 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17208 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17209 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17211 pci_save_state(pdev);
17217 iounmap(tp->aperegs);
17218 tp->aperegs = NULL;
17230 err_out_power_down:
17231 pci_set_power_state(pdev, PCI_D3hot);
17234 pci_release_regions(pdev);
17236 err_out_disable_pdev:
17237 pci_disable_device(pdev);
17238 pci_set_drvdata(pdev, NULL);
17242 static void tg3_remove_one(struct pci_dev *pdev)
17244 struct net_device *dev = pci_get_drvdata(pdev);
17247 struct tg3 *tp = netdev_priv(dev);
17249 release_firmware(tp->fw);
17251 tg3_reset_task_cancel(tp);
17253 if (tg3_flag(tp, USE_PHYLIB)) {
17258 unregister_netdev(dev);
17260 iounmap(tp->aperegs);
17261 tp->aperegs = NULL;
17268 pci_release_regions(pdev);
17269 pci_disable_device(pdev);
17270 pci_set_drvdata(pdev, NULL);
17274 #ifdef CONFIG_PM_SLEEP
17275 static int tg3_suspend(struct device *device)
17277 struct pci_dev *pdev = to_pci_dev(device);
17278 struct net_device *dev = pci_get_drvdata(pdev);
17279 struct tg3 *tp = netdev_priv(dev);
17282 if (!netif_running(dev))
17285 tg3_reset_task_cancel(tp);
17287 tg3_netif_stop(tp);
17289 tg3_timer_stop(tp);
17291 tg3_full_lock(tp, 1);
17292 tg3_disable_ints(tp);
17293 tg3_full_unlock(tp);
17295 netif_device_detach(dev);
17297 tg3_full_lock(tp, 0);
17298 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17299 tg3_flag_clear(tp, INIT_COMPLETE);
17300 tg3_full_unlock(tp);
17302 err = tg3_power_down_prepare(tp);
17306 tg3_full_lock(tp, 0);
17308 tg3_flag_set(tp, INIT_COMPLETE);
17309 err2 = tg3_restart_hw(tp, 1);
17313 tg3_timer_start(tp);
17315 netif_device_attach(dev);
17316 tg3_netif_start(tp);
17319 tg3_full_unlock(tp);
17328 static int tg3_resume(struct device *device)
17330 struct pci_dev *pdev = to_pci_dev(device);
17331 struct net_device *dev = pci_get_drvdata(pdev);
17332 struct tg3 *tp = netdev_priv(dev);
17335 if (!netif_running(dev))
17338 netif_device_attach(dev);
17340 tg3_full_lock(tp, 0);
17342 tg3_flag_set(tp, INIT_COMPLETE);
17343 err = tg3_restart_hw(tp,
17344 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
17348 tg3_timer_start(tp);
17350 tg3_netif_start(tp);
17353 tg3_full_unlock(tp);
17361 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17362 #define TG3_PM_OPS (&tg3_pm_ops)
17366 #define TG3_PM_OPS NULL
17368 #endif /* CONFIG_PM_SLEEP */
17371 * tg3_io_error_detected - called when PCI error is detected
17372 * @pdev: Pointer to PCI device
17373 * @state: The current pci connection state
17375 * This function is called after a PCI bus error affecting
17376 * this device has been detected.
17378 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17379 pci_channel_state_t state)
17381 struct net_device *netdev = pci_get_drvdata(pdev);
17382 struct tg3 *tp = netdev_priv(netdev);
17383 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17385 netdev_info(netdev, "PCI I/O error detected\n");
17389 if (!netif_running(netdev))
17394 tg3_netif_stop(tp);
17396 tg3_timer_stop(tp);
17398 /* Want to make sure that the reset task doesn't run */
17399 tg3_reset_task_cancel(tp);
17401 netif_device_detach(netdev);
17403 /* Clean up software state, even if MMIO is blocked */
17404 tg3_full_lock(tp, 0);
17405 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17406 tg3_full_unlock(tp);
17409 if (state == pci_channel_io_perm_failure)
17410 err = PCI_ERS_RESULT_DISCONNECT;
17412 pci_disable_device(pdev);
17420 * tg3_io_slot_reset - called after the pci bus has been reset.
17421 * @pdev: Pointer to PCI device
17423 * Restart the card from scratch, as if from a cold-boot.
17424 * At this point, the card has exprienced a hard reset,
17425 * followed by fixups by BIOS, and has its config space
17426 * set up identically to what it was at cold boot.
17428 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17430 struct net_device *netdev = pci_get_drvdata(pdev);
17431 struct tg3 *tp = netdev_priv(netdev);
17432 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17437 if (pci_enable_device(pdev)) {
17438 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
17442 pci_set_master(pdev);
17443 pci_restore_state(pdev);
17444 pci_save_state(pdev);
17446 if (!netif_running(netdev)) {
17447 rc = PCI_ERS_RESULT_RECOVERED;
17451 err = tg3_power_up(tp);
17455 rc = PCI_ERS_RESULT_RECOVERED;
17464 * tg3_io_resume - called when traffic can start flowing again.
17465 * @pdev: Pointer to PCI device
17467 * This callback is called when the error recovery driver tells
17468 * us that its OK to resume normal operation.
17470 static void tg3_io_resume(struct pci_dev *pdev)
17472 struct net_device *netdev = pci_get_drvdata(pdev);
17473 struct tg3 *tp = netdev_priv(netdev);
17478 if (!netif_running(netdev))
17481 tg3_full_lock(tp, 0);
17482 tg3_flag_set(tp, INIT_COMPLETE);
17483 err = tg3_restart_hw(tp, 1);
17485 tg3_full_unlock(tp);
17486 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17490 netif_device_attach(netdev);
17492 tg3_timer_start(tp);
17494 tg3_netif_start(tp);
17496 tg3_full_unlock(tp);
17504 static const struct pci_error_handlers tg3_err_handler = {
17505 .error_detected = tg3_io_error_detected,
17506 .slot_reset = tg3_io_slot_reset,
17507 .resume = tg3_io_resume
17510 static struct pci_driver tg3_driver = {
17511 .name = DRV_MODULE_NAME,
17512 .id_table = tg3_pci_tbl,
17513 .probe = tg3_init_one,
17514 .remove = tg3_remove_one,
17515 .err_handler = &tg3_err_handler,
17516 .driver.pm = TG3_PM_OPS,
17519 static int __init tg3_init(void)
17521 return pci_register_driver(&tg3_driver);
17524 static void __exit tg3_cleanup(void)
17526 pci_unregister_driver(&tg3_driver);
17529 module_init(tg3_init);
17530 module_exit(tg3_cleanup);