2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2013 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47 #include <linux/ssb/ssb_driver_gige.h>
48 #include <linux/hwmon.h>
49 #include <linux/hwmon-sysfs.h>
51 #include <net/checksum.h>
55 #include <asm/byteorder.h>
56 #include <linux/uaccess.h>
58 #include <uapi/linux/net_tstamp.h>
59 #include <linux/ptp_clock_kernel.h>
62 #include <asm/idprom.h>
71 /* Functions & macros to verify TG3_FLAGS types */
/* Return nonzero iff @flag is set in the tg3_flags bitmap @bits.
 * Thin type-checked wrapper over test_bit(); always used via the
 * tg3_flag() macro below so the TG3_FLAG_ prefix is enforced.
 */
73 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
75 return test_bit(flag, bits);
/* Set @flag in the tg3_flags bitmap @bits (counterpart to _tg3_flag_clear).
 * NOTE(review): the set_bit() body line is elided in this view of the file.
 */
78 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
/* Clear @flag in the tg3_flags bitmap @bits via clear_bit(). */
83 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
85 clear_bit(flag, bits);
/* Convenience wrappers: expand a bare flag name (e.g. JUMBO_CAPABLE) to the
 * full TG3_FLAG_ enumerator and apply it to tp->tg3_flags.
 */
88 #define tg3_flag(tp, flag) \
89 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
90 #define tg3_flag_set(tp, flag) \
91 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
92 #define tg3_flag_clear(tp, flag) \
93 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
/* Driver identity/version strings (TG3_MAJ_NUM is defined elsewhere). */
95 #define DRV_MODULE_NAME "tg3"
97 #define TG3_MIN_NUM 130
98 #define DRV_MODULE_VERSION \
99 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
100 #define DRV_MODULE_RELDATE "February 14, 2013"
/* Reset "kind" values passed to firmware/APE state-change helpers. */
102 #define RESET_KIND_SHUTDOWN 0
103 #define RESET_KIND_INIT 1
104 #define RESET_KIND_SUSPEND 2
106 #define TG3_DEF_RX_MODE 0
107 #define TG3_DEF_TX_MODE 0
108 #define TG3_DEF_MSG_ENABLE \
/* usecs to wait after toggling the GPIO power switch in GRC local ctrl */
118 #define TG3_GRC_LCLCTL_PWRSW_DELAY 100
120 /* length of time before we decide the hardware is borked,
121 * and dev->tx_timeout() should be called to fix the problem
124 #define TG3_TX_TIMEOUT (5 * HZ)
126 /* hardware minimum and maximum for a single frame's data payload */
127 #define TG3_MIN_MTU 60
128 #define TG3_MAX_MTU(tp) \
129 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
131 /* These numbers seem to be hard coded in the NIC firmware somehow.
132 * You can't change the ring sizes, but you can change where you place
133 * them in the NIC onboard memory.
/* RX ring sizes: the 5717 family advertises larger producer rings. */
135 #define TG3_RX_STD_RING_SIZE(tp) \
136 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
137 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
138 #define TG3_DEF_RX_RING_PENDING 200
139 #define TG3_RX_JMB_RING_SIZE(tp) \
140 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
141 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
142 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
144 /* Do not place this n-ring entries value into the tp struct itself,
145 * we really want to expose these constants to GCC so that modulo et
146 * al. operations are done with shifts and masks instead of with
147 * hw multiply/modulo instructions. Another solution would be to
148 * replace things like '% foo' with '& (foo - 1)'.
151 #define TG3_TX_RING_SIZE 512
152 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
/* Byte sizes of the descriptor rings, derived from the entry counts above. */
154 #define TG3_RX_STD_RING_BYTES(tp) \
155 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
156 #define TG3_RX_JMB_RING_BYTES(tp) \
157 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
158 #define TG3_RX_RCB_RING_BYTES(tp) \
159 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
160 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
/* Advance a TX index with wraparound; TG3_TX_RING_SIZE is a power of two. */
162 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
164 #define TG3_DMA_BYTE_ENAB 64
166 #define TG3_RX_STD_DMA_SZ 1536
167 #define TG3_RX_JMB_DMA_SZ 9046
/* DMA map size = payload size plus the byte-enable slop the chip may touch. */
169 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
171 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
172 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
174 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
175 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
177 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
178 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
180 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
181 * that are at least dword aligned when used in PCIX mode. The driver
182 * works around this bug by double copying the packet. This workaround
183 * is built into the normal double copy length check for efficiency.
185 * However, the double copy is only necessary on those architectures
186 * where unaligned memory accesses are inefficient. For those architectures
187 * where unaligned memory accesses incur little penalty, we can reintegrate
188 * the 5701 in the normal rx path. Doing so saves a device structure
189 * dereference by hardcoding the double copy threshold in place.
191 #define TG3_RX_COPY_THRESHOLD 256
192 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
193 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
195 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
198 #if (NET_IP_ALIGN != 0)
199 #define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
201 #define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
204 /* minimum number of free TX descriptors required to wake up TX process */
205 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
206 #define TG3_TX_BD_DMA_MAX_2K 2048
207 #define TG3_TX_BD_DMA_MAX_4K 4096
209 #define TG3_RAW_IP_ALIGN 2
/* Firmware update watchdog period and its polling frequency (seconds). */
211 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
212 #define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
/* Firmware blob names as looked up via request_firmware(). */
214 #define FIRMWARE_TG3 "tigon/tg3.bin"
215 #define FIRMWARE_TG357766 "tigon/tg357766.bin"
216 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
217 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
219 static char version[] =
220 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
222 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
223 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
224 MODULE_LICENSE("GPL");
225 MODULE_VERSION(DRV_MODULE_VERSION);
226 MODULE_FIRMWARE(FIRMWARE_TG3);
227 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
228 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
230 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
231 module_param(tg3_debug, int, 0);
232 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
/* Per-device quirk bits carried in pci_device_id.driver_data below. */
234 #define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001
235 #define TG3_DRV_DATA_FLAG_5705_10_100 0x0002
237 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
257 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
258 TG3_DRV_DATA_FLAG_5705_10_100},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
260 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
261 TG3_DRV_DATA_FLAG_5705_10_100},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
264 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
265 TG3_DRV_DATA_FLAG_5705_10_100},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
272 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
278 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
286 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
287 PCI_VENDOR_ID_LENOVO,
288 TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
289 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
291 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
292 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
294 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
301 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
302 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
303 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
304 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
305 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
306 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
307 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
308 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
309 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
310 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
311 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
312 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
313 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
314 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
315 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
316 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
317 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
318 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
319 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
320 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
321 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
322 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
323 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
324 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
325 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
326 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
327 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
328 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
329 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
330 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
331 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
332 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
333 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
334 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
335 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
336 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
337 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
338 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
339 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
340 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
341 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
342 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
343 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
344 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
345 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
346 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
347 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
351 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
/* String table for ethtool -S; order must match struct tg3_ethtool_stats.
 * NOTE(review): several entries are elided in this view of the file.
 */
353 static const struct {
354 const char string[ETH_GSTRING_LEN];
355 } ethtool_stats_keys[] = {
358 { "rx_ucast_packets" },
359 { "rx_mcast_packets" },
360 { "rx_bcast_packets" },
362 { "rx_align_errors" },
363 { "rx_xon_pause_rcvd" },
364 { "rx_xoff_pause_rcvd" },
365 { "rx_mac_ctrl_rcvd" },
366 { "rx_xoff_entered" },
367 { "rx_frame_too_long_errors" },
369 { "rx_undersize_packets" },
370 { "rx_in_length_errors" },
371 { "rx_out_length_errors" },
372 { "rx_64_or_less_octet_packets" },
373 { "rx_65_to_127_octet_packets" },
374 { "rx_128_to_255_octet_packets" },
375 { "rx_256_to_511_octet_packets" },
376 { "rx_512_to_1023_octet_packets" },
377 { "rx_1024_to_1522_octet_packets" },
378 { "rx_1523_to_2047_octet_packets" },
379 { "rx_2048_to_4095_octet_packets" },
380 { "rx_4096_to_8191_octet_packets" },
381 { "rx_8192_to_9022_octet_packets" },
388 { "tx_flow_control" },
390 { "tx_single_collisions" },
391 { "tx_mult_collisions" },
393 { "tx_excessive_collisions" },
394 { "tx_late_collisions" },
395 { "tx_collide_2times" },
396 { "tx_collide_3times" },
397 { "tx_collide_4times" },
398 { "tx_collide_5times" },
399 { "tx_collide_6times" },
400 { "tx_collide_7times" },
401 { "tx_collide_8times" },
402 { "tx_collide_9times" },
403 { "tx_collide_10times" },
404 { "tx_collide_11times" },
405 { "tx_collide_12times" },
406 { "tx_collide_13times" },
407 { "tx_collide_14times" },
408 { "tx_collide_15times" },
409 { "tx_ucast_packets" },
410 { "tx_mcast_packets" },
411 { "tx_bcast_packets" },
412 { "tx_carrier_sense_errors" },
416 { "dma_writeq_full" },
417 { "dma_write_prioq_full" },
421 { "rx_threshold_hit" },
423 { "dma_readq_full" },
424 { "dma_read_prioq_full" },
425 { "tx_comp_queue_full" },
427 { "ring_set_send_prod_index" },
428 { "ring_status_update" },
430 { "nic_avoided_irqs" },
431 { "nic_tx_threshold_hit" },
433 { "mbuf_lwm_thresh_hit" },
436 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
/* Indices into ethtool_test_keys[] / the ethtool self-test result array. */
437 #define TG3_NVRAM_TEST 0
438 #define TG3_LINK_TEST 1
439 #define TG3_REGISTER_TEST 2
440 #define TG3_MEMORY_TEST 3
441 #define TG3_MAC_LOOPB_TEST 4
442 #define TG3_PHY_LOOPB_TEST 5
443 #define TG3_EXT_LOOPB_TEST 6
444 #define TG3_INTERRUPT_TEST 7
/* String table for ethtool self-tests, indexed by the TG3_*_TEST macros. */
447 static const struct {
448 const char string[ETH_GSTRING_LEN];
449 } ethtool_test_keys[] = {
450 [TG3_NVRAM_TEST] = { "nvram test (online) " },
451 [TG3_LINK_TEST] = { "link test (online) " },
452 [TG3_REGISTER_TEST] = { "register test (offline)" },
453 [TG3_MEMORY_TEST] = { "memory test (offline)" },
454 [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" },
455 [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" },
456 [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" },
457 [TG3_INTERRUPT_TEST] = { "interrupt test (offline)" },
460 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
/* Posted MMIO write of @val to device register at offset @off. */
463 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
465 writel(val, tp->regs + off);
/* MMIO read of the device register at offset @off. */
468 static u32 tg3_read32(struct tg3 *tp, u32 off)
470 return readl(tp->regs + off);
/* MMIO write to the APE (Application Processing Engine) register space. */
473 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
475 writel(val, tp->aperegs + off);
/* MMIO read from the APE register space. */
478 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
480 return readl(tp->aperegs + off);
/* Indirect register write: under indirect_lock, select the register via
 * the PCI config-space window (REG_BASE_ADDR) then write the data word.
 */
483 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
487 spin_lock_irqsave(&tp->indirect_lock, flags);
488 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
489 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
490 spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* Register write followed by a read-back to flush the posted write. */
493 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
495 writel(val, tp->regs + off);
496 readl(tp->regs + off); /* read-back forces the write to complete */
/* Indirect register read via the PCI config-space window (see the
 * matching tg3_write_indirect_reg32 above).
 */
499 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
504 spin_lock_irqsave(&tp->indirect_lock, flags);
505 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
506 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
507 spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* Indirect mailbox write.  Two mailboxes have dedicated PCI config-space
 * aliases and are written directly; everything else goes through the
 * indirect register window at mailbox offset + 0x5600.
 */
511 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
515 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
516 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
517 TG3_64BIT_REG_LOW, val);
520 if (off == TG3_RX_STD_PROD_IDX_REG) {
521 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
522 TG3_64BIT_REG_LOW, val);
526 spin_lock_irqsave(&tp->indirect_lock, flags);
527 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
528 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
529 spin_unlock_irqrestore(&tp->indirect_lock, flags);
531 /* In indirect mode when disabling interrupts, we also need
532 * to clear the interrupt bit in the GRC local ctrl register.
534 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
536 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
537 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
/* Indirect mailbox read through the register window at off + 0x5600. */
541 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
546 spin_lock_irqsave(&tp->indirect_lock, flags);
547 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
548 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
549 spin_unlock_irqrestore(&tp->indirect_lock, flags);
553 /* usec_wait specifies the wait time in usec when writing to certain registers
554 * where it is unsafe to read back the register without some delay.
555 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
556 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
/* Register write with optional post-write delay.  Chips with the PCIX
 * target hardware bug or the ICH workaround use the non-posted indirect
 * write path; otherwise a plain posted MMIO write is used.
 */
558 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
560 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
561 /* Non-posted methods */
562 tp->write32(tp, off, val);
565 tg3_write32(tp, off, val);
570 /* Wait again after the read for the posted method to guarantee that
571 * the wait time is met.
/* Mailbox write, flushed with a read-back only when required: either
 * posted writes must always be flushed, or neither the write-reorder
 * nor the ICH workaround applies.
 */
577 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
579 tp->write32_mbox(tp, off, val);
580 if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
581 (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
582 !tg3_flag(tp, ICH_WORKAROUND)))
583 tp->read32_mbox(tp, off);
/* TX mailbox write with chip-bug workarounds keyed off TXD_MBOX_HWBUG /
 * MBOX_WRITE_REORDER / FLUSH_POSTED_WRITES flags.
 * NOTE(review): the actual writel/flush statements are elided in this view.
 */
586 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
588 void __iomem *mbox = tp->regs + off;
590 if (tg3_flag(tp, TXD_MBOX_HWBUG))
592 if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
593 tg3_flag(tp, FLUSH_POSTED_WRITES))
/* 5906: mailboxes live behind the GRC mailbox window. */
597 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
599 return readl(tp->regs + off + GRCMBOX_BASE);
/* 5906: mailbox write through the GRC mailbox window. */
602 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
604 writel(val, tp->regs + off + GRCMBOX_BASE);
/* Register/mailbox accessor shorthands.  These dispatch through the
 * per-chip method pointers in struct tg3 (selected at probe time), so a
 * local variable named "tp" must be in scope at every use site.
 */
607 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
608 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
609 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
610 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
611 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
613 #define tw32(reg, val) tp->write32(tp, reg, val)
614 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
615 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
616 #define tr32(reg) tp->read32(tp, reg)
/* Write @val to NIC on-board SRAM at offset @off through the memory
 * window.  5906 rejects writes to the stats-block range.  The window
 * base is always restored to 0 afterwards (hardware requirement noted
 * in the original comments).
 */
618 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
622 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
623 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
626 spin_lock_irqsave(&tp->indirect_lock, flags);
627 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
628 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
629 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
631 /* Always leave this as zero. */
632 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
634 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
635 tw32_f(TG3PCI_MEM_WIN_DATA, val);
637 /* Always leave this as zero. */
638 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
640 spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* Read a word of NIC on-board SRAM at @off into *@val; mirrors
 * tg3_write_mem including the 5906 stats-block exclusion and the
 * restore-window-base-to-zero rule.
 */
643 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
647 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
648 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
653 spin_lock_irqsave(&tp->indirect_lock, flags);
654 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
655 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
656 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
658 /* Always leave this as zero. */
659 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
661 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
662 *val = tr32(TG3PCI_MEM_WIN_DATA);
664 /* Always leave this as zero. */
665 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
667 spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* Release any APE locks this driver instance might have left held
 * (e.g. across a crash/reset).  5761 uses the legacy grant register
 * block; later chips use the per-function block.  PHY locks always use
 * the driver grant bit; other locks use a per-PCI-function bit.
 */
670 static void tg3_ape_lock_init(struct tg3 *tp)
675 if (tg3_asic_rev(tp) == ASIC_REV_5761)
676 regbase = TG3_APE_LOCK_GRANT;
678 regbase = TG3_APE_PER_LOCK_GRANT;
680 /* Make sure the driver hasn't any stale locks. */
681 for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
683 case TG3_APE_LOCK_PHY0:
684 case TG3_APE_LOCK_PHY1:
685 case TG3_APE_LOCK_PHY2:
686 case TG3_APE_LOCK_PHY3:
687 bit = APE_LOCK_GRANT_DRIVER;
691 bit = APE_LOCK_GRANT_DRIVER;
693 bit = 1 << tp->pci_fn;
/* Writing the grant bit releases lock @i. */
695 tg3_ape_write32(tp, regbase + 4 * i, bit);
/* Acquire APE lock @locknum.  No-op unless the APE is enabled.  The
 * request bit is per-function for GRC/MEM/GPIO (except on 5761) and the
 * fixed driver bit for PHY locks.  Polls the grant register for up to
 * ~1 ms (100 iterations) and revokes the request on timeout.
 */
700 static int tg3_ape_lock(struct tg3 *tp, int locknum)
704 u32 status, req, gnt, bit;
706 if (!tg3_flag(tp, ENABLE_APE))
710 case TG3_APE_LOCK_GPIO:
711 if (tg3_asic_rev(tp) == ASIC_REV_5761)
713 case TG3_APE_LOCK_GRC:
714 case TG3_APE_LOCK_MEM:
716 bit = APE_LOCK_REQ_DRIVER;
718 bit = 1 << tp->pci_fn;
720 case TG3_APE_LOCK_PHY0:
721 case TG3_APE_LOCK_PHY1:
722 case TG3_APE_LOCK_PHY2:
723 case TG3_APE_LOCK_PHY3:
724 bit = APE_LOCK_REQ_DRIVER;
730 if (tg3_asic_rev(tp) == ASIC_REV_5761) {
731 req = TG3_APE_LOCK_REQ;
732 gnt = TG3_APE_LOCK_GRANT;
734 req = TG3_APE_PER_LOCK_REQ;
735 gnt = TG3_APE_PER_LOCK_GRANT;
740 tg3_ape_write32(tp, req + off, bit);
742 /* Wait for up to 1 millisecond to acquire lock. */
743 for (i = 0; i < 100; i++) {
744 status = tg3_ape_read32(tp, gnt + off);
751 /* Revoke the lock request. */
752 tg3_ape_write32(tp, gnt + off, bit);
/* Release APE lock @locknum by writing the owning bit to the grant
 * register.  Bit selection mirrors tg3_ape_lock above.
 */
759 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
763 if (!tg3_flag(tp, ENABLE_APE))
767 case TG3_APE_LOCK_GPIO:
768 if (tg3_asic_rev(tp) == ASIC_REV_5761)
770 case TG3_APE_LOCK_GRC:
771 case TG3_APE_LOCK_MEM:
773 bit = APE_LOCK_GRANT_DRIVER;
775 bit = 1 << tp->pci_fn;
777 case TG3_APE_LOCK_PHY0:
778 case TG3_APE_LOCK_PHY1:
779 case TG3_APE_LOCK_PHY2:
780 case TG3_APE_LOCK_PHY3:
781 bit = APE_LOCK_GRANT_DRIVER;
787 if (tg3_asic_rev(tp) == ASIC_REV_5761)
788 gnt = TG3_APE_LOCK_GRANT;
790 gnt = TG3_APE_PER_LOCK_GRANT;
792 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
/* Take the APE MEM lock once no APE event is pending, waiting up to
 * @timeout_us (polled in 10 us steps).  Returns 0 with the MEM lock
 * held, or -EBUSY on timeout.
 */
795 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
800 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
803 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
804 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
807 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
810 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
813 return timeout_us ? 0 : -EBUSY;
/* Poll until the APE clears its pending-event bit, up to @timeout_us
 * (10 us granularity).  Returns nonzero if the event never cleared.
 */
816 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
820 for (i = 0; i < timeout_us / 10; i++) {
821 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
823 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
829 return i == timeout_us / 10;
/* Read @len bytes from APE scratchpad offset @base_off into @data,
 * in chunks capped at the message-buffer size.  Requires NCSI-capable
 * APE firmware that is signed and READY.  Each chunk is requested via a
 * driver SCRTCHPD_READ event and then copied out of the message buffer
 * word by word.
 */
832 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
836 u32 i, bufoff, msgoff, maxlen, apedata;
838 if (!tg3_flag(tp, APE_HAS_NCSI))
841 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
842 if (apedata != APE_SEG_SIG_MAGIC)
845 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
846 if (!(apedata & APE_FW_STATUS_READY))
/* Message buffer layout: [base_off word][length word][payload...] */
849 bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
851 msgoff = bufoff + 2 * sizeof(u32);
852 maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
857 /* Cap xfer sizes to scratchpad limits. */
858 length = (len > maxlen) ? maxlen : len;
861 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
862 if (!(apedata & APE_FW_STATUS_READY))
865 /* Wait for up to 1 msec for APE to service previous event. */
866 err = tg3_ape_event_lock(tp, 1000);
870 apedata = APE_EVENT_STATUS_DRIVER_EVNT |
871 APE_EVENT_STATUS_SCRTCHPD_READ |
872 APE_EVENT_STATUS_EVENT_PENDING;
873 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
875 tg3_ape_write32(tp, bufoff, base_off);
876 tg3_ape_write32(tp, bufoff + sizeof(u32), length);
878 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
879 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
883 if (tg3_ape_wait_for_event(tp, 30000))
886 for (i = 0; length; i += 4, length -= 4) {
887 u32 val = tg3_ape_read32(tp, msgoff + i);
888 memcpy(data, &val, sizeof(u32));
/* Post driver event @event to the APE: verify the firmware segment is
 * signed and READY, wait for any previous event to drain (MEM lock),
 * write the event status, then ring APE_EVENT_1.
 */
896 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
901 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
902 if (apedata != APE_SEG_SIG_MAGIC)
905 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
906 if (!(apedata & APE_FW_STATUS_READY))
909 /* Wait for up to 1 millisecond for APE to service previous event. */
910 err = tg3_ape_event_lock(tp, 1000);
914 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
915 event | APE_EVENT_STATUS_EVENT_PENDING);
917 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
918 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
/* Inform the APE of a driver state transition (@kind is one of the
 * RESET_KIND_* values).  INIT publishes the host segment signature and
 * driver id; SHUTDOWN wipes the signature so the APE assumes the OS is
 * absent, recording WOL vs. unload state; SUSPEND just sends the event.
 */
923 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
928 if (!tg3_flag(tp, ENABLE_APE))
932 case RESET_KIND_INIT:
933 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
934 APE_HOST_SEG_SIG_MAGIC);
935 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
936 APE_HOST_SEG_LEN_MAGIC);
937 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
938 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
939 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
940 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
941 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
942 APE_HOST_BEHAV_NO_PHYLOCK);
943 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
944 TG3_APE_HOST_DRVR_STATE_START);
946 event = APE_EVENT_STATUS_STATE_START;
948 case RESET_KIND_SHUTDOWN:
949 /* With the interface we are currently using,
950 * APE does not track driver state. Wiping
951 * out the HOST SEGMENT SIGNATURE forces
952 * the APE to assume OS absent status.
954 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
956 if (device_may_wakeup(&tp->pdev->dev) &&
957 tg3_flag(tp, WOL_ENABLE)) {
958 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
959 TG3_APE_HOST_WOL_SPEED_AUTO);
960 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
962 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
964 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
966 event = APE_EVENT_STATUS_STATE_UNLOAD;
968 case RESET_KIND_SUSPEND:
969 event = APE_EVENT_STATUS_STATE_SUSPEND;
975 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
977 tg3_ape_send_event(tp, event);
/* Mask PCI interrupts at the misc host control register and write 1 to
 * every interrupt mailbox to disable per-vector interrupts.
 */
980 static void tg3_disable_ints(struct tg3 *tp)
984 tw32(TG3PCI_MISC_HOST_CTRL,
985 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
986 for (i = 0; i < tp->irq_max; i++)
987 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
/* Unmask PCI interrupts and re-arm each NAPI vector by writing its
 * last_tag to the interrupt mailbox (written twice under 1SHOT_MSI).
 * Builds coal_now from the per-vector bits, forces an initial interrupt
 * if status was already updated, then kicks the coalescing engine.
 */
990 static void tg3_enable_ints(struct tg3 *tp)
997 tw32(TG3PCI_MISC_HOST_CTRL,
998 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
1000 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
1001 for (i = 0; i < tp->irq_cnt; i++) {
1002 struct tg3_napi *tnapi = &tp->napi[i];
1004 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1005 if (tg3_flag(tp, 1SHOT_MSI))
1006 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1008 tp->coal_now |= tnapi->coal_now;
1011 /* Force an initial interrupt */
1012 if (!tg3_flag(tp, TAGGED_STATUS) &&
1013 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1014 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1016 tw32(HOSTCC_MODE, tp->coal_now);
1018 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
/* Return nonzero when this NAPI vector has pending work: a PHY link
 * change (unless link changes are polled elsewhere), TX completions
 * behind the hardware consumer index, or new RX return-ring entries.
 */
1021 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1023 struct tg3 *tp = tnapi->tp;
1024 struct tg3_hw_status *sblk = tnapi->hw_status;
1025 unsigned int work_exists = 0;
1027 /* check for phy events */
1028 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1029 if (sblk->status & SD_STATUS_LINK_CHG)
1033 /* check for TX work to do */
1034 if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1037 /* check for RX work to do */
1038 if (tnapi->rx_rcb_prod_idx &&
1039 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1046 * similar to tg3_enable_ints, but it accurately determines whether there
1047 * is new work pending and can return without flushing the PIO write
1048 * which reenables interrupts
1050 static void tg3_int_reenable(struct tg3_napi *tnapi)
1052 struct tg3 *tp = tnapi->tp;
/* Re-arm the vector by acknowledging work up to last_tag. */
1054 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1057 /* When doing tagged status, this work check is unnecessary.
1058 * The last_tag we write above tells the chip which piece of
1059 * work we've completed.
1061 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1062 tw32(HOSTCC_MODE, tp->coalesce_mode |
1063 HOSTCC_MODE_ENABLE | tnapi->coal_now);
/* Switch the core clock configuration via TG3PCI_CLOCK_CTRL, stepping
 * through ALTCLK as required.  Skipped entirely on CPMU-equipped chips
 * and the 5780 class.  Uses tw32_wait_f because clock changes need a
 * settle delay (40 us) before the register may be read back.
 */
1066 static void tg3_switch_clocks(struct tg3 *tp)
1069 u32 orig_clock_ctrl;
1071 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1074 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1076 orig_clock_ctrl = clock_ctrl;
1077 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1078 CLOCK_CTRL_CLKRUN_OENABLE |
1080 tp->pci_clock_ctrl = clock_ctrl;
1082 if (tg3_flag(tp, 5705_PLUS)) {
1083 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1084 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1085 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1087 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1088 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1090 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1092 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1093 clock_ctrl | (CLOCK_CTRL_ALTCLK),
1096 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1099 #define PHY_BUSY_LOOPS 5000
1101 static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
1108 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1110 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1114 tg3_ape_lock(tp, tp->phy_ape_lock);
1118 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1119 MI_COM_PHY_ADDR_MASK);
1120 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1121 MI_COM_REG_ADDR_MASK);
1122 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1124 tw32_f(MAC_MI_COM, frame_val);
1126 loops = PHY_BUSY_LOOPS;
1127 while (loops != 0) {
1129 frame_val = tr32(MAC_MI_COM);
1131 if ((frame_val & MI_COM_BUSY) == 0) {
1133 frame_val = tr32(MAC_MI_COM);
1141 *val = frame_val & MI_COM_DATA_MASK;
1145 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1146 tw32_f(MAC_MI_MODE, tp->mi_mode);
1150 tg3_ape_unlock(tp, tp->phy_ape_lock);
1155 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1157 return __tg3_readphy(tp, tp->phy_addr, reg, val);
1160 static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
1167 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1168 (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1171 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1173 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1177 tg3_ape_lock(tp, tp->phy_ape_lock);
1179 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1180 MI_COM_PHY_ADDR_MASK);
1181 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1182 MI_COM_REG_ADDR_MASK);
1183 frame_val |= (val & MI_COM_DATA_MASK);
1184 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1186 tw32_f(MAC_MI_COM, frame_val);
1188 loops = PHY_BUSY_LOOPS;
1189 while (loops != 0) {
1191 frame_val = tr32(MAC_MI_COM);
1192 if ((frame_val & MI_COM_BUSY) == 0) {
1194 frame_val = tr32(MAC_MI_COM);
1204 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1205 tw32_f(MAC_MI_MODE, tp->mi_mode);
1209 tg3_ape_unlock(tp, tp->phy_ape_lock);
1214 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1216 return __tg3_writephy(tp, tp->phy_addr, reg, val);
1219 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1223 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1227 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1231 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1232 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1236 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1242 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1246 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1250 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1254 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1255 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1259 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1265 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1269 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1271 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1276 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1280 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1282 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1287 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1291 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1292 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1293 MII_TG3_AUXCTL_SHDWSEL_MISC);
1295 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1300 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1302 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1303 set |= MII_TG3_AUXCTL_MISC_WREN;
1305 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1308 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1313 err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1319 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1321 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1323 err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1324 val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1329 static int tg3_bmcr_reset(struct tg3 *tp)
1334 /* OK, reset it, and poll the BMCR_RESET bit until it
1335 * clears or we time out.
1337 phy_control = BMCR_RESET;
1338 err = tg3_writephy(tp, MII_BMCR, phy_control);
1344 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1348 if ((phy_control & BMCR_RESET) == 0) {
1360 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1362 struct tg3 *tp = bp->priv;
1365 spin_lock_bh(&tp->lock);
1367 if (tg3_readphy(tp, reg, &val))
1370 spin_unlock_bh(&tp->lock);
1375 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1377 struct tg3 *tp = bp->priv;
1380 spin_lock_bh(&tp->lock);
1382 if (tg3_writephy(tp, reg, val))
1385 spin_unlock_bh(&tp->lock);
1390 static int tg3_mdio_reset(struct mii_bus *bp)
1395 static void tg3_mdio_config_5785(struct tg3 *tp)
1398 struct phy_device *phydev;
1400 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1401 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1402 case PHY_ID_BCM50610:
1403 case PHY_ID_BCM50610M:
1404 val = MAC_PHYCFG2_50610_LED_MODES;
1406 case PHY_ID_BCMAC131:
1407 val = MAC_PHYCFG2_AC131_LED_MODES;
1409 case PHY_ID_RTL8211C:
1410 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1412 case PHY_ID_RTL8201E:
1413 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1419 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1420 tw32(MAC_PHYCFG2, val);
1422 val = tr32(MAC_PHYCFG1);
1423 val &= ~(MAC_PHYCFG1_RGMII_INT |
1424 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1425 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1426 tw32(MAC_PHYCFG1, val);
1431 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1432 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1433 MAC_PHYCFG2_FMODE_MASK_MASK |
1434 MAC_PHYCFG2_GMODE_MASK_MASK |
1435 MAC_PHYCFG2_ACT_MASK_MASK |
1436 MAC_PHYCFG2_QUAL_MASK_MASK |
1437 MAC_PHYCFG2_INBAND_ENABLE;
1439 tw32(MAC_PHYCFG2, val);
1441 val = tr32(MAC_PHYCFG1);
1442 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1443 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1444 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1445 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1446 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1447 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1448 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1450 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1451 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1452 tw32(MAC_PHYCFG1, val);
1454 val = tr32(MAC_EXT_RGMII_MODE);
1455 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1456 MAC_RGMII_MODE_RX_QUALITY |
1457 MAC_RGMII_MODE_RX_ACTIVITY |
1458 MAC_RGMII_MODE_RX_ENG_DET |
1459 MAC_RGMII_MODE_TX_ENABLE |
1460 MAC_RGMII_MODE_TX_LOWPWR |
1461 MAC_RGMII_MODE_TX_RESET);
1462 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1463 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1464 val |= MAC_RGMII_MODE_RX_INT_B |
1465 MAC_RGMII_MODE_RX_QUALITY |
1466 MAC_RGMII_MODE_RX_ACTIVITY |
1467 MAC_RGMII_MODE_RX_ENG_DET;
1468 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1469 val |= MAC_RGMII_MODE_TX_ENABLE |
1470 MAC_RGMII_MODE_TX_LOWPWR |
1471 MAC_RGMII_MODE_TX_RESET;
1473 tw32(MAC_EXT_RGMII_MODE, val);
1476 static void tg3_mdio_start(struct tg3 *tp)
1478 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1479 tw32_f(MAC_MI_MODE, tp->mi_mode);
1482 if (tg3_flag(tp, MDIOBUS_INITED) &&
1483 tg3_asic_rev(tp) == ASIC_REV_5785)
1484 tg3_mdio_config_5785(tp);
1487 static int tg3_mdio_init(struct tg3 *tp)
1491 struct phy_device *phydev;
1493 if (tg3_flag(tp, 5717_PLUS)) {
1496 tp->phy_addr = tp->pci_fn + 1;
1498 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1499 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1501 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1502 TG3_CPMU_PHY_STRAP_IS_SERDES;
1506 tp->phy_addr = TG3_PHY_MII_ADDR;
1510 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1513 tp->mdio_bus = mdiobus_alloc();
1514 if (tp->mdio_bus == NULL)
1517 tp->mdio_bus->name = "tg3 mdio bus";
1518 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1519 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1520 tp->mdio_bus->priv = tp;
1521 tp->mdio_bus->parent = &tp->pdev->dev;
1522 tp->mdio_bus->read = &tg3_mdio_read;
1523 tp->mdio_bus->write = &tg3_mdio_write;
1524 tp->mdio_bus->reset = &tg3_mdio_reset;
1525 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1526 tp->mdio_bus->irq = &tp->mdio_irq[0];
1528 for (i = 0; i < PHY_MAX_ADDR; i++)
1529 tp->mdio_bus->irq[i] = PHY_POLL;
1531 /* The bus registration will look for all the PHYs on the mdio bus.
1532 * Unfortunately, it does not ensure the PHY is powered up before
1533 * accessing the PHY ID registers. A chip reset is the
1534 * quickest way to bring the device back to an operational state..
1536 if (tg3_readphy(tp, MII_BMCR, ®) || (reg & BMCR_PDOWN))
1539 i = mdiobus_register(tp->mdio_bus);
1541 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1542 mdiobus_free(tp->mdio_bus);
1546 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1548 if (!phydev || !phydev->drv) {
1549 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1550 mdiobus_unregister(tp->mdio_bus);
1551 mdiobus_free(tp->mdio_bus);
1555 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1556 case PHY_ID_BCM57780:
1557 phydev->interface = PHY_INTERFACE_MODE_GMII;
1558 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1560 case PHY_ID_BCM50610:
1561 case PHY_ID_BCM50610M:
1562 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1563 PHY_BRCM_RX_REFCLK_UNUSED |
1564 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1565 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1566 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1567 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1568 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1569 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1570 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1571 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1573 case PHY_ID_RTL8211C:
1574 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1576 case PHY_ID_RTL8201E:
1577 case PHY_ID_BCMAC131:
1578 phydev->interface = PHY_INTERFACE_MODE_MII;
1579 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1580 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1584 tg3_flag_set(tp, MDIOBUS_INITED);
1586 if (tg3_asic_rev(tp) == ASIC_REV_5785)
1587 tg3_mdio_config_5785(tp);
1592 static void tg3_mdio_fini(struct tg3 *tp)
1594 if (tg3_flag(tp, MDIOBUS_INITED)) {
1595 tg3_flag_clear(tp, MDIOBUS_INITED);
1596 mdiobus_unregister(tp->mdio_bus);
1597 mdiobus_free(tp->mdio_bus);
1601 /* tp->lock is held. */
1602 static inline void tg3_generate_fw_event(struct tg3 *tp)
1606 val = tr32(GRC_RX_CPU_EVENT);
1607 val |= GRC_RX_CPU_DRIVER_EVENT;
1608 tw32_f(GRC_RX_CPU_EVENT, val);
1610 tp->last_event_jiffies = jiffies;
1613 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1615 /* tp->lock is held. */
1616 static void tg3_wait_for_event_ack(struct tg3 *tp)
1619 unsigned int delay_cnt;
1622 /* If enough time has passed, no wait is necessary. */
1623 time_remain = (long)(tp->last_event_jiffies + 1 +
1624 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1626 if (time_remain < 0)
1629 /* Check if we can shorten the wait time. */
1630 delay_cnt = jiffies_to_usecs(time_remain);
1631 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1632 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1633 delay_cnt = (delay_cnt >> 3) + 1;
1635 for (i = 0; i < delay_cnt; i++) {
1636 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1642 /* tp->lock is held. */
1643 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1648 if (!tg3_readphy(tp, MII_BMCR, ®))
1650 if (!tg3_readphy(tp, MII_BMSR, ®))
1651 val |= (reg & 0xffff);
1655 if (!tg3_readphy(tp, MII_ADVERTISE, ®))
1657 if (!tg3_readphy(tp, MII_LPA, ®))
1658 val |= (reg & 0xffff);
1662 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1663 if (!tg3_readphy(tp, MII_CTRL1000, ®))
1665 if (!tg3_readphy(tp, MII_STAT1000, ®))
1666 val |= (reg & 0xffff);
1670 if (!tg3_readphy(tp, MII_PHYADDR, ®))
1677 /* tp->lock is held. */
1678 static void tg3_ump_link_report(struct tg3 *tp)
1682 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1685 tg3_phy_gather_ump_data(tp, data);
1687 tg3_wait_for_event_ack(tp);
1689 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1690 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1691 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1692 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1693 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1694 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1696 tg3_generate_fw_event(tp);
1699 /* tp->lock is held. */
1700 static void tg3_stop_fw(struct tg3 *tp)
1702 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1703 /* Wait for RX cpu to ACK the previous event. */
1704 tg3_wait_for_event_ack(tp);
1706 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1708 tg3_generate_fw_event(tp);
1710 /* Wait for RX cpu to ACK this event. */
1711 tg3_wait_for_event_ack(tp);
1715 /* tp->lock is held. */
1716 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1718 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1719 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1721 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1723 case RESET_KIND_INIT:
1724 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1728 case RESET_KIND_SHUTDOWN:
1729 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1733 case RESET_KIND_SUSPEND:
1734 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1743 if (kind == RESET_KIND_INIT ||
1744 kind == RESET_KIND_SUSPEND)
1745 tg3_ape_driver_state_change(tp, kind);
1748 /* tp->lock is held. */
1749 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1751 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1753 case RESET_KIND_INIT:
1754 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1755 DRV_STATE_START_DONE);
1758 case RESET_KIND_SHUTDOWN:
1759 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1760 DRV_STATE_UNLOAD_DONE);
1768 if (kind == RESET_KIND_SHUTDOWN)
1769 tg3_ape_driver_state_change(tp, kind);
1772 /* tp->lock is held. */
1773 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1775 if (tg3_flag(tp, ENABLE_ASF)) {
1777 case RESET_KIND_INIT:
1778 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1782 case RESET_KIND_SHUTDOWN:
1783 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1787 case RESET_KIND_SUSPEND:
1788 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1798 static int tg3_poll_fw(struct tg3 *tp)
1803 if (tg3_flag(tp, IS_SSB_CORE)) {
1804 /* We don't use firmware. */
1808 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1809 /* Wait up to 20ms for init done. */
1810 for (i = 0; i < 200; i++) {
1811 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1818 /* Wait for firmware initialization to complete. */
1819 for (i = 0; i < 100000; i++) {
1820 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1821 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1826 /* Chip might not be fitted with firmware. Some Sun onboard
1827 * parts are configured like that. So don't signal the timeout
1828 * of the above loop as an error, but do report the lack of
1829 * running firmware once.
1831 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1832 tg3_flag_set(tp, NO_FWARE_REPORTED);
1834 netdev_info(tp->dev, "No firmware running\n");
1837 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1838 /* The 57765 A0 needs a little more
1839 * time to do some important work.
1847 static void tg3_link_report(struct tg3 *tp)
1849 if (!netif_carrier_ok(tp->dev)) {
1850 netif_info(tp, link, tp->dev, "Link is down\n");
1851 tg3_ump_link_report(tp);
1852 } else if (netif_msg_link(tp)) {
1853 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1854 (tp->link_config.active_speed == SPEED_1000 ?
1856 (tp->link_config.active_speed == SPEED_100 ?
1858 (tp->link_config.active_duplex == DUPLEX_FULL ?
1861 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1862 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1864 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1867 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1868 netdev_info(tp->dev, "EEE is %s\n",
1869 tp->setlpicnt ? "enabled" : "disabled");
1871 tg3_ump_link_report(tp);
1874 tp->link_up = netif_carrier_ok(tp->dev);
1877 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1881 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1882 miireg = ADVERTISE_1000XPAUSE;
1883 else if (flow_ctrl & FLOW_CTRL_TX)
1884 miireg = ADVERTISE_1000XPSE_ASYM;
1885 else if (flow_ctrl & FLOW_CTRL_RX)
1886 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1893 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1897 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1898 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1899 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1900 if (lcladv & ADVERTISE_1000XPAUSE)
1902 if (rmtadv & ADVERTISE_1000XPAUSE)
1909 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1913 u32 old_rx_mode = tp->rx_mode;
1914 u32 old_tx_mode = tp->tx_mode;
1916 if (tg3_flag(tp, USE_PHYLIB))
1917 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1919 autoneg = tp->link_config.autoneg;
1921 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1922 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1923 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1925 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1927 flowctrl = tp->link_config.flowctrl;
1929 tp->link_config.active_flowctrl = flowctrl;
1931 if (flowctrl & FLOW_CTRL_RX)
1932 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1934 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1936 if (old_rx_mode != tp->rx_mode)
1937 tw32_f(MAC_RX_MODE, tp->rx_mode);
1939 if (flowctrl & FLOW_CTRL_TX)
1940 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1942 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1944 if (old_tx_mode != tp->tx_mode)
1945 tw32_f(MAC_TX_MODE, tp->tx_mode);
1948 static void tg3_adjust_link(struct net_device *dev)
1950 u8 oldflowctrl, linkmesg = 0;
1951 u32 mac_mode, lcl_adv, rmt_adv;
1952 struct tg3 *tp = netdev_priv(dev);
1953 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1955 spin_lock_bh(&tp->lock);
1957 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1958 MAC_MODE_HALF_DUPLEX);
1960 oldflowctrl = tp->link_config.active_flowctrl;
1966 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1967 mac_mode |= MAC_MODE_PORT_MODE_MII;
1968 else if (phydev->speed == SPEED_1000 ||
1969 tg3_asic_rev(tp) != ASIC_REV_5785)
1970 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1972 mac_mode |= MAC_MODE_PORT_MODE_MII;
1974 if (phydev->duplex == DUPLEX_HALF)
1975 mac_mode |= MAC_MODE_HALF_DUPLEX;
1977 lcl_adv = mii_advertise_flowctrl(
1978 tp->link_config.flowctrl);
1981 rmt_adv = LPA_PAUSE_CAP;
1982 if (phydev->asym_pause)
1983 rmt_adv |= LPA_PAUSE_ASYM;
1986 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1988 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1990 if (mac_mode != tp->mac_mode) {
1991 tp->mac_mode = mac_mode;
1992 tw32_f(MAC_MODE, tp->mac_mode);
1996 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
1997 if (phydev->speed == SPEED_10)
1999 MAC_MI_STAT_10MBPS_MODE |
2000 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2002 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2005 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2006 tw32(MAC_TX_LENGTHS,
2007 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2008 (6 << TX_LENGTHS_IPG_SHIFT) |
2009 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2011 tw32(MAC_TX_LENGTHS,
2012 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2013 (6 << TX_LENGTHS_IPG_SHIFT) |
2014 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2016 if (phydev->link != tp->old_link ||
2017 phydev->speed != tp->link_config.active_speed ||
2018 phydev->duplex != tp->link_config.active_duplex ||
2019 oldflowctrl != tp->link_config.active_flowctrl)
2022 tp->old_link = phydev->link;
2023 tp->link_config.active_speed = phydev->speed;
2024 tp->link_config.active_duplex = phydev->duplex;
2026 spin_unlock_bh(&tp->lock);
2029 tg3_link_report(tp);
2032 static int tg3_phy_init(struct tg3 *tp)
2034 struct phy_device *phydev;
2036 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2039 /* Bring the PHY back to a known state. */
2042 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2044 /* Attach the MAC to the PHY. */
2045 phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2046 tg3_adjust_link, phydev->interface);
2047 if (IS_ERR(phydev)) {
2048 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2049 return PTR_ERR(phydev);
2052 /* Mask with MAC supported features. */
2053 switch (phydev->interface) {
2054 case PHY_INTERFACE_MODE_GMII:
2055 case PHY_INTERFACE_MODE_RGMII:
2056 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2057 phydev->supported &= (PHY_GBIT_FEATURES |
2059 SUPPORTED_Asym_Pause);
2063 case PHY_INTERFACE_MODE_MII:
2064 phydev->supported &= (PHY_BASIC_FEATURES |
2066 SUPPORTED_Asym_Pause);
2069 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2073 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2075 phydev->advertising = phydev->supported;
2080 static void tg3_phy_start(struct tg3 *tp)
2082 struct phy_device *phydev;
2084 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2087 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2089 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2090 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2091 phydev->speed = tp->link_config.speed;
2092 phydev->duplex = tp->link_config.duplex;
2093 phydev->autoneg = tp->link_config.autoneg;
2094 phydev->advertising = tp->link_config.advertising;
2099 phy_start_aneg(phydev);
2102 static void tg3_phy_stop(struct tg3 *tp)
2104 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2107 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2110 static void tg3_phy_fini(struct tg3 *tp)
2112 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2113 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2114 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2118 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2123 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2126 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2127 /* Cannot do read-modify-write on 5401 */
2128 err = tg3_phy_auxctl_write(tp,
2129 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2130 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2135 err = tg3_phy_auxctl_read(tp,
2136 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2140 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2141 err = tg3_phy_auxctl_write(tp,
2142 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2148 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2152 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2155 tg3_writephy(tp, MII_TG3_FET_TEST,
2156 phytest | MII_TG3_FET_SHADOW_EN);
2157 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2159 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2161 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2162 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2164 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2168 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2172 if (!tg3_flag(tp, 5705_PLUS) ||
2173 (tg3_flag(tp, 5717_PLUS) &&
2174 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2177 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2178 tg3_phy_fet_toggle_apd(tp, enable);
2182 reg = MII_TG3_MISC_SHDW_WREN |
2183 MII_TG3_MISC_SHDW_SCR5_SEL |
2184 MII_TG3_MISC_SHDW_SCR5_LPED |
2185 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2186 MII_TG3_MISC_SHDW_SCR5_SDTL |
2187 MII_TG3_MISC_SHDW_SCR5_C125OE;
2188 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2189 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2191 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2194 reg = MII_TG3_MISC_SHDW_WREN |
2195 MII_TG3_MISC_SHDW_APD_SEL |
2196 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2198 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2200 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2203 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2207 if (!tg3_flag(tp, 5705_PLUS) ||
2208 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2211 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2214 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2215 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2217 tg3_writephy(tp, MII_TG3_FET_TEST,
2218 ephy | MII_TG3_FET_SHADOW_EN);
2219 if (!tg3_readphy(tp, reg, &phy)) {
2221 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2223 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2224 tg3_writephy(tp, reg, phy);
2226 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2231 ret = tg3_phy_auxctl_read(tp,
2232 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2235 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2237 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2238 tg3_phy_auxctl_write(tp,
2239 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2244 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2249 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2252 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2254 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2255 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2258 static void tg3_phy_apply_otp(struct tg3 *tp)
2267 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2270 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2271 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2272 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2274 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2275 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2276 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2278 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2279 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2280 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2282 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2283 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2285 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2286 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2288 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2289 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2290 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2292 tg3_phy_toggle_auxctl_smdsp(tp, false);
2295 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2299 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2304 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2305 current_link_up == 1 &&
2306 tp->link_config.active_duplex == DUPLEX_FULL &&
2307 (tp->link_config.active_speed == SPEED_100 ||
2308 tp->link_config.active_speed == SPEED_1000)) {
2311 if (tp->link_config.active_speed == SPEED_1000)
2312 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2314 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2316 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2318 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2319 TG3_CL45_D7_EEERES_STAT, &val);
2321 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2322 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2326 if (!tp->setlpicnt) {
2327 if (current_link_up == 1 &&
2328 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2329 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2330 tg3_phy_toggle_auxctl_smdsp(tp, false);
2333 val = tr32(TG3_CPMU_EEE_MODE);
2334 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2338 static void tg3_phy_eee_enable(struct tg3 *tp)
2342 if (tp->link_config.active_speed == SPEED_1000 &&
2343 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2344 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2345 tg3_flag(tp, 57765_CLASS)) &&
2346 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2347 val = MII_TG3_DSP_TAP26_ALNOKO |
2348 MII_TG3_DSP_TAP26_RMRXSTO;
2349 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2350 tg3_phy_toggle_auxctl_smdsp(tp, false);
2353 val = tr32(TG3_CPMU_EEE_MODE);
2354 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2357 static int tg3_wait_macro_done(struct tg3 *tp)
2364 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2365 if ((tmp32 & 0x1000) == 0)
2375 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2377 static const u32 test_pat[4][6] = {
2378 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2379 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2380 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2381 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2385 for (chan = 0; chan < 4; chan++) {
2388 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2389 (chan * 0x2000) | 0x0200);
2390 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2392 for (i = 0; i < 6; i++)
2393 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2396 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2397 if (tg3_wait_macro_done(tp)) {
2402 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2403 (chan * 0x2000) | 0x0200);
2404 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2405 if (tg3_wait_macro_done(tp)) {
2410 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2411 if (tg3_wait_macro_done(tp)) {
2416 for (i = 0; i < 6; i += 2) {
2419 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2420 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2421 tg3_wait_macro_done(tp)) {
2427 if (low != test_pat[chan][i] ||
2428 high != test_pat[chan][i+1]) {
2429 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2430 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2431 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2441 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2445 for (chan = 0; chan < 4; chan++) {
2448 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2449 (chan * 0x2000) | 0x0200);
2450 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2451 for (i = 0; i < 6; i++)
2452 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2453 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2454 if (tg3_wait_macro_done(tp))
2461 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2463 u32 reg32, phy9_orig;
2464 int retries, do_phy_reset, err;
2470 err = tg3_bmcr_reset(tp);
2476 /* Disable transmitter and interrupt. */
2477 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32))
2481 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2483 /* Set full-duplex, 1000 mbps. */
2484 tg3_writephy(tp, MII_BMCR,
2485 BMCR_FULLDPLX | BMCR_SPEED1000);
2487 /* Set to master mode. */
2488 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2491 tg3_writephy(tp, MII_CTRL1000,
2492 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2494 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2498 /* Block the PHY control access. */
2499 tg3_phydsp_write(tp, 0x8005, 0x0800);
2501 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2504 } while (--retries);
2506 err = tg3_phy_reset_chanpat(tp);
2510 tg3_phydsp_write(tp, 0x8005, 0x0000);
2512 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2513 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2515 tg3_phy_toggle_auxctl_smdsp(tp, false);
2517 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2519 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32)) {
2521 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2528 static void tg3_carrier_off(struct tg3 *tp)
2530 netif_carrier_off(tp->dev);
2531 tp->link_up = false;
2534 /* This will reset the tigon3 PHY if there is no valid
2535  * link unless the FORCE argument is non-zero.
/* NOTE(review): this chunk is a line-numbered, elided dump of tg3.c.
 * Local declarations (val, cpmuctrl, err), closing braces, returns and
 * several else-branches are missing here; restore from the upstream
 * driver before compiling.
 */
2537 static int tg3_phy_reset(struct tg3 *tp)
/* 5906: take the embedded (FET) PHY out of IDDQ low-power mode first. */
2542 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2543 val = tr32(GRC_MISC_CFG);
2544 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
/* BMSR latches link-down events; read twice to get current status. */
2547 err = tg3_readphy(tp, MII_BMSR, &val);
2548 err |= tg3_readphy(tp, MII_BMSR, &val);
/* Report link loss to the stack before resetting the PHY. */
2552 if (netif_running(tp->dev) && tp->link_up) {
2553 netif_carrier_off(tp->dev);
2554 tg3_link_report(tp);
/* 5703/5704/5705 need the dedicated reset workaround sequence. */
2557 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2558 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2559 tg3_asic_rev(tp) == ASIC_REV_5705) {
2560 err = tg3_phy_reset_5703_4_5(tp);
/* 5784 (non-AX): temporarily clear the GPHY 10MB-RX-only CPMU bit
 * around the BMCR reset, then restore it below.
 */
2567 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2568 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2569 cpmuctrl = tr32(TG3_CPMU_CTRL);
2570 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2572 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2575 err = tg3_bmcr_reset(tp);
2579 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2580 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2581 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2583 tw32(TG3_CPMU_CTRL, cpmuctrl);
/* 5784-AX / 5761-AX: undo the 12.5MHz MAC clock selection. */
2586 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2587 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2588 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2589 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2590 CPMU_LSPD_1000MB_MACCLK_12_5) {
2591 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2593 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2597 if (tg3_flag(tp, 5717_PLUS) &&
2598 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2601 tg3_phy_apply_otp(tp);
/* Re-apply auto power-down preference lost by the reset. */
2603 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2604 tg3_phy_toggle_apd(tp, true);
2606 tg3_phy_toggle_apd(tp, false);
/* Per-chip PHY DSP errata workarounds (ADC/BER/jitter bugs). */
2609 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2610 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2611 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2612 tg3_phydsp_write(tp, 0x000a, 0x0323);
2613 tg3_phy_toggle_auxctl_smdsp(tp, false);
2616 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2617 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2618 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2621 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2622 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2623 tg3_phydsp_write(tp, 0x000a, 0x310b);
2624 tg3_phydsp_write(tp, 0x201f, 0x9506);
2625 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2626 tg3_phy_toggle_auxctl_smdsp(tp, false);
2628 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2629 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2630 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2631 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2632 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2633 tg3_writephy(tp, MII_TG3_TEST1,
2634 MII_TG3_TEST1_TRIM_EN | 0x4);
2636 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2638 tg3_phy_toggle_auxctl_smdsp(tp, false);
2642 /* Set Extended packet length bit (bit 14) on all chips that */
2643 /* support jumbo frames */
2644 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2645 /* Cannot do read-modify-write on 5401 */
2646 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2647 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2648 /* Set bit 14 with read-modify-write to preserve other bits */
2649 err = tg3_phy_auxctl_read(tp,
2650 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2652 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2653 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2656 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2657 * jumbo frames transmission.
2659 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2660 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2661 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2662 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2665 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2666 /* adjust output voltage */
2667 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2670 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2671 tg3_phydsp_write(tp, 0xffb, 0x4000);
2673 tg3_phy_toggle_automdix(tp, 1);
2674 tg3_phy_set_wirespeed(tp);
/* Inter-function GPIO mailbox messages: each of the (up to) four PCI
 * functions owns a 4-bit nibble (shifted by 4 * pci_fn) holding a
 * "driver present" bit and a "needs Vaux power" bit. The ALL_* masks
 * test the corresponding bit across every nibble at once.
 */
2678 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2679 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2680 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2681 TG3_GPIO_MSG_NEED_VAUX)
2682 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2683 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2684 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2685 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2686 (TG3_GPIO_MSG_DRVR_PRES << 12))
2688 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2689 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2690 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2691 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2692 (TG3_GPIO_MSG_NEED_VAUX << 12))
2694 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2698 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2699 tg3_asic_rev(tp) == ASIC_REV_5719)
2700 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2702 status = tr32(TG3_CPMU_DRV_STATUS);
2704 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2705 status &= ~(TG3_GPIO_MSG_MASK << shift);
2706 status |= (newstat << shift);
2708 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2709 tg3_asic_rev(tp) == ASIC_REV_5719)
2710 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2712 tw32(TG3_CPMU_DRV_STATUS, status);
2714 return status >> TG3_APE_GPIO_MSG_SHIFT;
2717 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2719 if (!tg3_flag(tp, IS_NIC))
2722 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2723 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2724 tg3_asic_rev(tp) == ASIC_REV_5720) {
2725 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2728 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2730 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2731 TG3_GRC_LCLCTL_PWRSW_DELAY);
2733 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2735 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2736 TG3_GRC_LCLCTL_PWRSW_DELAY);
2742 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2746 if (!tg3_flag(tp, IS_NIC) ||
2747 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2748 tg3_asic_rev(tp) == ASIC_REV_5701)
2751 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2753 tw32_wait_f(GRC_LOCAL_CTRL,
2754 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2755 TG3_GRC_LCLCTL_PWRSW_DELAY);
2757 tw32_wait_f(GRC_LOCAL_CTRL,
2759 TG3_GRC_LCLCTL_PWRSW_DELAY);
2761 tw32_wait_f(GRC_LOCAL_CTRL,
2762 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2763 TG3_GRC_LCLCTL_PWRSW_DELAY);
/* Switch the NIC to auxiliary (Vaux) power by driving the GRC local
 * GPIO lines. The exact sequence depends on the chip family: 5700/5701
 * use one fixed pattern, 5761 (non-S) has GPIO0/GPIO2 swapped, and all
 * other NICs use a staged sequence that honours the NO_GPIO2 SRAM
 * config bit and a 5714 over-current workaround.
 *
 * NOTE(review): elided dump -- closing braces, some OR'd constants and
 * the no_gpio2 declaration are missing; restore from upstream tg3.c.
 */
2766 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2768 if (!tg3_flag(tp, IS_NIC))
2771 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2772 tg3_asic_rev(tp) == ASIC_REV_5701) {
2773 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2774 (GRC_LCLCTRL_GPIO_OE0 |
2775 GRC_LCLCTRL_GPIO_OE1 |
2776 GRC_LCLCTRL_GPIO_OE2 |
2777 GRC_LCLCTRL_GPIO_OUTPUT0 |
2778 GRC_LCLCTRL_GPIO_OUTPUT1),
2779 TG3_GRC_LCLCTL_PWRSW_DELAY);
2780 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2781 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2782 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2783 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2784 GRC_LCLCTRL_GPIO_OE1 |
2785 GRC_LCLCTRL_GPIO_OE2 |
2786 GRC_LCLCTRL_GPIO_OUTPUT0 |
2787 GRC_LCLCTRL_GPIO_OUTPUT1 |
/* Three-step handoff: assert, raise GPIO2, then drop GPIO0. */
2789 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2790 TG3_GRC_LCLCTL_PWRSW_DELAY);
2792 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2793 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2794 TG3_GRC_LCLCTL_PWRSW_DELAY);
2796 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2797 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2798 TG3_GRC_LCLCTL_PWRSW_DELAY);
2801 u32 grc_local_ctrl = 0;
2803 /* Workaround to prevent overdrawing Amps. */
2804 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2805 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2806 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2808 TG3_GRC_LCLCTL_PWRSW_DELAY);
2811 /* On 5753 and variants, GPIO2 cannot be used. */
2812 no_gpio2 = tp->nic_sram_data_cfg &
2813 NIC_SRAM_DATA_CFG_NO_GPIO2;
2815 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2816 GRC_LCLCTRL_GPIO_OE1 |
2817 GRC_LCLCTRL_GPIO_OE2 |
2818 GRC_LCLCTRL_GPIO_OUTPUT1 |
2819 GRC_LCLCTRL_GPIO_OUTPUT2;
2821 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2822 GRC_LCLCTRL_GPIO_OUTPUT2);
2824 tw32_wait_f(GRC_LOCAL_CTRL,
2825 tp->grc_local_ctrl | grc_local_ctrl,
2826 TG3_GRC_LCLCTL_PWRSW_DELAY);
2828 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2830 tw32_wait_f(GRC_LOCAL_CTRL,
2831 tp->grc_local_ctrl | grc_local_ctrl,
2832 TG3_GRC_LCLCTL_PWRSW_DELAY);
2835 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2836 tw32_wait_f(GRC_LOCAL_CTRL,
2837 tp->grc_local_ctrl | grc_local_ctrl,
2838 TG3_GRC_LCLCTL_PWRSW_DELAY);
2843 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2847 /* Serialize power state transitions */
2848 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2851 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2852 msg = TG3_GPIO_MSG_NEED_VAUX;
2854 msg = tg3_set_function_status(tp, msg);
2856 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2859 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2860 tg3_pwrsrc_switch_to_vaux(tp);
2862 tg3_pwrsrc_die_with_vmain(tp);
2865 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
/* Decide whether the board must stay on auxiliary power (Vaux) when
 * going down -- needed for WoL (when include_wol) or ASF management --
 * taking the peer PCI function on dual-port boards into account.
 *
 * NOTE(review): elided dump -- early returns, the need_vaux = true
 * assignments and closing braces are missing; see upstream tg3.c.
 */
2868 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2870 bool need_vaux = false;
2872 /* The GPIOs do something completely different on 57765. */
2873 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
/* 5717/5719/5720 arbitrate through the shared APE/CPMU status word. */
2876 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2877 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2878 tg3_asic_rev(tp) == ASIC_REV_5720) {
2879 tg3_frob_aux_power_5717(tp, include_wol ?
2880 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
/* Dual-port boards: consult the peer function before cutting power. */
2884 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2885 struct net_device *dev_peer;
2887 dev_peer = pci_get_drvdata(tp->pdev_peer);
2889 /* remove_one() may have been run on the peer. */
2891 struct tg3 *tp_peer = netdev_priv(dev_peer);
2893 if (tg3_flag(tp_peer, INIT_COMPLETE))
2896 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2897 tg3_flag(tp_peer, ENABLE_ASF))
2902 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2903 tg3_flag(tp, ENABLE_ASF))
2907 tg3_pwrsrc_switch_to_vaux(tp);
2909 tg3_pwrsrc_die_with_vmain(tp);
2912 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2914 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2916 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2917 if (speed != SPEED_10)
2919 } else if (speed == SPEED_10)
/* Put the PHY into its lowest safe power state before suspend/power-off.
 * The path taken depends on the PHY type: SerDes parts disable autoneg
 * hardware, the 5906 embedded PHY enters IDDQ, FET PHYs use shadow
 * register AUXMODE4, and copper PHYs optionally enter an AUXCTL
 * low-power/isolate mode before BMCR power-down.
 *
 * NOTE(review): elided dump -- local declarations, early returns and
 * closing braces are missing; restore from upstream tg3.c.
 */
2925 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2929 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2930 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
2931 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2932 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2935 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2936 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2937 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
/* 5906: put the embedded PHY into IDDQ (deep power-down). */
2942 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2944 val = tr32(GRC_MISC_CFG);
2945 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2948 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2950 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
/* Restart autoneg with nothing advertised, then set the
 * standby-power-down bit via the FET shadow registers.
 */
2953 tg3_writephy(tp, MII_ADVERTISE, 0);
2954 tg3_writephy(tp, MII_BMCR,
2955 BMCR_ANENABLE | BMCR_ANRESTART);
2957 tg3_writephy(tp, MII_TG3_FET_TEST,
2958 phytest | MII_TG3_FET_SHADOW_EN);
2959 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2960 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2962 MII_TG3_FET_SHDW_AUXMODE4,
2965 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2968 } else if (do_low_power) {
2969 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2970 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2972 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2973 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2974 MII_TG3_AUXCTL_PCTL_VREG_11V;
2975 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2978 /* The PHY should not be powered down on some chips because
2981 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2982 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2983 (tg3_asic_rev(tp) == ASIC_REV_5780 &&
2984 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2985 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
/* 5784-AX / 5761-AX: drop the MAC clock to 12.5MHz before PDOWN. */
2989 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2990 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2991 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2992 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2993 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2994 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2997 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3000 /* tp->lock is held. */
3001 static int tg3_nvram_lock(struct tg3 *tp)
3003 if (tg3_flag(tp, NVRAM)) {
3006 if (tp->nvram_lock_cnt == 0) {
3007 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3008 for (i = 0; i < 8000; i++) {
3009 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3014 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3018 tp->nvram_lock_cnt++;
3023 /* tp->lock is held. */
3024 static void tg3_nvram_unlock(struct tg3 *tp)
3026 if (tg3_flag(tp, NVRAM)) {
3027 if (tp->nvram_lock_cnt > 0)
3028 tp->nvram_lock_cnt--;
3029 if (tp->nvram_lock_cnt == 0)
3030 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3034 /* tp->lock is held. */
3035 static void tg3_enable_nvram_access(struct tg3 *tp)
3037 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3038 u32 nvaccess = tr32(NVRAM_ACCESS);
3040 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3044 /* tp->lock is held. */
3045 static void tg3_disable_nvram_access(struct tg3 *tp)
3047 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3048 u32 nvaccess = tr32(NVRAM_ACCESS);
3050 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
/* Read one 32-bit word from the legacy SEEPROM via the GRC EEPROM
 * address/data registers. offset must be dword-aligned and within the
 * address mask. The interface completes asynchronously, so completion
 * is polled (up to 1000 iterations).
 *
 * NOTE(review): elided dump -- error returns, udelay() in the poll loop
 * and the byteswapped store to *val are missing; see upstream tg3.c.
 */
3054 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3055 u32 offset, u32 *val)
3060 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
/* Preserve unrelated bits; program device-id 0 + offset + READ|START. */
3063 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3064 EEPROM_ADDR_DEVID_MASK |
3066 tw32(GRC_EEPROM_ADDR,
3068 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3069 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3070 EEPROM_ADDR_ADDR_MASK) |
3071 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3073 for (i = 0; i < 1000; i++) {
3074 tmp = tr32(GRC_EEPROM_ADDR);
3076 if (tmp & EEPROM_ADDR_COMPLETE)
3080 if (!(tmp & EEPROM_ADDR_COMPLETE))
3083 tmp = tr32(GRC_EEPROM_DATA);
3086 * The data will always be opposite the native endian
3087 * format. Perform a blind byteswap to compensate.
3094 #define NVRAM_CMD_TIMEOUT 10000
3096 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3100 tw32(NVRAM_CMD, nvram_cmd);
3101 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3103 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3109 if (i == NVRAM_CMD_TIMEOUT)
3115 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3117 if (tg3_flag(tp, NVRAM) &&
3118 tg3_flag(tp, NVRAM_BUFFERED) &&
3119 tg3_flag(tp, FLASH) &&
3120 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3121 (tp->nvram_jedecnum == JEDEC_ATMEL))
3123 addr = ((addr / tp->nvram_pagesize) <<
3124 ATMEL_AT45DB0X1B_PAGE_POS) +
3125 (addr % tp->nvram_pagesize);
3130 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3132 if (tg3_flag(tp, NVRAM) &&
3133 tg3_flag(tp, NVRAM_BUFFERED) &&
3134 tg3_flag(tp, FLASH) &&
3135 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3136 (tp->nvram_jedecnum == JEDEC_ATMEL))
3138 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3139 tp->nvram_pagesize) +
3140 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3145 /* NOTE: Data read in from NVRAM is byteswapped according to
3146 * the byteswapping settings for all other register accesses.
3147 * tg3 devices are BE devices, so on a BE machine, the data
3148 * returned will be exactly as it is seen in NVRAM. On a LE
3149 * machine, the 32-bit value will be byteswapped.
3151 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3155 if (!tg3_flag(tp, NVRAM))
3156 return tg3_nvram_read_using_eeprom(tp, offset, val);
3158 offset = tg3_nvram_phys_addr(tp, offset);
3160 if (offset > NVRAM_ADDR_MSK)
3163 ret = tg3_nvram_lock(tp);
3167 tg3_enable_nvram_access(tp);
3169 tw32(NVRAM_ADDR, offset);
3170 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3171 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3174 *val = tr32(NVRAM_RDDATA);
3176 tg3_disable_nvram_access(tp);
3178 tg3_nvram_unlock(tp);
3183 /* Ensures NVRAM data is in bytestream format. */
3184 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3187 int res = tg3_nvram_read(tp, offset, &v);
3189 *val = cpu_to_be32(v);
/* Write @len bytes (dword-aligned) from @buf to the legacy SEEPROM one
 * 32-bit word at a time via the GRC EEPROM registers, polling each word
 * for completion.
 *
 * NOTE(review): elided dump -- loop locals (addr, data), delays and the
 * final returns are missing; see upstream tg3.c.
 */
3193 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3194 u32 offset, u32 len, u8 *buf)
3199 for (i = 0; i < len; i += 4) {
3205 memcpy(&data, buf + i, 4);
3208 * The SEEPROM interface expects the data to always be opposite
3209 * the native endian format. We accomplish this by reversing
3210 * all the operations that would have been performed on the
3211 * data from a call to tg3_nvram_read_be32().
3213 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
/* Acknowledge any stale COMPLETE status before starting the write. */
3215 val = tr32(GRC_EEPROM_ADDR);
3216 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3218 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3220 tw32(GRC_EEPROM_ADDR, val |
3221 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3222 (addr & EEPROM_ADDR_ADDR_MASK) |
/* Poll for per-word completion, as in the read path. */
3226 for (j = 0; j < 1000; j++) {
3227 val = tr32(GRC_EEPROM_ADDR);
3229 if (val & EEPROM_ADDR_COMPLETE)
3233 if (!(val & EEPROM_ADDR_COMPLETE)) {
3242 /* offset and length are dword aligned */
/* Write to unbuffered flash, which only supports page-granular erase:
 * for each page touched, read the whole page into a kmalloc'd bounce
 * buffer, merge in the caller's data, erase the page, then rewrite it
 * word by word (FIRST on the first word, LAST on the final one).
 * Finishes with a write-disable command.
 *
 * NOTE(review): elided dump -- declarations, break/ret handling and
 * closing braces are missing; see upstream tg3.c.
 */
3243 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3247 u32 pagesize = tp->nvram_pagesize;
3248 u32 pagemask = pagesize - 1;
3252 tmp = kmalloc(pagesize, GFP_KERNEL);
3258 u32 phy_addr, page_off, size;
/* Round down to the start of the containing flash page. */
3260 phy_addr = offset & ~pagemask;
3262 for (j = 0; j < pagesize; j += 4) {
3263 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3264 (__be32 *) (tmp + j));
3271 page_off = offset & pagemask;
/* Merge caller data into the page image, then advance past it. */
3278 memcpy(tmp + page_off, buf, size);
3280 offset = offset + (pagesize - page_off);
3282 tg3_enable_nvram_access(tp);
3285 * Before we can erase the flash page, we need
3286 * to issue a special "write enable" command.
3288 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3290 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3293 /* Erase the target page */
3294 tw32(NVRAM_ADDR, phy_addr);
3296 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3297 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3299 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3302 /* Issue another write enable to start the write. */
3303 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3305 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
/* Rewrite the merged page image one dword at a time. */
3308 for (j = 0; j < pagesize; j += 4) {
3311 data = *((__be32 *) (tmp + j));
3313 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3315 tw32(NVRAM_ADDR, phy_addr + j);
3317 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3321 nvram_cmd |= NVRAM_CMD_FIRST;
3322 else if (j == (pagesize - 4))
3323 nvram_cmd |= NVRAM_CMD_LAST;
3325 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
/* Leave the part write-protected again. */
3333 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3334 tg3_nvram_exec_cmd(tp, nvram_cmd);
3341 /* offset and length are dword aligned */
/* Write to buffered flash / EEPROM one dword per iteration: tag the
 * first word of each page with NVRAM_CMD_FIRST and the last with
 * NVRAM_CMD_LAST, issue a write-enable for ST parts at each page start,
 * and always send FIRST|LAST for non-flash (EEPROM) devices.
 *
 * NOTE(review): elided dump -- locals, a few branch lines and the final
 * return are missing; see upstream tg3.c.
 */
3342 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3347 for (i = 0; i < len; i += 4, offset += 4) {
3348 u32 page_off, phy_addr, nvram_cmd;
3351 memcpy(&data, buf + i, 4);
3352 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3354 page_off = offset % tp->nvram_pagesize;
3356 phy_addr = tg3_nvram_phys_addr(tp, offset);
3358 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3360 if (page_off == 0 || i == 0)
3361 nvram_cmd |= NVRAM_CMD_FIRST;
3362 if (page_off == (tp->nvram_pagesize - 4))
3363 nvram_cmd |= NVRAM_CMD_LAST;
3366 nvram_cmd |= NVRAM_CMD_LAST;
/* The address register need only be written at page starts on
 * 57765+ flash; older parts and non-flash need it every word.
 */
3368 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3369 !tg3_flag(tp, FLASH) ||
3370 !tg3_flag(tp, 57765_PLUS))
3371 tw32(NVRAM_ADDR, phy_addr);
3373 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3374 !tg3_flag(tp, 5755_PLUS) &&
3375 (tp->nvram_jedecnum == JEDEC_ST) &&
3376 (nvram_cmd & NVRAM_CMD_FIRST)) {
3379 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3380 ret = tg3_nvram_exec_cmd(tp, cmd);
3384 if (!tg3_flag(tp, FLASH)) {
3385 /* We always do complete word writes to eeprom. */
3386 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3389 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3396 /* offset and length are dword aligned */
/* Top-level NVRAM write entry point: temporarily drop the GPIO-based
 * write protection, route to the SEEPROM / buffered / unbuffered
 * writer as appropriate under the NVRAM lock and GRC write-enable,
 * then restore write protection.
 *
 * NOTE(review): elided dump -- ret declaration, else/closing braces
 * and the final return are missing; see upstream tg3.c.
 */
3397 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
/* Deassert GPIO1 to lift the external EEPROM write-protect. */
3401 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3402 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3403 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3407 if (!tg3_flag(tp, NVRAM)) {
3408 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3412 ret = tg3_nvram_lock(tp);
3416 tg3_enable_nvram_access(tp);
3417 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3418 tw32(NVRAM_WRITE1, 0x406);
3420 grc_mode = tr32(GRC_MODE);
3421 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3423 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3424 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3427 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3431 grc_mode = tr32(GRC_MODE);
3432 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3434 tg3_disable_nvram_access(tp);
3435 tg3_nvram_unlock(tp);
/* Re-assert GPIO1 to restore write protection. */
3438 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3439 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
/* On-chip scratch memory windows used to stage firmware images for the
 * embedded RX and TX MIPS CPUs (16KiB each).
 */
3446 #define RX_CPU_SCRATCH_BASE 0x30000
3447 #define RX_CPU_SCRATCH_SIZE 0x04000
3448 #define TX_CPU_SCRATCH_BASE 0x34000
3449 #define TX_CPU_SCRATCH_SIZE 0x04000
3451 /* tp->lock is held. */
3452 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3455 const int iters = 10000;
3457 for (i = 0; i < iters; i++) {
3458 tw32(cpu_base + CPU_STATE, 0xffffffff);
3459 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3460 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3464 return (i == iters) ? -EBUSY : 0;
3467 /* tp->lock is held. */
3468 static int tg3_rxcpu_pause(struct tg3 *tp)
3470 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3472 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3473 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3479 /* tp->lock is held. */
3480 static int tg3_txcpu_pause(struct tg3 *tp)
3482 return tg3_pause_cpu(tp, TX_CPU_BASE);
3485 /* tp->lock is held. */
3486 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3488 tw32(cpu_base + CPU_STATE, 0xffffffff);
3489 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3492 /* tp->lock is held. */
3493 static void tg3_rxcpu_resume(struct tg3 *tp)
3495 tg3_resume_cpu(tp, RX_CPU_BASE);
3498 /* tp->lock is held. */
3499 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3503 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3505 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3506 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3508 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3511 if (cpu_base == RX_CPU_BASE) {
3512 rc = tg3_rxcpu_pause(tp);
3515 * There is only an Rx CPU for the 5750 derivative in the
3518 if (tg3_flag(tp, IS_SSB_CORE))
3521 rc = tg3_txcpu_pause(tp);
3525 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3526 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3530 /* Clear firmware's nvram arbitration. */
3531 if (tg3_flag(tp, NVRAM))
3532 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3536 static int tg3_fw_data_len(struct tg3 *tp,
3537 const struct tg3_firmware_hdr *fw_hdr)
3541 /* Non fragmented firmware have one firmware header followed by a
3542 * contiguous chunk of data to be written. The length field in that
3543 * header is not the length of data to be written but the complete
3544 * length of the bss. The data length is determined based on
3545 * tp->fw->size minus headers.
3547 * Fragmented firmware have a main header followed by multiple
3548 * fragments. Each fragment is identical to non fragmented firmware
3549 * with a firmware header followed by a contiguous chunk of data. In
3550 * the main header, the length field is unused and set to 0xffffffff.
3551 * In each fragment header the length is the entire size of that
3552 * fragment i.e. fragment data + header length. Data length is
3553 * therefore length field in the header minus TG3_FW_HDR_LEN.
3555 if (tp->fw_len == 0xffffffff)
3556 fw_len = be32_to_cpu(fw_hdr->len);
3558 fw_len = tp->fw->size;
3560 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3563 /* tp->lock is held. */
/* Copy a firmware image (possibly fragmented, see tg3_fw_data_len())
 * from tp->fw into the scratch memory of the CPU at @cpu_base: zero the
 * scratch window, halt the CPU (under the NVRAM lock, except on 57766),
 * then write each fragment's data words at its base address.
 *
 * NOTE(review): elided dump -- err/i declarations, early returns and
 * the final return are missing; see upstream tg3.c.
 */
3564 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3565 u32 cpu_scratch_base, int cpu_scratch_size,
3566 const struct tg3_firmware_hdr *fw_hdr)
3569 void (*write_op)(struct tg3 *, u32, u32);
3570 int total_len = tp->fw->size;
3572 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3574 "%s: Trying to load TX cpu firmware which is 5705\n",
/* 5705+ (except 57766) can write NIC memory directly; older parts
 * must go through the indirect register window.
 */
3579 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3580 write_op = tg3_write_mem;
3582 write_op = tg3_write_indirect_reg32;
3584 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3585 /* It is possible that bootcode is still loading at this point.
3586 * Get the nvram lock first before halting the cpu.
3588 int lock_err = tg3_nvram_lock(tp);
3589 err = tg3_halt_cpu(tp, cpu_base);
3591 tg3_nvram_unlock(tp);
3595 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3596 write_op(tp, cpu_scratch_base + i, 0);
3597 tw32(cpu_base + CPU_STATE, 0xffffffff);
3598 tw32(cpu_base + CPU_MODE,
3599 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3601 /* Subtract additional main header for fragmented firmware and
3602 * advance to the first fragment
3604 total_len -= TG3_FW_HDR_LEN;
/* Fragment data immediately follows its header. */
3609 u32 *fw_data = (u32 *)(fw_hdr + 1);
3610 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3611 write_op(tp, cpu_scratch_base +
3612 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3614 be32_to_cpu(fw_data[i]));
3616 total_len -= be32_to_cpu(fw_hdr->len);
3618 /* Advance to next fragment */
3619 fw_hdr = (struct tg3_firmware_hdr *)
3620 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3621 } while (total_len > 0);
3629 /* tp->lock is held. */
3630 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3633 const int iters = 5;
3635 tw32(cpu_base + CPU_STATE, 0xffffffff);
3636 tw32_f(cpu_base + CPU_PC, pc);
3638 for (i = 0; i < iters; i++) {
3639 if (tr32(cpu_base + CPU_PC) == pc)
3641 tw32(cpu_base + CPU_STATE, 0xffffffff);
3642 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3643 tw32_f(cpu_base + CPU_PC, pc);
3647 return (i == iters) ? -EBUSY : 0;
3650 /* tp->lock is held. */
/* Load the 5701 A0 errata firmware into both the RX and TX CPU scratch
 * areas, then start only the RX CPU at the firmware's base address.
 *
 * NOTE(review): elided dump -- err propagation after each load and the
 * final return are missing; see upstream tg3.c.
 */
3651 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3653 const struct tg3_firmware_hdr *fw_hdr;
3656 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3658 /* Firmware blob starts with version numbers, followed by
3659 start address and length. We are setting complete length.
3660 length = end_address_of_bss - start_address_of_text.
3661 Remainder is the blob to be loaded contiguously
3662 from start address. */
3664 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3665 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3670 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3671 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3676 /* Now startup only the RX cpu. */
3677 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3678 be32_to_cpu(fw_hdr->base_addr));
3680 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3681 "should be %08x\n", __func__,
3682 tr32(RX_CPU_BASE + CPU_PC),
3683 be32_to_cpu(fw_hdr->base_addr));
3687 tg3_rxcpu_resume(tp);
3692 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3694 const int iters = 1000;
3698 /* Wait for boot code to complete initialization and enter service
3699 * loop. It is then safe to download service patches
3701 for (i = 0; i < iters; i++) {
3702 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3709 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3713 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3715 netdev_warn(tp->dev,
3716 "Other patches exist. Not downloading EEE patch\n");
3723 /* tp->lock is held. */
/* Download the 57766 service-patch firmware (fragmented format, see
 * the inline comment) after validating the RX CPU is in its service
 * loop and no other patch is installed. Silently returns if no
 * firmware applies or validation fails.
 *
 * NOTE(review): elided dump -- early returns and a tp->fw NULL check
 * are missing; see upstream tg3.c.
 */
3724 static void tg3_load_57766_firmware(struct tg3 *tp)
3726 struct tg3_firmware_hdr *fw_hdr;
3728 if (!tg3_flag(tp, NO_NVRAM))
3731 if (tg3_validate_rxcpu_state(tp))
3737 /* This firmware blob has a different format than older firmware
3738 * releases as given below. The main difference is we have fragmented
3739 * data to be written to non-contiguous locations.
3741 * In the beginning we have a firmware header identical to other
3742 * firmware which consists of version, base addr and length. The length
3743 * here is unused and set to 0xffffffff.
3745 * This is followed by a series of firmware fragments which are
3746 * individually identical to previous firmware. i.e. they have the
3747 * firmware header and followed by data for that fragment. The version
3748 * field of the individual fragment header is unused.
3751 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3752 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3755 if (tg3_rxcpu_pause(tp))
3758 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3759 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3761 tg3_rxcpu_resume(tp);
3764 /* tp->lock is held. */
/* Load the TSO offload firmware into the appropriate CPU: the RX CPU
 * mbuf-pool area on 5705, otherwise the TX CPU scratch window. After
 * loading, start the CPU at the firmware's base address.
 *
 * NOTE(review): elided dump -- err propagation, the early return when
 * FW_TSO is clear, and the final return are missing; see upstream.
 */
3765 static int tg3_load_tso_firmware(struct tg3 *tp)
3767 const struct tg3_firmware_hdr *fw_hdr;
3768 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3771 if (!tg3_flag(tp, FW_TSO))
3774 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3776 /* Firmware blob starts with version numbers, followed by
3777 start address and length. We are setting complete length.
3778 length = end_address_of_bss - start_address_of_text.
3779 Remainder is the blob to be loaded contiguously
3780 from start address. */
3782 cpu_scratch_size = tp->fw_len;
3784 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3785 cpu_base = RX_CPU_BASE;
3786 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3788 cpu_base = TX_CPU_BASE;
3789 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3790 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3793 err = tg3_load_firmware_cpu(tp, cpu_base,
3794 cpu_scratch_base, cpu_scratch_size,
3799 /* Now startup the cpu. */
3800 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3801 be32_to_cpu(fw_hdr->base_addr));
3804 "%s fails to set CPU PC, is %08x should be %08x\n",
3805 __func__, tr32(cpu_base + CPU_PC),
3806 be32_to_cpu(fw_hdr->base_addr));
3810 tg3_resume_cpu(tp, cpu_base);
3815 /* tp->lock is held. */
/* Program the device's unicast MAC address into the four MAC_ADDR
 * register pairs (optionally skipping slot 1, used by management
 * firmware), into the twelve extended-address slots on 5703/5704, and
 * derive the TX backoff seed from the address bytes.
 */
3816 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3818 u32 addr_high, addr_low;
/* Split the 6-byte address into a 16-bit high and 32-bit low half. */
3821 addr_high = ((tp->dev->dev_addr[0] << 8) |
3822 tp->dev->dev_addr[1]);
3823 addr_low = ((tp->dev->dev_addr[2] << 24) |
3824 (tp->dev->dev_addr[3] << 16) |
3825 (tp->dev->dev_addr[4] << 8) |
3826 (tp->dev->dev_addr[5] << 0));
3827 for (i = 0; i < 4; i++) {
/* Slot 1 may be owned by ASF/management firmware. */
3828 if (i == 1 && skip_mac_1)
3830 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3831 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3834 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3835 tg3_asic_rev(tp) == ASIC_REV_5704) {
3836 for (i = 0; i < 12; i++) {
3837 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3838 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
/* Seed the transmit backoff RNG from the address byte sum. */
3842 addr_high = (tp->dev->dev_addr[0] +
3843 tp->dev->dev_addr[1] +
3844 tp->dev->dev_addr[2] +
3845 tp->dev->dev_addr[3] +
3846 tp->dev->dev_addr[4] +
3847 tp->dev->dev_addr[5]) &
3848 TX_BACKOFF_SEED_MASK;
3849 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3852 static void tg3_enable_register_access(struct tg3 *tp)
3855 * Make sure register accesses (indirect or otherwise) will function
3858 pci_write_config_dword(tp->pdev,
3859 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3862 static int tg3_power_up(struct tg3 *tp)
3866 tg3_enable_register_access(tp);
3868 err = pci_set_power_state(tp->pdev, PCI_D0);
3870 /* Switch out of Vaux if it is a NIC */
3871 tg3_pwrsrc_switch_to_vmain(tp);
3873 netdev_err(tp->dev, "Transition to D0 failed\n");
/* Forward declaration: defined later in the file, needed by the
 * power-down path below.
 */
3879 static int tg3_setup_phy(struct tg3 *, int);
3881 static int tg3_power_down_prepare(struct tg3 *tp)
3884 bool device_should_wake, do_low_power;
3886 tg3_enable_register_access(tp);
3888 /* Restore the CLKREQ setting. */
3889 if (tg3_flag(tp, CLKREQ_BUG))
3890 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3891 PCI_EXP_LNKCTL_CLKREQ_EN);
3893 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3894 tw32(TG3PCI_MISC_HOST_CTRL,
3895 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3897 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3898 tg3_flag(tp, WOL_ENABLE);
3900 if (tg3_flag(tp, USE_PHYLIB)) {
3901 do_low_power = false;
3902 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3903 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3904 struct phy_device *phydev;
3905 u32 phyid, advertising;
3907 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3909 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3911 tp->link_config.speed = phydev->speed;
3912 tp->link_config.duplex = phydev->duplex;
3913 tp->link_config.autoneg = phydev->autoneg;
3914 tp->link_config.advertising = phydev->advertising;
3916 advertising = ADVERTISED_TP |
3918 ADVERTISED_Autoneg |
3919 ADVERTISED_10baseT_Half;
3921 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3922 if (tg3_flag(tp, WOL_SPEED_100MB))
3924 ADVERTISED_100baseT_Half |
3925 ADVERTISED_100baseT_Full |
3926 ADVERTISED_10baseT_Full;
3928 advertising |= ADVERTISED_10baseT_Full;
3931 phydev->advertising = advertising;
3933 phy_start_aneg(phydev);
3935 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3936 if (phyid != PHY_ID_BCMAC131) {
3937 phyid &= PHY_BCM_OUI_MASK;
3938 if (phyid == PHY_BCM_OUI_1 ||
3939 phyid == PHY_BCM_OUI_2 ||
3940 phyid == PHY_BCM_OUI_3)
3941 do_low_power = true;
3945 do_low_power = true;
3947 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3948 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3950 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3951 tg3_setup_phy(tp, 0);
3954 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3957 val = tr32(GRC_VCPU_EXT_CTRL);
3958 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3959 } else if (!tg3_flag(tp, ENABLE_ASF)) {
3963 for (i = 0; i < 200; i++) {
3964 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3965 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3970 if (tg3_flag(tp, WOL_CAP))
3971 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3972 WOL_DRV_STATE_SHUTDOWN |
3976 if (device_should_wake) {
3979 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3981 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3982 tg3_phy_auxctl_write(tp,
3983 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3984 MII_TG3_AUXCTL_PCTL_WOL_EN |
3985 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3986 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3990 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3991 mac_mode = MAC_MODE_PORT_MODE_GMII;
3993 mac_mode = MAC_MODE_PORT_MODE_MII;
3995 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3996 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
3997 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3998 SPEED_100 : SPEED_10;
3999 if (tg3_5700_link_polarity(tp, speed))
4000 mac_mode |= MAC_MODE_LINK_POLARITY;
4002 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4005 mac_mode = MAC_MODE_PORT_MODE_TBI;
4008 if (!tg3_flag(tp, 5750_PLUS))
4009 tw32(MAC_LED_CTRL, tp->led_ctrl);
4011 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4012 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4013 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4014 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4016 if (tg3_flag(tp, ENABLE_APE))
4017 mac_mode |= MAC_MODE_APE_TX_EN |
4018 MAC_MODE_APE_RX_EN |
4019 MAC_MODE_TDE_ENABLE;
4021 tw32_f(MAC_MODE, mac_mode);
4024 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4028 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4029 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4030 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4033 base_val = tp->pci_clock_ctrl;
4034 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4035 CLOCK_CTRL_TXCLK_DISABLE);
4037 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4038 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4039 } else if (tg3_flag(tp, 5780_CLASS) ||
4040 tg3_flag(tp, CPMU_PRESENT) ||
4041 tg3_asic_rev(tp) == ASIC_REV_5906) {
4043 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4044 u32 newbits1, newbits2;
4046 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4047 tg3_asic_rev(tp) == ASIC_REV_5701) {
4048 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4049 CLOCK_CTRL_TXCLK_DISABLE |
4051 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4052 } else if (tg3_flag(tp, 5705_PLUS)) {
4053 newbits1 = CLOCK_CTRL_625_CORE;
4054 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4056 newbits1 = CLOCK_CTRL_ALTCLK;
4057 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4060 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4063 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4066 if (!tg3_flag(tp, 5705_PLUS)) {
4069 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4070 tg3_asic_rev(tp) == ASIC_REV_5701) {
4071 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4072 CLOCK_CTRL_TXCLK_DISABLE |
4073 CLOCK_CTRL_44MHZ_CORE);
4075 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4078 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4079 tp->pci_clock_ctrl | newbits3, 40);
4083 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4084 tg3_power_down_phy(tp, do_low_power);
4086 tg3_frob_aux_power(tp, true);
4088 /* Workaround for unstable PLL clock */
4089 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4090 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4091 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4092 u32 val = tr32(0x7d00);
4094 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4096 if (!tg3_flag(tp, ENABLE_ASF)) {
4099 err = tg3_nvram_lock(tp);
4100 tg3_halt_cpu(tp, RX_CPU_BASE);
4102 tg3_nvram_unlock(tp);
4106 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4111 static void tg3_power_down(struct tg3 *tp)
4113 tg3_power_down_prepare(tp);
4115 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4116 pci_set_power_state(tp->pdev, PCI_D3hot);
4119 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4121 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4122 case MII_TG3_AUX_STAT_10HALF:
4124 *duplex = DUPLEX_HALF;
4127 case MII_TG3_AUX_STAT_10FULL:
4129 *duplex = DUPLEX_FULL;
4132 case MII_TG3_AUX_STAT_100HALF:
4134 *duplex = DUPLEX_HALF;
4137 case MII_TG3_AUX_STAT_100FULL:
4139 *duplex = DUPLEX_FULL;
4142 case MII_TG3_AUX_STAT_1000HALF:
4143 *speed = SPEED_1000;
4144 *duplex = DUPLEX_HALF;
4147 case MII_TG3_AUX_STAT_1000FULL:
4148 *speed = SPEED_1000;
4149 *duplex = DUPLEX_FULL;
4153 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4154 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4156 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4160 *speed = SPEED_UNKNOWN;
4161 *duplex = DUPLEX_UNKNOWN;
/* tg3_phy_autoneg_cfg() - program the PHY autoneg advertisement registers.
 *
 * Builds the 10/100 advertisement word (MII_ADVERTISE) from the ethtool
 * advertise mask plus flow-control bits, programs MII_CTRL1000 on
 * gigabit-capable PHYs, and then configures EEE advertisement when the
 * PHY supports it.  Returns 0 on success or a PHY access error code.
 *
 * NOTE(review): this listing is elided — error-check lines, the "val = 0"
 * initialisation before the EEE bit-building, and some switch cases appear
 * to be missing relative to the in-tree driver; confirm against tg3.c.
 */
4166 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
/* Advertise CSMA plus the requested 10/100 modes and pause capabilities. */
4171 new_adv = ADVERTISE_CSMA;
4172 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4173 new_adv |= mii_advertise_flowctrl(flowctrl);
4175 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
/* Gigabit-capable PHYs additionally get MII_CTRL1000 programmed. */
4179 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4180 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
/* 5701 A0/B0 erratum: force master mode for stable gigabit links. */
4182 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4183 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4184 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4186 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
/* Done unless the PHY is EEE capable. */
4191 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
/* Disable LPI while (re)configuring EEE advertisement. */
4194 tw32(TG3_CPMU_EEE_MODE,
4195 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
/* Unlock the DSP shadow registers for the EEE workaround writes below. */
4197 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4202 /* Advertise 100-BaseTX EEE ability */
4203 if (advertise & ADVERTISED_100baseT_Full)
4204 val |= MDIO_AN_EEE_ADV_100TX;
4205 /* Advertise 1000-BaseT EEE ability */
4206 if (advertise & ADVERTISED_1000baseT_Full)
4207 val |= MDIO_AN_EEE_ADV_1000T;
/* EEE advertisement lives in clause-45 space (MDIO_MMD_AN). */
4208 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4212 switch (tg3_asic_rev(tp)) {
4214 case ASIC_REV_57765:
4215 case ASIC_REV_57766:
4217 /* If we advertised any eee advertisements above... */
4219 val = MII_TG3_DSP_TAP26_ALNOKO |
4220 MII_TG3_DSP_TAP26_RMRXSTO |
4221 MII_TG3_DSP_TAP26_OPCSINPT;
4222 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4226 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4227 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4228 MII_TG3_DSP_CH34TP2_HIBW01);
/* Re-lock the DSP registers; keep the first error seen. */
4231 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
/* tg3_phy_copper_begin() - (re)start link bring-up on a copper PHY.
 *
 * With autoneg enabled (or when in low-power/WoL mode) this programs the
 * advertisement registers and restarts autonegotiation; otherwise it
 * forces the configured speed/duplex directly via BMCR.
 *
 * NOTE(review): listing is elided — the declarations, switch case labels
 * (SPEED_10/100/1000) and loop delays are missing here; confirm against
 * the in-tree tg3.c before relying on exact control flow.
 */
4240 static void tg3_phy_copper_begin(struct tg3 *tp)
4242 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4243 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
/* In low-power (WoL) mode only advertise 10Mb, plus 100Mb when the
 * WOL_SPEED_100MB flag allows it, with full flow control.
 */
4246 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
4247 adv = ADVERTISED_10baseT_Half |
4248 ADVERTISED_10baseT_Full;
4249 if (tg3_flag(tp, WOL_SPEED_100MB))
4250 adv |= ADVERTISED_100baseT_Half |
4251 ADVERTISED_100baseT_Full;
4253 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
/* Normal path: use the user-configured advertisement, masking gigabit
 * modes on 10/100-only PHYs.
 */
4255 adv = tp->link_config.advertising;
4256 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4257 adv &= ~(ADVERTISED_1000baseT_Half |
4258 ADVERTISED_1000baseT_Full);
4260 fc = tp->link_config.flowctrl;
4263 tg3_phy_autoneg_cfg(tp, adv, fc);
/* Kick off (or restart) autonegotiation. */
4265 tg3_writephy(tp, MII_BMCR,
4266 BMCR_ANENABLE | BMCR_ANRESTART);
4269 u32 bmcr, orig_bmcr;
/* Forced mode: the requested speed/duplex becomes active immediately. */
4271 tp->link_config.active_speed = tp->link_config.speed;
4272 tp->link_config.active_duplex = tp->link_config.duplex;
4274 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4275 /* With autoneg disabled, 5715 only links up when the
4276 * advertisement register has the configured speed
4279 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
/* Build the forced-mode BMCR value from the configured speed/duplex. */
4283 switch (tp->link_config.speed) {
4289 bmcr |= BMCR_SPEED100;
4293 bmcr |= BMCR_SPEED1000;
4297 if (tp->link_config.duplex == DUPLEX_FULL)
4298 bmcr |= BMCR_FULLDPLX;
/* Only rewrite BMCR when it actually changes; bounce through loopback
 * and wait for link to drop before applying the new forced mode.
 */
4300 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4301 (bmcr != orig_bmcr)) {
4302 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4303 for (i = 0; i < 1500; i++) {
/* BMSR latches link-down; read twice for the current state. */
4307 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4308 tg3_readphy(tp, MII_BMSR, &tmp))
4310 if (!(tmp & BMSR_LSTATUS)) {
4315 tg3_writephy(tp, MII_BMCR, bmcr);
4321 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4325 /* Turn off tap power management. */
4326 /* Set Extended packet length bit */
4327 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4329 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4330 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4331 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4332 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4333 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4340 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4342 u32 advmsk, tgtadv, advertising;
4344 advertising = tp->link_config.advertising;
4345 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4347 advmsk = ADVERTISE_ALL;
4348 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4349 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4350 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4353 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4356 if ((*lcladv & advmsk) != tgtadv)
4359 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4362 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4364 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4368 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4369 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4370 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4371 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4372 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4374 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4377 if (tg3_ctrl != tgtadv)
4384 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4388 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4391 if (tg3_readphy(tp, MII_STAT1000, &val))
4394 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4397 if (tg3_readphy(tp, MII_LPA, rmtadv))
4400 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4401 tp->link_config.rmt_adv = lpeth;
4406 static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
4408 if (curr_link_up != tp->link_up) {
4410 netif_carrier_on(tp->dev);
4412 netif_carrier_off(tp->dev);
4413 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4414 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4417 tg3_link_report(tp);
/* tg3_setup_copper_phy() - full link setup/poll path for copper PHYs.
 *
 * Clears stale MAC status, applies per-chip PHY errata workarounds,
 * determines the current link/speed/duplex (autoneg or forced), then
 * programs MAC_MODE, LEDs, flow control and PCIe CLKREQ to match, and
 * finally reports any link-state change.
 *
 * NOTE(review): this listing is elided — declarations, several loop
 * bodies, `udelay()` calls and closing braces are missing relative to
 * the in-tree driver; treat the exact control flow as indicative only.
 */
4424 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4426 int current_link_up;
4428 u32 lcl_adv, rmt_adv;
/* Ack any latched MAC status change bits before probing the PHY. */
4436 (MAC_STATUS_SYNC_CHANGED |
4437 MAC_STATUS_CFG_CHANGED |
4438 MAC_STATUS_MI_COMPLETION |
4439 MAC_STATUS_LNKSTATE_CHANGED));
/* Auto-polling must be off while we issue manual MDIO transactions. */
4442 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4444 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4448 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4450 /* Some third-party PHYs need to be reset on link going
4453 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4454 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4455 tg3_asic_rev(tp) == ASIC_REV_5705) &&
/* BMSR latches link-down events: read twice for the live state. */
4457 tg3_readphy(tp, MII_BMSR, &bmsr);
4458 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4459 !(bmsr & BMSR_LSTATUS))
/* BCM5401: reload the DSP patch whenever link is down. */
4465 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4466 tg3_readphy(tp, MII_BMSR, &bmsr);
4467 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4468 !tg3_flag(tp, INIT_COMPLETE))
4471 if (!(bmsr & BMSR_LSTATUS)) {
4472 err = tg3_init_5401phy_dsp(tp);
4476 tg3_readphy(tp, MII_BMSR, &bmsr);
4477 for (i = 0; i < 1000; i++) {
4479 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4480 (bmsr & BMSR_LSTATUS)) {
/* 5401 rev B0 at gigabit may need a hard PHY reset + re-patch. */
4486 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4487 TG3_PHY_REV_BCM5401_B0 &&
4488 !(bmsr & BMSR_LSTATUS) &&
4489 tp->link_config.active_speed == SPEED_1000) {
4490 err = tg3_phy_reset(tp);
4492 err = tg3_init_5401phy_dsp(tp);
4497 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4498 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4499 /* 5701 {A0,B0} CRC bug workaround */
4500 tg3_writephy(tp, 0x15, 0x0a75);
4501 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4502 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4503 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4506 /* Clear pending interrupts... */
4507 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4508 tg3_readphy(tp, MII_TG3_ISTAT, &val);
/* Unmask only the link-change interrupt when using MI interrupts. */
4510 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4511 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG)
4512 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4513 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4515 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4516 tg3_asic_rev(tp) == ASIC_REV_5701) {
4517 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4518 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4519 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4521 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
/* Start from "no link" and (re)derive everything below. */
4524 current_link_up = 0;
4525 current_speed = SPEED_UNKNOWN;
4526 current_duplex = DUPLEX_UNKNOWN;
4527 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4528 tp->link_config.rmt_adv = 0;
4530 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4531 err = tg3_phy_auxctl_read(tp,
4532 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4534 if (!err && !(val & (1 << 10))) {
4535 tg3_phy_auxctl_write(tp,
4536 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
/* Poll BMSR (double-read for the latched bit) until link appears. */
4543 for (i = 0; i < 100; i++) {
4544 tg3_readphy(tp, MII_BMSR, &bmsr);
4545 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4546 (bmsr & BMSR_LSTATUS))
4551 if (bmsr & BMSR_LSTATUS) {
/* Wait for a valid speed/duplex indication in AUX_STAT. */
4554 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4555 for (i = 0; i < 2000; i++) {
4557 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4562 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
/* Wait until BMCR reads back a settled, sane value. */
4567 for (i = 0; i < 200; i++) {
4568 tg3_readphy(tp, MII_BMCR, &bmcr);
4569 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4571 if (bmcr && bmcr != 0x7fff)
4579 tp->link_config.active_speed = current_speed;
4580 tp->link_config.active_duplex = current_duplex;
/* Autoneg: link is "up" only if both local and remote advertisement
 * are consistent with the configuration; forced mode: speed/duplex
 * must match what was requested.
 */
4582 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4583 if ((bmcr & BMCR_ANENABLE) &&
4584 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4585 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4586 current_link_up = 1;
4588 if (!(bmcr & BMCR_ANENABLE) &&
4589 tp->link_config.speed == current_speed &&
4590 tp->link_config.duplex == current_duplex) {
4591 current_link_up = 1;
4595 if (current_link_up == 1 &&
4596 tp->link_config.active_duplex == DUPLEX_FULL) {
/* Record crossover (MDI-X) status; FET PHYs use a different reg. */
4599 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4600 reg = MII_TG3_FET_GEN_STAT;
4601 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4603 reg = MII_TG3_EXT_STAT;
4604 bit = MII_TG3_EXT_STAT_MDIX;
4607 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4608 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4610 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
/* No (valid) link yet: restart the bring-up sequence. */
4615 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4616 tg3_phy_copper_begin(tp);
/* Behind a RoboSwitch the link is effectively always up. */
4618 if (tg3_flag(tp, ROBOSWITCH)) {
4619 current_link_up = 1;
4620 /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4621 current_speed = SPEED_1000;
4622 current_duplex = DUPLEX_FULL;
4623 tp->link_config.active_speed = current_speed;
4624 tp->link_config.active_duplex = current_duplex;
4627 tg3_readphy(tp, MII_BMSR, &bmsr);
4628 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4629 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4630 current_link_up = 1;
/* Program the MAC port mode to match the negotiated speed. */
4633 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4634 if (current_link_up == 1) {
4635 if (tp->link_config.active_speed == SPEED_100 ||
4636 tp->link_config.active_speed == SPEED_10)
4637 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4639 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4640 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4641 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4643 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4645 /* In order for the 5750 core in BCM4785 chip to work properly
4646 * in RGMII mode, the Led Control Register must be set up.
4648 if (tg3_flag(tp, RGMII_MODE)) {
4649 u32 led_ctrl = tr32(MAC_LED_CTRL);
4650 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4652 if (tp->link_config.active_speed == SPEED_10)
4653 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4654 else if (tp->link_config.active_speed == SPEED_100)
4655 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4656 LED_CTRL_100MBPS_ON);
4657 else if (tp->link_config.active_speed == SPEED_1000)
4658 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4659 LED_CTRL_1000MBPS_ON);
4661 tw32(MAC_LED_CTRL, led_ctrl);
4665 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4666 if (tp->link_config.active_duplex == DUPLEX_HALF)
4667 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
/* 5700 needs the link-polarity bit set per speed (hardware quirk). */
4669 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4670 if (current_link_up == 1 &&
4671 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4672 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4674 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4677 /* ??? Without this setting Netgear GA302T PHY does not
4678 * ??? send/receive packets...
4680 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4681 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
4682 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4683 tw32_f(MAC_MI_MODE, tp->mi_mode);
4687 tw32_f(MAC_MODE, tp->mac_mode);
4690 tg3_phy_eee_adjust(tp, current_link_up);
4692 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4693 /* Polled via timer. */
4694 tw32_f(MAC_EVENT, 0);
4696 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
/* 5700 at gigabit on PCI-X: notify firmware via the mailbox. */
4700 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
4701 current_link_up == 1 &&
4702 tp->link_config.active_speed == SPEED_1000 &&
4703 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4706 (MAC_STATUS_SYNC_CHANGED |
4707 MAC_STATUS_CFG_CHANGED));
4710 NIC_SRAM_FIRMWARE_MBOX,
4711 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4714 /* Prevent send BD corruption. */
4715 if (tg3_flag(tp, CLKREQ_BUG)) {
4716 if (tp->link_config.active_speed == SPEED_100 ||
4717 tp->link_config.active_speed == SPEED_10)
4718 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4719 PCI_EXP_LNKCTL_CLKREQ_EN);
4721 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4722 PCI_EXP_LNKCTL_CLKREQ_EN);
4725 tg3_test_and_report_link_chg(tp, current_link_up);
/* Software state for the IEEE 802.3 Clause 37 1000BASE-X autonegotiation
 * state machine (tg3_fiber_aneg_smachine), used when the MAC's hardware
 * autoneg is not in use.  The ANEG_STATE_* values map onto the states of
 * the Clause 37 arbitration diagram; MR_* flag bits mirror the standard's
 * "MR" management variables; ANEG_CFG_* decode the received /C/ ordered-set
 * config word.
 */
4730 struct tg3_fiber_aneginfo {
4732 #define ANEG_STATE_UNKNOWN 0
4733 #define ANEG_STATE_AN_ENABLE 1
4734 #define ANEG_STATE_RESTART_INIT 2
4735 #define ANEG_STATE_RESTART 3
4736 #define ANEG_STATE_DISABLE_LINK_OK 4
4737 #define ANEG_STATE_ABILITY_DETECT_INIT 5
4738 #define ANEG_STATE_ABILITY_DETECT 6
4739 #define ANEG_STATE_ACK_DETECT_INIT 7
4740 #define ANEG_STATE_ACK_DETECT 8
4741 #define ANEG_STATE_COMPLETE_ACK_INIT 9
4742 #define ANEG_STATE_COMPLETE_ACK 10
4743 #define ANEG_STATE_IDLE_DETECT_INIT 11
4744 #define ANEG_STATE_IDLE_DETECT 12
4745 #define ANEG_STATE_LINK_OK 13
4746 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
4747 #define ANEG_STATE_NEXT_PAGE_WAIT 15
/* Management (MR_*) flag bits; MR_LP_ADV_* reflect the link partner. */
4750 #define MR_AN_ENABLE 0x00000001
4751 #define MR_RESTART_AN 0x00000002
4752 #define MR_AN_COMPLETE 0x00000004
4753 #define MR_PAGE_RX 0x00000008
4754 #define MR_NP_LOADED 0x00000010
4755 #define MR_TOGGLE_TX 0x00000020
4756 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
4757 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
4758 #define MR_LP_ADV_SYM_PAUSE 0x00000100
4759 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
4760 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4761 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4762 #define MR_LP_ADV_NEXT_PAGE 0x00001000
4763 #define MR_TOGGLE_RX 0x00002000
4764 #define MR_NP_RX 0x00004000
4766 #define MR_LINK_OK 0x80000000
/* Timestamps in state-machine ticks (see fiber_autoneg's loop). */
4768 unsigned long link_time, cur_time;
/* Debounce state for matching consecutive received config words. */
4770 u32 ability_match_cfg;
4771 int ability_match_count;
4773 char ability_match, idle_match, ack_match;
/* Raw transmitted/received Clause 37 config words. */
4775 u32 txconfig, rxconfig;
4776 #define ANEG_CFG_NP 0x00000080
4777 #define ANEG_CFG_ACK 0x00000040
4778 #define ANEG_CFG_RF2 0x00000020
4779 #define ANEG_CFG_RF1 0x00000010
4780 #define ANEG_CFG_PS2 0x00000001
4781 #define ANEG_CFG_PS1 0x00008000
4782 #define ANEG_CFG_HD 0x00004000
4783 #define ANEG_CFG_FD 0x00002000
4784 #define ANEG_CFG_INVAL 0x00001f06
/* Return codes of tg3_fiber_aneg_smachine(). */
4789 #define ANEG_TIMER_ENAB 2
4790 #define ANEG_FAILED -1
/* Settle time, in state-machine ticks (~us), before state transitions. */
4792 #define ANEG_STATE_SETTLE_TIME 10000
/* tg3_fiber_aneg_smachine() - one tick of the software Clause 37
 * 1000BASE-X autonegotiation state machine.
 *
 * Called repeatedly from fiber_autoneg(); samples the received config
 * word from MAC_RX_AUTO_NEG, debounces it into ability/ack match flags,
 * then advances ap->state.  Returns ANEG_OK/ANEG_DONE/ANEG_TIMER_ENAB/
 * ANEG_FAILED (see the ANEG_* defines above).
 *
 * NOTE(review): listing is elided — declarations, the ap->cur_time
 * increment, several `ret = ...`/`break;` lines and closing braces are
 * missing relative to the in-tree driver.
 */
4794 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4795 struct tg3_fiber_aneginfo *ap)
4798 unsigned long delta;
/* First invocation: reset all match/debounce state. */
4802 if (ap->state == ANEG_STATE_UNKNOWN) {
4806 ap->ability_match_cfg = 0;
4807 ap->ability_match_count = 0;
4808 ap->ability_match = 0;
/* Sample the partner's config word; a value must be seen more than
 * once in a row before it is trusted (ability_match debounce).
 */
4814 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4815 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4817 if (rx_cfg_reg != ap->ability_match_cfg) {
4818 ap->ability_match_cfg = rx_cfg_reg;
4819 ap->ability_match = 0;
4820 ap->ability_match_count = 0;
4822 if (++ap->ability_match_count > 1) {
4823 ap->ability_match = 1;
4824 ap->ability_match_cfg = rx_cfg_reg;
4827 if (rx_cfg_reg & ANEG_CFG_ACK)
/* No config word received: clear all match state. */
4835 ap->ability_match_cfg = 0;
4836 ap->ability_match_count = 0;
4837 ap->ability_match = 0;
4843 ap->rxconfig = rx_cfg_reg;
4846 switch (ap->state) {
4847 case ANEG_STATE_UNKNOWN:
4848 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4849 ap->state = ANEG_STATE_AN_ENABLE;
4852 case ANEG_STATE_AN_ENABLE:
4853 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4854 if (ap->flags & MR_AN_ENABLE) {
4857 ap->ability_match_cfg = 0;
4858 ap->ability_match_count = 0;
4859 ap->ability_match = 0;
4863 ap->state = ANEG_STATE_RESTART_INIT;
4865 ap->state = ANEG_STATE_DISABLE_LINK_OK;
4869 case ANEG_STATE_RESTART_INIT:
4870 ap->link_time = ap->cur_time;
4871 ap->flags &= ~(MR_NP_LOADED);
/* Transmit a null config word while restarting. */
4873 tw32(MAC_TX_AUTO_NEG, 0);
4874 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4875 tw32_f(MAC_MODE, tp->mac_mode);
4878 ret = ANEG_TIMER_ENAB;
4879 ap->state = ANEG_STATE_RESTART;
4882 case ANEG_STATE_RESTART:
4883 delta = ap->cur_time - ap->link_time;
4884 if (delta > ANEG_STATE_SETTLE_TIME)
4885 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4887 ret = ANEG_TIMER_ENAB;
4890 case ANEG_STATE_DISABLE_LINK_OK:
4894 case ANEG_STATE_ABILITY_DETECT_INIT:
/* Build and start transmitting our ability word (FD + pause bits). */
4895 ap->flags &= ~(MR_TOGGLE_TX);
4896 ap->txconfig = ANEG_CFG_FD;
4897 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4898 if (flowctrl & ADVERTISE_1000XPAUSE)
4899 ap->txconfig |= ANEG_CFG_PS1;
4900 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4901 ap->txconfig |= ANEG_CFG_PS2;
4902 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4903 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4904 tw32_f(MAC_MODE, tp->mac_mode);
4907 ap->state = ANEG_STATE_ABILITY_DETECT;
4910 case ANEG_STATE_ABILITY_DETECT:
4911 if (ap->ability_match != 0 && ap->rxconfig != 0)
4912 ap->state = ANEG_STATE_ACK_DETECT_INIT;
4915 case ANEG_STATE_ACK_DETECT_INIT:
/* Partner seen: add the ACK bit to our transmitted word. */
4916 ap->txconfig |= ANEG_CFG_ACK;
4917 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4918 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4919 tw32_f(MAC_MODE, tp->mac_mode);
4922 ap->state = ANEG_STATE_ACK_DETECT;
4925 case ANEG_STATE_ACK_DETECT:
4926 if (ap->ack_match != 0) {
4927 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4928 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4929 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4931 ap->state = ANEG_STATE_AN_ENABLE;
4933 } else if (ap->ability_match != 0 &&
4934 ap->rxconfig == 0) {
/* Partner dropped to idle: restart negotiation. */
4935 ap->state = ANEG_STATE_AN_ENABLE;
4939 case ANEG_STATE_COMPLETE_ACK_INIT:
4940 if (ap->rxconfig & ANEG_CFG_INVAL) {
/* Decode the partner's ability word into MR_LP_ADV_* flags. */
4944 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4945 MR_LP_ADV_HALF_DUPLEX |
4946 MR_LP_ADV_SYM_PAUSE |
4947 MR_LP_ADV_ASYM_PAUSE |
4948 MR_LP_ADV_REMOTE_FAULT1 |
4949 MR_LP_ADV_REMOTE_FAULT2 |
4950 MR_LP_ADV_NEXT_PAGE |
4953 if (ap->rxconfig & ANEG_CFG_FD)
4954 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4955 if (ap->rxconfig & ANEG_CFG_HD)
4956 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4957 if (ap->rxconfig & ANEG_CFG_PS1)
4958 ap->flags |= MR_LP_ADV_SYM_PAUSE;
4959 if (ap->rxconfig & ANEG_CFG_PS2)
4960 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4961 if (ap->rxconfig & ANEG_CFG_RF1)
4962 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4963 if (ap->rxconfig & ANEG_CFG_RF2)
4964 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4965 if (ap->rxconfig & ANEG_CFG_NP)
4966 ap->flags |= MR_LP_ADV_NEXT_PAGE;
4968 ap->link_time = ap->cur_time;
4970 ap->flags ^= (MR_TOGGLE_TX);
4971 if (ap->rxconfig & 0x0008)
4972 ap->flags |= MR_TOGGLE_RX;
4973 if (ap->rxconfig & ANEG_CFG_NP)
4974 ap->flags |= MR_NP_RX;
4975 ap->flags |= MR_PAGE_RX;
4977 ap->state = ANEG_STATE_COMPLETE_ACK;
4978 ret = ANEG_TIMER_ENAB;
4981 case ANEG_STATE_COMPLETE_ACK:
4982 if (ap->ability_match != 0 &&
4983 ap->rxconfig == 0) {
4984 ap->state = ANEG_STATE_AN_ENABLE;
4987 delta = ap->cur_time - ap->link_time;
4988 if (delta > ANEG_STATE_SETTLE_TIME) {
4989 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4990 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4992 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4993 !(ap->flags & MR_NP_RX)) {
4994 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5002 case ANEG_STATE_IDLE_DETECT_INIT:
5003 ap->link_time = ap->cur_time;
/* Stop sending config words and wait for idle on the wire. */
5004 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5005 tw32_f(MAC_MODE, tp->mac_mode);
5008 ap->state = ANEG_STATE_IDLE_DETECT;
5009 ret = ANEG_TIMER_ENAB;
5012 case ANEG_STATE_IDLE_DETECT:
5013 if (ap->ability_match != 0 &&
5014 ap->rxconfig == 0) {
5015 ap->state = ANEG_STATE_AN_ENABLE;
5018 delta = ap->cur_time - ap->link_time;
5019 if (delta > ANEG_STATE_SETTLE_TIME) {
5020 /* XXX another gem from the Broadcom driver :( */
5021 ap->state = ANEG_STATE_LINK_OK;
5025 case ANEG_STATE_LINK_OK:
5026 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5030 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5031 /* ??? unimplemented */
5034 case ANEG_STATE_NEXT_PAGE_WAIT:
5035 /* ??? unimplemented */
5046 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5049 struct tg3_fiber_aneginfo aninfo;
5050 int status = ANEG_FAILED;
5054 tw32_f(MAC_TX_AUTO_NEG, 0);
5056 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5057 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5060 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5063 memset(&aninfo, 0, sizeof(aninfo));
5064 aninfo.flags |= MR_AN_ENABLE;
5065 aninfo.state = ANEG_STATE_UNKNOWN;
5066 aninfo.cur_time = 0;
5068 while (++tick < 195000) {
5069 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5070 if (status == ANEG_DONE || status == ANEG_FAILED)
5076 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5077 tw32_f(MAC_MODE, tp->mac_mode);
5080 *txflags = aninfo.txconfig;
5081 *rxflags = aninfo.flags;
5083 if (status == ANEG_DONE &&
5084 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5085 MR_LP_ADV_FULL_DUPLEX)))
/* tg3_init_bcm8002() - vendor initialisation sequence for the BCM8002
 * fiber PHY: reset, PLL lock range, auto-lock/comdet setup, POR pulse,
 * and finally channel-register deselect so the PHY ID can be read.
 *
 * The register numbers/values are undocumented vendor magic.
 * NOTE(review): listing is elided — the `udelay()` calls between the
 * writes are missing here; confirm delays against the in-tree driver.
 */
5091 static void tg3_init_bcm8002(struct tg3 *tp)
5093 u32 mac_status = tr32(MAC_STATUS);
5096 /* Reset when initting first time or we have a link. */
5097 if (tg3_flag(tp, INIT_COMPLETE) &&
5098 !(mac_status & MAC_STATUS_PCS_SYNCED))
5101 /* Set PLL lock range. */
5102 tg3_writephy(tp, 0x16, 0x8007);
5105 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5107 /* Wait for reset to complete. */
5108 /* XXX schedule_timeout() ... */
5109 for (i = 0; i < 500; i++)
5112 /* Config mode; select PMA/Ch 1 regs. */
5113 tg3_writephy(tp, 0x10, 0x8411);
5115 /* Enable auto-lock and comdet, select txclk for tx. */
5116 tg3_writephy(tp, 0x11, 0x0a10);
5118 tg3_writephy(tp, 0x18, 0x00a0);
5119 tg3_writephy(tp, 0x16, 0x41ff);
5121 /* Assert and deassert POR. */
5122 tg3_writephy(tp, 0x13, 0x0400);
5124 tg3_writephy(tp, 0x13, 0x0000);
5126 tg3_writephy(tp, 0x11, 0x0a50);
5128 tg3_writephy(tp, 0x11, 0x0a10);
5130 /* Wait for signal to stabilize */
5131 /* XXX schedule_timeout() ... */
5132 for (i = 0; i < 15000; i++)
5135 /* Deselect the channel register so we can read the PHYID
5138 tg3_writephy(tp, 0x10, 0x8011);
/* tg3_setup_fiber_hw_autoneg() - fiber link setup using the SERDES
 * digital block's hardware autonegotiation (SG_DIG_CTRL/SG_DIG_STATUS).
 *
 * Handles three paths: forced mode (autoneg off), (re)starting hardware
 * autoneg when SG_DIG_CTRL does not yet match the wanted config, and
 * evaluating a completed/failed negotiation, including parallel link
 * detection when the partner does not negotiate.  Returns nonzero when
 * link is up.
 *
 * NOTE(review): listing is elided — declarations, the serdes_cfg
 * pre-emphasis adjustments, `udelay()` calls and some braces are missing
 * relative to the in-tree driver.
 */
5141 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5144 u32 sg_dig_ctrl, sg_dig_status;
5145 u32 serdes_cfg, expected_sg_dig_ctrl;
5146 int workaround, port_a;
5147 int current_link_up;
5150 expected_sg_dig_ctrl = 0;
5153 current_link_up = 0;
/* 5704 A0/A1 need a serdes_cfg workaround; detect which MAC port. */
5155 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5156 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5158 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5161 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5162 /* preserve bits 20-23 for voltage regulator */
5163 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5166 sg_dig_ctrl = tr32(SG_DIG_CTRL);
/* Forced mode: disable HW autoneg and declare link on PCS sync. */
5168 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5169 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5171 u32 val = serdes_cfg;
5177 tw32_f(MAC_SERDES_CFG, val);
5180 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5182 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5183 tg3_setup_flow_control(tp, 0, 0);
5184 current_link_up = 1;
5189 /* Want auto-negotiation. */
5190 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5192 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5193 if (flowctrl & ADVERTISE_1000XPAUSE)
5194 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5195 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5196 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
/* Control register stale: restart hardware autoneg (soft reset),
 * unless we are mid parallel-detect with PCS sync and no config
 * words - then just count the timer down.
 */
5198 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5199 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5200 tp->serdes_counter &&
5201 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5202 MAC_STATUS_RCVD_CFG)) ==
5203 MAC_STATUS_PCS_SYNCED)) {
5204 tp->serdes_counter--;
5205 current_link_up = 1;
5210 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5211 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5213 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5215 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5216 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5217 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5218 MAC_STATUS_SIGNAL_DET)) {
5219 sg_dig_status = tr32(SG_DIG_STATUS);
5220 mac_status = tr32(MAC_STATUS);
/* Autoneg completed: extract pause capabilities from both sides. */
5222 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5223 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5224 u32 local_adv = 0, remote_adv = 0;
5226 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5227 local_adv |= ADVERTISE_1000XPAUSE;
5228 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5229 local_adv |= ADVERTISE_1000XPSE_ASYM;
5231 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5232 remote_adv |= LPA_1000XPAUSE;
5233 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5234 remote_adv |= LPA_1000XPAUSE_ASYM;
5236 tp->link_config.rmt_adv =
5237 mii_adv_to_ethtool_adv_x(remote_adv);
5239 tg3_setup_flow_control(tp, local_adv, remote_adv);
5240 current_link_up = 1;
5241 tp->serdes_counter = 0;
5242 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5243 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
/* Not complete: run the autoneg timeout down, then fall back to
 * parallel detection (link up iff PCS sync and no config words).
 */
5244 if (tp->serdes_counter)
5245 tp->serdes_counter--;
5248 u32 val = serdes_cfg;
5255 tw32_f(MAC_SERDES_CFG, val);
5258 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5261 /* Link parallel detection - link is up */
5262 /* only if we have PCS_SYNC and not */
5263 /* receiving config code words */
5264 mac_status = tr32(MAC_STATUS);
5265 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5266 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5267 tg3_setup_flow_control(tp, 0, 0);
5268 current_link_up = 1;
5270 TG3_PHYFLG_PARALLEL_DETECT;
5271 tp->serdes_counter =
5272 SERDES_PARALLEL_DET_TIMEOUT;
5274 goto restart_autoneg;
/* Lost signal entirely: rearm the autoneg timeout. */
5278 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5279 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5283 return current_link_up;
/* tg3_setup_fiber_by_hand() - fiber link setup using the software
 * Clause 37 state machine (fiber_autoneg) instead of the SERDES block's
 * hardware autoneg.
 *
 * Requires PCS sync; with autoneg enabled it negotiates pause settings
 * with the partner, otherwise it forces a 1000FD link.  Returns nonzero
 * when link is up.
 *
 * NOTE(review): listing is elided — declarations, `udelay()` calls and
 * closing braces are missing relative to the in-tree driver.
 */
5286 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5288 int current_link_up = 0;
/* No PCS sync means no usable signal: bail out with link down. */
5290 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5293 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5294 u32 txflags, rxflags;
5297 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5298 u32 local_adv = 0, remote_adv = 0;
/* Translate the exchanged config words into pause advertisement. */
5300 if (txflags & ANEG_CFG_PS1)
5301 local_adv |= ADVERTISE_1000XPAUSE;
5302 if (txflags & ANEG_CFG_PS2)
5303 local_adv |= ADVERTISE_1000XPSE_ASYM;
5305 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5306 remote_adv |= LPA_1000XPAUSE;
5307 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5308 remote_adv |= LPA_1000XPAUSE_ASYM;
5310 tp->link_config.rmt_adv =
5311 mii_adv_to_ethtool_adv_x(remote_adv);
5313 tg3_setup_flow_control(tp, local_adv, remote_adv);
5315 current_link_up = 1;
/* Give the MAC time to ack the sync/config change bits. */
5317 for (i = 0; i < 30; i++) {
5320 (MAC_STATUS_SYNC_CHANGED |
5321 MAC_STATUS_CFG_CHANGED));
5323 if ((tr32(MAC_STATUS) &
5324 (MAC_STATUS_SYNC_CHANGED |
5325 MAC_STATUS_CFG_CHANGED)) == 0)
/* Autoneg failed but carrier is present without config words:
 * accept the link via parallel detection.
 */
5329 mac_status = tr32(MAC_STATUS);
5330 if (current_link_up == 0 &&
5331 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5332 !(mac_status & MAC_STATUS_RCVD_CFG))
5333 current_link_up = 1;
/* Autoneg disabled: no pause, force the link up at 1000FD. */
5335 tg3_setup_flow_control(tp, 0, 0);
5337 /* Forcing 1000FD link up. */
5338 current_link_up = 1;
5340 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5343 tw32_f(MAC_MODE, tp->mac_mode);
5348 return current_link_up;
/* tg3_setup_fiber_phy() - top-level link setup for TBI/fiber ports.
 *
 * Chooses between hardware SERDES autoneg and the software state machine,
 * initialises the BCM8002 PHY when present, resolves the final link
 * state, drives the link LED, and reports link changes (including pause
 * or speed/duplex changes on an already-up link).
 *
 * NOTE(review): listing is elided — declarations, `udelay()` calls and
 * some braces are missing relative to the in-tree driver.
 */
5351 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5354 u16 orig_active_speed;
5355 u8 orig_active_duplex;
5357 int current_link_up;
/* Remember the pre-setup state so we can report changes at the end. */
5360 orig_pause_cfg = tp->link_config.active_flowctrl;
5361 orig_active_speed = tp->link_config.active_speed;
5362 orig_active_duplex = tp->link_config.active_duplex;
/* Fast path: without HW autoneg, if signal+sync are present and no
 * config words are arriving, just ack status changes and keep link.
 */
5364 if (!tg3_flag(tp, HW_AUTONEG) &&
5366 tg3_flag(tp, INIT_COMPLETE)) {
5367 mac_status = tr32(MAC_STATUS);
5368 mac_status &= (MAC_STATUS_PCS_SYNCED |
5369 MAC_STATUS_SIGNAL_DET |
5370 MAC_STATUS_CFG_CHANGED |
5371 MAC_STATUS_RCVD_CFG);
5372 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5373 MAC_STATUS_SIGNAL_DET)) {
5374 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5375 MAC_STATUS_CFG_CHANGED));
5380 tw32_f(MAC_TX_AUTO_NEG, 0);
/* Fiber ports always run the MAC in TBI mode, full duplex. */
5382 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5383 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5384 tw32_f(MAC_MODE, tp->mac_mode);
5387 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5388 tg3_init_bcm8002(tp);
5390 /* Enable link change event even when serdes polling. */
5391 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5394 current_link_up = 0;
5395 tp->link_config.rmt_adv = 0;
5396 mac_status = tr32(MAC_STATUS);
5398 if (tg3_flag(tp, HW_AUTONEG))
5399 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5401 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
/* Clear any stale link-change indication in the status block. */
5403 tp->napi[0].hw_status->status =
5404 (SD_STATUS_UPDATED |
5405 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5407 for (i = 0; i < 100; i++) {
5408 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5409 MAC_STATUS_CFG_CHANGED));
5411 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5412 MAC_STATUS_CFG_CHANGED |
5413 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
/* If PCS sync vanished, the link is down; optionally restart the
 * config-word exchange when autoneg has timed out.
 */
5417 mac_status = tr32(MAC_STATUS);
5418 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5419 current_link_up = 0;
5420 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5421 tp->serdes_counter == 0) {
5422 tw32_f(MAC_MODE, (tp->mac_mode |
5423 MAC_MODE_SEND_CONFIGS));
5425 tw32_f(MAC_MODE, tp->mac_mode);
/* Fiber link is always 1000FD when up; drive the LED accordingly. */
5429 if (current_link_up == 1) {
5430 tp->link_config.active_speed = SPEED_1000;
5431 tp->link_config.active_duplex = DUPLEX_FULL;
5432 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5433 LED_CTRL_LNKLED_OVERRIDE |
5434 LED_CTRL_1000MBPS_ON));
5436 tp->link_config.active_speed = SPEED_UNKNOWN;
5437 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5438 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5439 LED_CTRL_LNKLED_OVERRIDE |
5440 LED_CTRL_TRAFFIC_OVERRIDE));
/* Even without an up/down transition, report pause/speed/duplex
 * changes on an already-established link.
 */
5443 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5444 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5445 if (orig_pause_cfg != now_pause_cfg ||
5446 orig_active_speed != tp->link_config.active_speed ||
5447 orig_active_duplex != tp->link_config.active_duplex)
5448 tg3_link_report(tp);
/* Link setup for fiber PHYs with an MII management interface
 * (e.g. 5714S-class serdes).  Uses standard MII BMCR/BMSR/ADVERTISE
 * registers (with 1000BASE-X advertisement bits) instead of the TBI
 * autoneg block used by tg3_setup_fiber_phy().
 */
5454 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5456 int current_link_up, err = 0;
5460 u32 local_adv, remote_adv;
5462 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5463 tw32_f(MAC_MODE, tp->mac_mode);
/* Ack any stale MAC status change indications before probing. */
5469 (MAC_STATUS_SYNC_CHANGED |
5470 MAC_STATUS_CFG_CHANGED |
5471 MAC_STATUS_MI_COMPLETION |
5472 MAC_STATUS_LNKSTATE_CHANGED));
5478 current_link_up = 0;
5479 current_speed = SPEED_UNKNOWN;
5480 current_duplex = DUPLEX_UNKNOWN;
5481 tp->link_config.rmt_adv = 0;
/* BMSR link status is latched-low; read twice to get current state. */
5483 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5484 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
/* On 5714 the MAC TX status is the authoritative link indication. */
5485 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5486 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5487 bmsr |= BMSR_LSTATUS;
5489 bmsr &= ~BMSR_LSTATUS;
5492 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5494 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5495 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5496 /* do nothing, just check for link up at the end */
5497 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
/* Build the new 1000BASE-X advertisement word from the requested
 * flow control and advertised modes, then (re)start autoneg only
 * if it actually changed or autoneg was off.
 */
5500 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5501 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5502 ADVERTISE_1000XPAUSE |
5503 ADVERTISE_1000XPSE_ASYM |
5506 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5507 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5509 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5510 tg3_writephy(tp, MII_ADVERTISE, newadv);
5511 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5512 tg3_writephy(tp, MII_BMCR, bmcr);
5514 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5515 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5516 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
/* Forced-mode path: compute the BMCR we want (no autoneg). */
5523 bmcr &= ~BMCR_SPEED1000;
5524 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5526 if (tp->link_config.duplex == DUPLEX_FULL)
5527 new_bmcr |= BMCR_FULLDPLX;
5529 if (new_bmcr != bmcr) {
5530 /* BMCR_SPEED1000 is a reserved bit that needs
5531 * to be set on write.
5533 new_bmcr |= BMCR_SPEED1000;
5535 /* Force a linkdown */
/* Strip the 1000BASE-X ability bits so the partner drops link,
 * then apply the forced BMCR value.
 */
5539 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5540 adv &= ~(ADVERTISE_1000XFULL |
5541 ADVERTISE_1000XHALF |
5543 tg3_writephy(tp, MII_ADVERTISE, adv);
5544 tg3_writephy(tp, MII_BMCR, bmcr |
5548 tg3_carrier_off(tp);
5550 tg3_writephy(tp, MII_BMCR, new_bmcr);
/* Re-sample link status after reprogramming (latched-low again). */
5552 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5553 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5554 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5555 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5556 bmsr |= BMSR_LSTATUS;
5558 bmsr &= ~BMSR_LSTATUS;
5560 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5564 if (bmsr & BMSR_LSTATUS) {
5565 current_speed = SPEED_1000;
5566 current_link_up = 1;
5567 if (bmcr & BMCR_FULLDPLX)
5568 current_duplex = DUPLEX_FULL;
5570 current_duplex = DUPLEX_HALF;
5575 if (bmcr & BMCR_ANENABLE) {
/* Resolve duplex from the common local/remote advertisement. */
5578 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5579 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5580 common = local_adv & remote_adv;
5581 if (common & (ADVERTISE_1000XHALF |
5582 ADVERTISE_1000XFULL)) {
5583 if (common & ADVERTISE_1000XFULL)
5584 current_duplex = DUPLEX_FULL;
5586 current_duplex = DUPLEX_HALF;
5588 tp->link_config.rmt_adv =
5589 mii_adv_to_ethtool_adv_x(remote_adv);
5590 } else if (!tg3_flag(tp, 5780_CLASS)) {
5591 /* Link is up via parallel detect */
5593 current_link_up = 0;
/* Flow control only applies on full-duplex links. */
5598 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5599 tg3_setup_flow_control(tp, local_adv, remote_adv);
5601 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5602 if (tp->link_config.active_duplex == DUPLEX_HALF)
5603 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5605 tw32_f(MAC_MODE, tp->mac_mode);
5608 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5610 tp->link_config.active_speed = current_speed;
5611 tp->link_config.active_duplex = current_duplex;
5613 tg3_test_and_report_link_chg(tp, current_link_up);
/* Periodic serdes parallel-detection handler (called from the driver
 * timer path).  If autoneg has not completed within serdes_counter
 * ticks but we see signal detect without config code words, force the
 * link up at 1000/full via parallel detect.  Conversely, if a link that
 * came up via parallel detect starts receiving config words, re-enable
 * autoneg.
 */
5617 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5619 if (tp->serdes_counter) {
5620 /* Give autoneg time to complete. */
5621 tp->serdes_counter--;
5626 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5629 tg3_readphy(tp, MII_BMCR, &bmcr);
5630 if (bmcr & BMCR_ANENABLE) {
5633 /* Select shadow register 0x1f */
5634 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5635 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5637 /* Select expansion interrupt status register */
5638 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5639 MII_TG3_DSP_EXP1_INT_STAT);
/* Read twice: the expansion status register is latched. */
5640 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5641 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5643 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5644 /* We have signal detect and not receiving
5645 * config code words, link is up by parallel
/* Disable autoneg and force 1000/full. */
5649 bmcr &= ~BMCR_ANENABLE;
5650 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5651 tg3_writephy(tp, MII_BMCR, bmcr);
5652 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5655 } else if (tp->link_up &&
5656 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5657 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5660 /* Select expansion interrupt status register */
5661 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5662 MII_TG3_DSP_EXP1_INT_STAT);
5663 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5667 /* Config code words received, turn on autoneg. */
5668 tg3_readphy(tp, MII_BMCR, &bmcr);
5669 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5671 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
/* Top-level PHY/link setup dispatcher.
 *
 * Routes to the fiber-TBI, fiber-MII, or copper setup routine based on
 * phy_flags, then applies post-link fixups: 5784-AX MAC clock prescaler,
 * TX slot-time/IPG for half-duplex gigabit, statistics coalescing ticks,
 * and the ASPM power-management threshold workaround.
 * Returns the error code from the underlying setup routine.
 */
5677 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5682 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5683 err = tg3_setup_fiber_phy(tp, force_reset)
5684 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5685 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5687 err = tg3_setup_copper_phy(tp, force_reset);
/* 5784-AX: re-derive the GRC prescaler from the current MAC clock. */
5689 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
5692 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5693 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5695 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5700 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5701 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5702 tw32(GRC_MISC_CFG, val);
5705 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5706 (6 << TX_LENGTHS_IPG_SHIFT);
/* 5720/5762 keep their jumbo-frame-length / countdown fields. */
5707 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
5708 tg3_asic_rev(tp) == ASIC_REV_5762)
5709 val |= tr32(MAC_TX_LENGTHS) &
5710 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5711 TX_LENGTHS_CNT_DWN_VAL_MSK);
/* Half-duplex gigabit needs the extended (0xff) slot time. */
5713 if (tp->link_config.active_speed == SPEED_1000 &&
5714 tp->link_config.active_duplex == DUPLEX_HALF)
5715 tw32(MAC_TX_LENGTHS, val |
5716 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5718 tw32(MAC_TX_LENGTHS, val |
5719 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5721 if (!tg3_flag(tp, 5705_PLUS)) {
5723 tw32(HOSTCC_STAT_COAL_TICKS,
5724 tp->coal.stats_block_coalesce_usecs);
5726 tw32(HOSTCC_STAT_COAL_TICKS, 0);
/* ASPM workaround: adjust the PCIe L1 entry threshold. */
5730 if (tg3_flag(tp, ASPM_WORKAROUND)) {
5731 val = tr32(PCIE_PWR_MGMT_THRESH);
5733 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5736 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5737 tw32(PCIE_PWR_MGMT_THRESH, val);
5743 /* tp->lock must be held */
5744 static u64 tg3_refclk_read(struct tg3 *tp)
5746 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
5747 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
5750 /* tp->lock must be held */
5751 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
5753 tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
5754 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
5755 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
5756 tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
5759 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
5760 static inline void tg3_full_unlock(struct tg3 *tp);
/* ethtool .get_ts_info: report timestamping capabilities.
 * Advertises software timestamping plus hardware TX/RX timestamping,
 * and the PHC index when a PTP clock has been registered (-1 otherwise).
 * NOTE(review): the hardware capabilities are presumably gated on
 * PTP_CAPABLE in lines not visible here — confirm before relying on
 * this comment.
 */
5761 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
5763 struct tg3 *tp = netdev_priv(dev);
5765 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5766 SOF_TIMESTAMPING_RX_SOFTWARE |
5767 SOF_TIMESTAMPING_SOFTWARE |
5768 SOF_TIMESTAMPING_TX_HARDWARE |
5769 SOF_TIMESTAMPING_RX_HARDWARE |
5770 SOF_TIMESTAMPING_RAW_HARDWARE;
5773 info->phc_index = ptp_clock_index(tp->ptp_clock);
5775 info->phc_index = -1;
5777 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
5779 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
5780 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
5781 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
5782 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
/* PTP .adjfreq callback: program the hardware frequency correction.
 * @ppb: requested adjustment in parts per billion (sign handled via
 *       neg_adj in lines not visible in this view).
 */
5786 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
5788 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5789 bool neg_adj = false;
5797 /* Frequency adjustment is performed using hardware with a 24 bit
5798 * accumulator and a programmable correction value. On each clk, the
5799 * correction value gets added to the accumulator and when it
5800 * overflows, the time counter is incremented/decremented.
5802 * So conversion from ppb to correction value is
5803 * ppb * (1 << 24) / 1000000000
5805 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
5806 TG3_EAV_REF_CLK_CORRECT_MASK;
/* Register writes must be serialized against the rest of the driver. */
5808 tg3_full_lock(tp, 0);
5811 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
5812 TG3_EAV_REF_CLK_CORRECT_EN |
5813 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
5815 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
5817 tg3_full_unlock(tp);
/* PTP .adjtime callback: shift the clock by @delta nanoseconds.
 * Implemented purely in software by accumulating into ptp_adjust,
 * which is added to the hardware counter on every read.
 */
5822 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
5824 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5826 tg3_full_lock(tp, 0);
5827 tp->ptp_adjust += delta;
5828 tg3_full_unlock(tp);
/* PTP .gettime callback: return the current clock value as a timespec.
 * Reads the hardware reference clock under the full lock and applies
 * the software ptp_adjust offset.
 */
5833 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
5837 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5839 tg3_full_lock(tp, 0);
5840 ns = tg3_refclk_read(tp);
5841 ns += tp->ptp_adjust;
5842 tg3_full_unlock(tp);
/* Split total nanoseconds into seconds + leftover nanoseconds. */
5844 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
5845 ts->tv_nsec = remainder;
/* PTP .settime callback: set the hardware clock to @ts.
 * Writes the raw value into the reference clock registers; the
 * software ptp_adjust offset is presumably reset in a line not
 * visible here — confirm against the full source.
 */
5850 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
5851 const struct timespec *ts)
5854 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5856 ns = timespec_to_ns(ts);
5858 tg3_full_lock(tp, 0);
5859 tg3_refclk_write(tp, ns);
5861 tg3_full_unlock(tp);
/* PTP .enable callback: ancillary features (alarms, extts, PPS) are
 * not supported by this hardware/driver.
 */
5866 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
5867 struct ptp_clock_request *rq, int on)
/* Capability/ops template for the tg3 PTP hardware clock; copied into
 * tp->ptp_info by tg3_ptp_init() before clock registration.
 * max_adj is the largest frequency adjustment accepted, in ppb.
 */
5872 static const struct ptp_clock_info tg3_ptp_caps = {
5873 .owner = THIS_MODULE,
5874 .name = "tg3 clock",
5875 .max_adj = 250000000,
5880 .adjfreq = tg3_ptp_adjfreq,
5881 .adjtime = tg3_ptp_adjtime,
5882 .gettime = tg3_ptp_gettime,
5883 .settime = tg3_ptp_settime,
5884 .enable = tg3_ptp_enable,
/* Convert a raw hardware clock sample into a skb hardware timestamp.
 * Masks the counter to its valid bits; the software ptp_adjust offset
 * is added in the continuation of this call (line not visible here).
 */
5887 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
5888 struct skb_shared_hwtstamps *timestamp)
5890 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
5891 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
5895 /* tp->lock must be held */
/* tp->lock must be held.  Initialize PTP state on a PTP-capable device:
 * seed the hardware clock from system real time and install the
 * capability/ops template.
 */
5896 static void tg3_ptp_init(struct tg3 *tp)
5898 if (!tg3_flag(tp, PTP_CAPABLE))
5901 /* Initialize the hardware clock to the system time. */
5902 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
5904 tp->ptp_info = tg3_ptp_caps;
5907 /* tp->lock must be held */
/* Restore the hardware clock after a chip reset/resume, preserving the
 * accumulated software adjustment.
 */
5908 static void tg3_ptp_resume(struct tg3 *tp)
5910 if (!tg3_flag(tp, PTP_CAPABLE))
5913 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
/* Tear down the registered PTP clock; NULLs ptp_clock so a double call
 * is harmless.
 */
5917 static void tg3_ptp_fini(struct tg3 *tp)
5919 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
5922 ptp_clock_unregister(tp->ptp_clock);
5923 tp->ptp_clock = NULL;
/* Nonzero while interrupts are being synchronized/disabled; NAPI and
 * IRQ paths check this to avoid touching hardware mid-reconfiguration.
 */
5927 static inline int tg3_irq_sync(struct tg3 *tp)
5929 return tp->irq_sync;
/* Bulk-read @len bytes of registers starting at @off into @dst.
 * The destination pointer is advanced by @off first so that each
 * register lands at the slot matching its register offset within the
 * dump buffer.
 */
5932 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5936 dst = (u32 *)((u8 *)dst + off);
5937 for (i = 0; i < len; i += sizeof(u32))
5938 *dst++ = tr32(off + i);
/* Snapshot all legacy (non-PCIe-mapped) register ranges into @regs for
 * the debug dump.  Each tg3_rd32_loop() call covers one hardware block:
 * (start offset, byte length).  Some ranges are conditional on chip
 * capabilities (MSI-X vectors, pre-5705 TX CPU, NVRAM).
 */
5941 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5943 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5944 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5945 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5946 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5947 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5948 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5949 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5950 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5951 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5952 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5953 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5954 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5955 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5956 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5957 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5958 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5959 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5960 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5961 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
/* Per-vector host coalescing registers only exist with MSI-X. */
5963 if (tg3_flag(tp, SUPPORT_MSIX))
5964 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5966 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5967 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5968 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5969 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5970 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5971 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5972 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5973 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
/* Only pre-5705 chips have a separate TX CPU. */
5975 if (!tg3_flag(tp, 5705_PLUS)) {
5976 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5977 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5978 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5981 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5982 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5983 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5984 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5985 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5987 if (tg3_flag(tp, NVRAM))
5988 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
/* Dump chip registers and per-queue software/hardware state to the
 * kernel log for post-mortem debugging (e.g. after a TX timeout).
 * Uses GFP_ATOMIC since it can run from non-sleepable contexts; all-zero
 * register quads are skipped to keep the log readable.
 */
5991 static void tg3_dump_state(struct tg3 *tp)
5996 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6000 if (tg3_flag(tp, PCI_EXPRESS)) {
6001 /* Read up to but not including private PCI registers */
6002 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6003 regs[i / sizeof(u32)] = tr32(i);
6005 tg3_dump_legacy_regs(tp, regs);
/* Print four registers per line, skipping fully-zero groups. */
6007 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6008 if (!regs[i + 0] && !regs[i + 1] &&
6009 !regs[i + 2] && !regs[i + 3])
6012 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6014 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
/* Per-NAPI-vector state: hardware status block then driver indices. */
6019 for (i = 0; i < tp->irq_cnt; i++) {
6020 struct tg3_napi *tnapi = &tp->napi[i];
6022 /* SW status block */
6024 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6026 tnapi->hw_status->status,
6027 tnapi->hw_status->status_tag,
6028 tnapi->hw_status->rx_jumbo_consumer,
6029 tnapi->hw_status->rx_consumer,
6030 tnapi->hw_status->rx_mini_consumer,
6031 tnapi->hw_status->idx[0].rx_producer,
6032 tnapi->hw_status->idx[0].tx_consumer);
6035 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6037 tnapi->last_tag, tnapi->last_irq_tag,
6038 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6040 tnapi->prodring.rx_std_prod_idx,
6041 tnapi->prodring.rx_std_cons_idx,
6042 tnapi->prodring.rx_jmb_prod_idx,
6043 tnapi->prodring.rx_jmb_cons_idx);
6047 /* This is called whenever we suspect that the system chipset is re-
6048 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6049 * is bogus tx completions. We try to recover by setting the
6050 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
/* Recovery entry point for suspected MMIO write reordering by the host
 * chipset (see comment block above).  Warns once and flags the device
 * for a deferred chip reset; the BUG_ON asserts we are not already in
 * the reordering-safe (indirect mailbox) configuration.
 */
6053 static void tg3_tx_recover(struct tg3 *tp)
6055 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6056 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6058 netdev_warn(tp->dev,
6059 "The system may be re-ordering memory-mapped I/O "
6060 "cycles to the network device, attempting to recover. "
6061 "Please report the problem to the driver maintainer "
6062 "and include system chipset information.\n");
/* The reset itself happens later; just mark it pending under the lock. */
6064 spin_lock(&tp->lock);
6065 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6066 spin_unlock(&tp->lock);
/* Number of free TX descriptors on this NAPI vector's ring.
 * The producer/consumer difference is masked to the ring size since the
 * indices wrap; tx_prod/tx_cons may be updated concurrently, hence the
 * fetch-from-memory note below.
 */
6069 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6071 /* Tell compiler to fetch tx indices from memory. */
6073 return tnapi->tx_pending -
6074 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6077 /* Tigon3 never reports partial packet sends. So we do not
6078 * need special logic to handle SKBs that have not had all
6079 * of their frags sent yet, like SunGEM does.
/* TX completion processing for one NAPI vector: walk the ring from the
 * software consumer index to the hardware consumer index, unmapping DMA
 * buffers, harvesting hardware TX timestamps, and freeing skbs.  Updates
 * BQL accounting and wakes the TX queue if it was stopped and enough
 * descriptors are now free.
 */
6081 static void tg3_tx(struct tg3_napi *tnapi)
6083 struct tg3 *tp = tnapi->tp;
6084 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6085 u32 sw_idx = tnapi->tx_cons;
6086 struct netdev_queue *txq;
6087 int index = tnapi - tp->napi;
6088 unsigned int pkts_compl = 0, bytes_compl = 0;
6090 if (tg3_flag(tp, ENABLE_TSS))
6093 txq = netdev_get_tx_queue(tp->dev, index);
6095 while (sw_idx != hw_idx) {
6096 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6097 struct sk_buff *skb = ri->skb;
/* A NULL skb here means ring bookkeeping is corrupt. */
6100 if (unlikely(skb == NULL)) {
/* Harvest the hardware TX timestamp if this descriptor asked for one. */
6105 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6106 struct skb_shared_hwtstamps timestamp;
6107 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6108 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6110 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6112 skb_tstamp_tx(skb, &timestamp);
/* Unmap the linear part of the skb. */
6115 pci_unmap_single(tp->pdev,
6116 dma_unmap_addr(ri, mapping),
/* Skip over descriptor-chain entries for an oversize fragment. */
6122 while (ri->fragmented) {
6123 ri->fragmented = false;
6124 sw_idx = NEXT_TX(sw_idx);
6125 ri = &tnapi->tx_buffers[sw_idx];
6128 sw_idx = NEXT_TX(sw_idx);
/* Unmap each page fragment; ri->skb set or running past hw_idx
 * indicates a corrupted ring.
 */
6130 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6131 ri = &tnapi->tx_buffers[sw_idx];
6132 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6135 pci_unmap_page(tp->pdev,
6136 dma_unmap_addr(ri, mapping),
6137 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6140 while (ri->fragmented) {
6141 ri->fragmented = false;
6142 sw_idx = NEXT_TX(sw_idx);
6143 ri = &tnapi->tx_buffers[sw_idx];
6146 sw_idx = NEXT_TX(sw_idx);
6150 bytes_compl += skb->len;
/* Ring corruption detected above: trigger the MMIO-reorder recovery. */
6154 if (unlikely(tx_bug)) {
6160 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6162 tnapi->tx_cons = sw_idx;
6164 /* Need to make the tx_cons update visible to tg3_start_xmit()
6165 * before checking for netif_queue_stopped(). Without the
6166 * memory barrier, there is a small possibility that tg3_start_xmit()
6167 * will miss it and cause the queue to be stopped forever.
/* Re-check under the TX lock to close the race with tg3_start_xmit(). */
6171 if (unlikely(netif_tx_queue_stopped(txq) &&
6172 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6173 __netif_tx_lock(txq, smp_processor_id());
6174 if (netif_tx_queue_stopped(txq) &&
6175 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6176 netif_tx_wake_queue(txq);
6177 __netif_tx_unlock(txq);
/* Free an RX data buffer, which may be either a page fragment (from
 * netdev_alloc_frag) or a kmalloc'ed buffer — @is_frag selects the
 * matching release path.
 */
6181 static void tg3_frag_free(bool is_frag, void *data)
6184 put_page(virt_to_head_page(data));
/* Unmap and free one RX ring buffer.  skb_size is recomputed exactly as
 * in tg3_alloc_rx_data() so the frag-vs-kmalloc decision matches the
 * allocation path.
 */
6189 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6191 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6192 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6197 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6198 map_sz, PCI_DMA_FROMDEVICE);
6199 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6204 /* Returns size of skb allocated or < 0 on error.
6206 * We only need to fill in the address because the other members
6207 * of the RX descriptor are invariant, see tg3_init_rings.
6209 * Note the purposeful assymetry of cpu vs. chip accesses. For
6210 * posting buffers we only dirty the first cache line of the RX
6211 * descriptor (containing the address). Whereas for the RX status
6212 * buffers the cpu only reads the last cacheline of the RX descriptor
6213 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
/* Allocate and DMA-map a fresh RX buffer for the std or jumbo producer
 * ring (selected by @opaque_key) and install its address in the ring
 * descriptor.  On success *frag_size reports the frag allocation size
 * (0 when kmalloc was used).  Returns the buffer size or a negative
 * errno; on failure the ring slot is left untouched (see comment below).
 */
6215 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6216 u32 opaque_key, u32 dest_idx_unmasked,
6217 unsigned int *frag_size)
6219 struct tg3_rx_buffer_desc *desc;
6220 struct ring_info *map;
6223 int skb_size, data_size, dest_idx;
6225 switch (opaque_key) {
6226 case RXD_OPAQUE_RING_STD:
6227 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6228 desc = &tpr->rx_std[dest_idx];
6229 map = &tpr->rx_std_buffers[dest_idx];
6230 data_size = tp->rx_pkt_map_sz;
6233 case RXD_OPAQUE_RING_JUMBO:
6234 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6235 desc = &tpr->rx_jmb[dest_idx].std;
6236 map = &tpr->rx_jmb_buffers[dest_idx];
6237 data_size = TG3_RX_JMB_MAP_SZ;
6244 /* Do not overwrite any of the map or rp information
6245 * until we are sure we can commit to a new buffer.
6247 * Callers depend upon this behavior and assume that
6248 * we leave everything unchanged if we fail.
/* Buffer must hold headroom + data + shared_info; prefer the cheap
 * page-fragment allocator when that fits in one page.
 */
6250 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6251 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6252 if (skb_size <= PAGE_SIZE) {
6253 data = netdev_alloc_frag(skb_size);
6254 *frag_size = skb_size;
6256 data = kmalloc(skb_size, GFP_ATOMIC);
6262 mapping = pci_map_single(tp->pdev,
6263 data + TG3_RX_OFFSET(tp),
6265 PCI_DMA_FROMDEVICE);
/* Mapping failure: release the buffer and leave the slot untouched. */
6266 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6267 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6272 dma_unmap_addr_set(map, mapping, mapping);
6274 desc->addr_hi = ((u64)mapping >> 32);
6275 desc->addr_lo = ((u64)mapping & 0xffffffff);
6280 /* We only need to move over in the address because the other
6281 * members of the RX descriptor are invariant. See notes above
6282 * tg3_alloc_rx_data for full details.
/* Recycle an RX buffer: move the buffer and its DMA mapping from the
 * source slot (always in napi[0]'s producer set) back onto the
 * destination producer ring, without allocating anything.  Used when a
 * packet is dropped or copied so the buffer can be reposted.
 */
6284 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6285 struct tg3_rx_prodring_set *dpr,
6286 u32 opaque_key, int src_idx,
6287 u32 dest_idx_unmasked)
6289 struct tg3 *tp = tnapi->tp;
6290 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6291 struct ring_info *src_map, *dest_map;
6292 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6295 switch (opaque_key) {
6296 case RXD_OPAQUE_RING_STD:
6297 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6298 dest_desc = &dpr->rx_std[dest_idx];
6299 dest_map = &dpr->rx_std_buffers[dest_idx];
6300 src_desc = &spr->rx_std[src_idx];
6301 src_map = &spr->rx_std_buffers[src_idx];
6304 case RXD_OPAQUE_RING_JUMBO:
6305 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6306 dest_desc = &dpr->rx_jmb[dest_idx].std;
6307 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6308 src_desc = &spr->rx_jmb[src_idx].std;
6309 src_map = &spr->rx_jmb_buffers[src_idx];
/* Transfer buffer pointer, DMA handle, and descriptor address words. */
6316 dest_map->data = src_map->data;
6317 dma_unmap_addr_set(dest_map, mapping,
6318 dma_unmap_addr(src_map, mapping));
6319 dest_desc->addr_hi = src_desc->addr_hi;
6320 dest_desc->addr_lo = src_desc->addr_lo;
6322 /* Ensure that the update to the skb happens after the physical
6323 * addresses have been transferred to the new BD location.
/* Clearing data marks the source slot free for tg3_rx_prodring_xfer(). */
6327 src_map->data = NULL;
6330 /* The RX ring scheme is composed of multiple rings which post fresh
6331 * buffers to the chip, and one special ring the chip uses to report
6332 * status back to the host.
6334 * The special ring reports the status of received packets to the
6335 * host. The chip does not write into the original descriptor the
6336 * RX buffer was obtained from. The chip simply takes the original
6337 * descriptor as provided by the host, updates the status and length
6338 * field, then writes this into the next status ring entry.
6340 * Each ring the host uses to post buffers to the chip is described
6341 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
6342 * it is first placed into the on-chip ram. When the packet's length
6343 * is known, it walks down the TG3_BDINFO entries to select the ring.
6344 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6345 * which is within the range of the new packet's length is chosen.
6347 * The "separate ring for rx status" scheme may sound queer, but it makes
6348 * sense from a cache coherency perspective. If only the host writes
6349 * to the buffer post rings, and only the chip writes to the rx status
6350 * rings, then cache lines never move beyond shared-modified state.
6351 * If both the host and chip were to write into the same ring, cache line
6352 * eviction could occur since both entities want it in an exclusive state.
/* RX processing for one NAPI vector, bounded by @budget.
 *
 * Walks the return ring from the software pointer to the hardware
 * producer index.  Large packets get a fresh buffer allocated and the
 * old one handed to the stack zero-copy via build_skb(); small packets
 * are copied into a new skb and the original buffer recycled.  Finally
 * the status ring is ACKed and the producer rings refilled (directly,
 * or via napi[1] when RSS is enabled).  Returns the number of packets
 * received.
 */
6354 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6356 struct tg3 *tp = tnapi->tp;
6357 u32 work_mask, rx_std_posted = 0;
6358 u32 std_prod_idx, jmb_prod_idx;
6359 u32 sw_idx = tnapi->rx_rcb_ptr;
6362 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6364 hw_idx = *(tnapi->rx_rcb_prod_idx);
6366 * We need to order the read of hw_idx and the read of
6367 * the opaque cookie.
6372 std_prod_idx = tpr->rx_std_prod_idx;
6373 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6374 while (sw_idx != hw_idx && budget > 0) {
6375 struct ring_info *ri;
6376 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6378 struct sk_buff *skb;
6379 dma_addr_t dma_addr;
6380 u32 opaque_key, desc_idx, *post_ptr;
/* The opaque cookie encodes which producer ring (std/jumbo) and
 * which slot the buffer came from.
 */
6384 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6385 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6386 if (opaque_key == RXD_OPAQUE_RING_STD) {
6387 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6388 dma_addr = dma_unmap_addr(ri, mapping);
6390 post_ptr = &std_prod_idx;
6392 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6393 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6394 dma_addr = dma_unmap_addr(ri, mapping);
6396 post_ptr = &jmb_prod_idx;
6398 goto next_pkt_nopost;
6400 work_mask |= opaque_key;
/* Errored frames (other than odd-nibble MII) are recycled. */
6402 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6403 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6405 tg3_recycle_rx(tnapi, tpr, opaque_key,
6406 desc_idx, *post_ptr);
6408 /* Other statistics kept track of by card. */
6413 prefetch(data + TG3_RX_OFFSET(tp));
6414 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
/* Capture the hardware RX timestamp for PTP event packets. */
6417 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6418 RXD_FLAG_PTPSTAT_PTPV1 ||
6419 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6420 RXD_FLAG_PTPSTAT_PTPV2) {
6421 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6422 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
/* Large packet: replace the ring buffer and pass the old one up
 * zero-copy via build_skb().
 */
6425 if (len > TG3_RX_COPY_THRESH(tp)) {
6427 unsigned int frag_size;
6429 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6430 *post_ptr, &frag_size);
6434 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6435 PCI_DMA_FROMDEVICE);
6437 skb = build_skb(data, frag_size);
6439 tg3_frag_free(frag_size != 0, data);
6440 goto drop_it_no_recycle;
6442 skb_reserve(skb, TG3_RX_OFFSET(tp));
6443 /* Ensure that the update to the data happens
6444 * after the usage of the old DMA mapping.
/* Small packet: copy into a fresh skb and recycle the buffer. */
6451 tg3_recycle_rx(tnapi, tpr, opaque_key,
6452 desc_idx, *post_ptr);
6454 skb = netdev_alloc_skb(tp->dev,
6455 len + TG3_RAW_IP_ALIGN);
6457 goto drop_it_no_recycle;
6459 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6460 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6462 data + TG3_RX_OFFSET(tp),
6464 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6469 tg3_hwclock_to_timestamp(tp, tstamp,
6470 skb_hwtstamps(skb));
/* Trust the hardware checksum only when it verified TCP/UDP fully. */
6472 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6473 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6474 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6475 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6476 skb->ip_summed = CHECKSUM_UNNECESSARY;
6478 skb_checksum_none_assert(skb);
6480 skb->protocol = eth_type_trans(skb, tp->dev);
/* Drop frames longer than MTU unless they are VLAN tagged. */
6482 if (len > (tp->dev->mtu + ETH_HLEN) &&
6483 skb->protocol != htons(ETH_P_8021Q)) {
6485 goto drop_it_no_recycle;
6488 if (desc->type_flags & RXD_FLAG_VLAN &&
6489 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6490 __vlan_hwaccel_put_tag(skb,
6491 desc->err_vlan & RXD_VLAN_MASK);
6493 napi_gro_receive(&tnapi->napi, skb);
/* Periodically flush the std producer index so the chip never
 * starves while we process a long burst.
 */
6501 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6502 tpr->rx_std_prod_idx = std_prod_idx &
6503 tp->rx_std_ring_mask;
6504 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6505 tpr->rx_std_prod_idx);
6506 work_mask &= ~RXD_OPAQUE_RING_STD;
6511 sw_idx &= tp->rx_ret_ring_mask;
6513 /* Refresh hw_idx to see if there is new work */
6514 if (sw_idx == hw_idx) {
6515 hw_idx = *(tnapi->rx_rcb_prod_idx);
6520 /* ACK the status ring. */
6521 tnapi->rx_rcb_ptr = sw_idx;
6522 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6524 /* Refill RX ring(s). */
6525 if (!tg3_flag(tp, ENABLE_RSS)) {
6526 /* Sync BD data before updating mailbox */
6529 if (work_mask & RXD_OPAQUE_RING_STD) {
6530 tpr->rx_std_prod_idx = std_prod_idx &
6531 tp->rx_std_ring_mask;
6532 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6533 tpr->rx_std_prod_idx);
6535 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6536 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6537 tp->rx_jmb_ring_mask;
6538 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6539 tpr->rx_jmb_prod_idx);
6542 } else if (work_mask) {
6543 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6544 * updated before the producer indices can be updated.
6548 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6549 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
/* With RSS, napi[1] owns the hardware producer mailboxes; ask it
 * to transfer our refilled buffers (see tg3_rx_prodring_xfer).
 */
6551 if (tnapi != &tp->napi[1]) {
6552 tp->rx_refill = true;
6553 napi_schedule(&tp->napi[1].napi);
/* Check the status block for a link-change event and, if present,
 * clear the indication and re-run PHY setup under tp->lock.  Skipped
 * entirely when link changes are detected via register polling or a
 * dedicated link-change register.
 */
6560 static void tg3_poll_link(struct tg3 *tp)
6562 /* handle link change and other phy events */
6563 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6564 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6566 if (sblk->status & SD_STATUS_LINK_CHG) {
/* Clear the link-change bit while keeping UPDATED set. */
6567 sblk->status = SD_STATUS_UPDATED |
6568 (sblk->status & ~SD_STATUS_LINK_CHG);
6569 spin_lock(&tp->lock);
6570 if (tg3_flag(tp, USE_PHYLIB)) {
6572 (MAC_STATUS_SYNC_CHANGED |
6573 MAC_STATUS_CFG_CHANGED |
6574 MAC_STATUS_MI_COMPLETION |
6575 MAC_STATUS_LNKSTATE_CHANGED));
6578 tg3_setup_phy(tp, 0);
6579 spin_unlock(&tp->lock);
/* Transfer refilled RX buffers from a source producer set @spr (owned
 * by another NAPI vector) into the destination set @dpr whose mailbox
 * this vector owns.  Handles the standard ring first, then the jumbo
 * ring; copies are done in contiguous runs (cpycnt) bounded by ring
 * wrap and by occupied destination slots.  Returns an error indication
 * accumulated while scanning (0 when everything transferred cleanly).
 */
6584 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6585 struct tg3_rx_prodring_set *dpr,
6586 struct tg3_rx_prodring_set *spr)
6588 u32 si, di, cpycnt, src_prod_idx;
6592 src_prod_idx = spr->rx_std_prod_idx;
6594 /* Make sure updates to the rx_std_buffers[] entries and the
6595 * standard producer index are seen in the correct order.
6599 if (spr->rx_std_cons_idx == src_prod_idx)
/* Contiguous run length, accounting for ring wrap on the source. */
6602 if (spr->rx_std_cons_idx < src_prod_idx)
6603 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6605 cpycnt = tp->rx_std_ring_mask + 1 -
6606 spr->rx_std_cons_idx;
6608 cpycnt = min(cpycnt,
6609 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6611 si = spr->rx_std_cons_idx;
6612 di = dpr->rx_std_prod_idx;
/* Stop the run early at the first still-occupied destination slot. */
6614 for (i = di; i < di + cpycnt; i++) {
6615 if (dpr->rx_std_buffers[i].data) {
6625 /* Ensure that updates to the rx_std_buffers ring and the
6626 * shadowed hardware producer ring from tg3_recycle_skb() are
6627 * ordered correctly WRT the skb check above.
6631 memcpy(&dpr->rx_std_buffers[di],
6632 &spr->rx_std_buffers[si],
6633 cpycnt * sizeof(struct ring_info));
/* Copy the descriptor DMA address words alongside the ring_info. */
6635 for (i = 0; i < cpycnt; i++, di++, si++) {
6636 struct tg3_rx_buffer_desc *sbd, *dbd;
6637 sbd = &spr->rx_std[si];
6638 dbd = &dpr->rx_std[di];
6639 dbd->addr_hi = sbd->addr_hi;
6640 dbd->addr_lo = sbd->addr_lo;
6643 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6644 tp->rx_std_ring_mask;
6645 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6646 tp->rx_std_ring_mask;
/* Same procedure for the jumbo producer ring. */
6650 src_prod_idx = spr->rx_jmb_prod_idx;
6652 /* Make sure updates to the rx_jmb_buffers[] entries and
6653 * the jumbo producer index are seen in the correct order.
6657 if (spr->rx_jmb_cons_idx == src_prod_idx)
6660 if (spr->rx_jmb_cons_idx < src_prod_idx)
6661 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6663 cpycnt = tp->rx_jmb_ring_mask + 1 -
6664 spr->rx_jmb_cons_idx;
6666 cpycnt = min(cpycnt,
6667 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6669 si = spr->rx_jmb_cons_idx;
6670 di = dpr->rx_jmb_prod_idx;
6672 for (i = di; i < di + cpycnt; i++) {
6673 if (dpr->rx_jmb_buffers[i].data) {
6683 /* Ensure that updates to the rx_jmb_buffers ring and the
6684 * shadowed hardware producer ring from tg3_recycle_skb() are
6685 * ordered correctly WRT the skb check above.
6689 memcpy(&dpr->rx_jmb_buffers[di],
6690 &spr->rx_jmb_buffers[si],
6691 cpycnt * sizeof(struct ring_info));
6693 for (i = 0; i < cpycnt; i++, di++, si++) {
6694 struct tg3_rx_buffer_desc *sbd, *dbd;
6695 sbd = &spr->rx_jmb[si].std;
6696 dbd = &dpr->rx_jmb[di].std;
6697 dbd->addr_hi = sbd->addr_hi;
6698 dbd->addr_lo = sbd->addr_lo;
6701 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6702 tp->rx_jmb_ring_mask;
6703 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6704 tp->rx_jmb_ring_mask;
/* Core per-vector NAPI work pass: reap tx completions, process rx within
 * the remaining budget, and -- when RSS is enabled and this is vector 1 --
 * drain each vector's refilled producer ring back into vector 0's ring,
 * poking the hardware producer mailboxes only if an index actually moved.
 * Returns the updated work_done count.
 * NOTE(review): lines are elided in this view (braces, the tg3_tx() call
 * after the tx_consumer check, error handling around the xfer loop).
 */
6710 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6712 struct tg3 *tp = tnapi->tp;
6714 /* run TX completion thread */
6715 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6717 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
/* Vectors without an rx return ring have nothing more to do. */
6721 if (!tnapi->rx_rcb_prod_idx)
6724 /* run RX thread, within the bounds set by NAPI.
6725 * All RX "locking" is done by ensuring outside
6726 * code synchronizes with tg3->napi.poll()
6728 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6729 work_done += tg3_rx(tnapi, budget - work_done);
6731 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6732 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
/* Snapshot indices so we can tell whether the xfer advanced them. */
6734 u32 std_prod_idx = dpr->rx_std_prod_idx;
6735 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6737 tp->rx_refill = false;
6738 for (i = 1; i <= tp->rxq_cnt; i++)
6739 err |= tg3_rx_prodring_xfer(tp, dpr,
6740 &tp->napi[i].prodring);
6744 if (std_prod_idx != dpr->rx_std_prod_idx)
6745 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6746 dpr->rx_std_prod_idx);
6748 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6749 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6750 dpr->rx_jmb_prod_idx);
6755 tw32_f(HOSTCC_MODE, tp->coal_now);
/* Schedule the chip-reset work item at most once: the RESET_TASK_PENDING
 * flag bit guards against double-scheduling until the task clears it.
 */
6761 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6763 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6764 schedule_work(&tp->reset_task);
/* Synchronously cancel any queued/running reset task, then clear the
 * pending flags so a future reset can be scheduled again.  May sleep
 * (cancel_work_sync).
 */
6767 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6769 cancel_work_sync(&tp->reset_task);
6770 tg3_flag_clear(tp, RESET_TASK_PENDING);
6771 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
/* NAPI poll handler for MSI-X vectors 1..N (vector 0 uses tg3_poll).
 * Loops calling tg3_poll_work() until either the budget is exhausted or
 * no tx/rx work remains, then records the status tag and re-enables the
 * vector's interrupt via its mailbox.  The rx_refill re-check closes the
 * race where vector 1 must flush refilled buffers to vector 0's ring.
 * NOTE(review): the surrounding loop construct, labels and return
 * statements are elided from this listing.
 */
6774 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6776 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6777 struct tg3 *tp = tnapi->tp;
6779 struct tg3_hw_status *sblk = tnapi->hw_status;
6782 work_done = tg3_poll_work(tnapi, work_done, budget);
6784 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6787 if (unlikely(work_done >= budget))
6790 /* tp->last_tag is used in tg3_int_reenable() below
6791 * to tell the hw how much work has been processed,
6792 * so we must read it before checking for more work.
6794 tnapi->last_tag = sblk->status_tag;
6795 tnapi->last_irq_tag = tnapi->last_tag;
6798 /* check for RX/TX work to do */
6799 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6800 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6802 /* This test here is not race free, but will reduce
6803 * the number of interrupts by looping again.
6805 if (tnapi == &tp->napi[1] && tp->rx_refill)
6808 napi_complete(napi);
6809 /* Reenable interrupts. */
6810 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6812 /* This test here is synchronized by napi_schedule()
6813 * and napi_complete() to close the race condition.
6815 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
/* Force a coalescing-now event so the refill gets serviced. */
6816 tw32(HOSTCC_MODE, tp->coalesce_mode |
6817 HOSTCC_MODE_ENABLE |
6828 /* work_done is guaranteed to be less than budget. */
6829 napi_complete(napi);
6830 tg3_reset_task_schedule(tp);
/* Inspect the flow-attention, MSI-status and DMA-status registers after
 * the status block reported SD_STATUS_ERROR.  Any "real" error (beyond
 * the benign mbuf low-watermark attention) is logged and triggers a chip
 * reset via the reset task; ERROR_PROCESSED prevents re-entry until the
 * reset completes.
 * NOTE(review): real_error assignments and the tg3_dump_state()-style
 * lines between 6855 and 6864 are elided from this view.
 */
6834 static void tg3_process_error(struct tg3 *tp)
6837 bool real_error = false;
6839 if (tg3_flag(tp, ERROR_PROCESSED))
6842 /* Check Flow Attention register */
6843 val = tr32(HOSTCC_FLOW_ATTN);
6844 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6845 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
6849 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6850 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
6854 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6855 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
6864 tg3_flag_set(tp, ERROR_PROCESSED);
6865 tg3_reset_task_schedule(tp);
/* NAPI poll handler for vector 0 (and for non-MSI-X operation).  Handles
 * chip-error processing, link polling (via tg3_poll_work path), and the
 * two interrupt-ack styles: tagged status (record status_tag) versus the
 * legacy SD_STATUS_UPDATED bit.  Re-enables interrupts once no work is
 * left, or schedules a chip reset on tx recovery.
 * NOTE(review): the enclosing while-loop, tg3_poll_link() call and
 * return statements are elided from this listing.
 */
6868 static int tg3_poll(struct napi_struct *napi, int budget)
6870 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6871 struct tg3 *tp = tnapi->tp;
6873 struct tg3_hw_status *sblk = tnapi->hw_status;
6876 if (sblk->status & SD_STATUS_ERROR)
6877 tg3_process_error(tp);
6881 work_done = tg3_poll_work(tnapi, work_done, budget);
6883 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6886 if (unlikely(work_done >= budget))
6889 if (tg3_flag(tp, TAGGED_STATUS)) {
6890 /* tp->last_tag is used in tg3_int_reenable() below
6891 * to tell the hw how much work has been processed,
6892 * so we must read it before checking for more work.
6894 tnapi->last_tag = sblk->status_tag;
6895 tnapi->last_irq_tag = tnapi->last_tag;
/* Legacy (untagged) mode: ack by clearing the UPDATED bit. */
6898 sblk->status &= ~SD_STATUS_UPDATED;
6900 if (likely(!tg3_has_work(tnapi))) {
6901 napi_complete(napi);
6902 tg3_int_reenable(tnapi);
6910 /* work_done is guaranteed to be less than budget. */
6911 napi_complete(napi);
6912 tg3_reset_task_schedule(tp);
/* Disable all NAPI contexts, highest vector first.  May sleep
 * (napi_disable waits for any in-flight poll to finish).
 */
6916 static void tg3_napi_disable(struct tg3 *tp)
6920 for (i = tp->irq_cnt - 1; i >= 0; i--)
6921 napi_disable(&tp->napi[i].napi);
/* Re-enable all NAPI contexts, vector 0 first. */
6924 static void tg3_napi_enable(struct tg3 *tp)
6928 for (i = 0; i < tp->irq_cnt; i++)
6929 napi_enable(&tp->napi[i].napi);
/* Register the NAPI poll handlers: vector 0 uses tg3_poll (handles link
 * and error events), the remaining vectors use the lighter tg3_poll_msix.
 * 64 is the NAPI weight (per-poll budget).
 */
6932 static void tg3_napi_init(struct tg3 *tp)
6936 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6937 for (i = 1; i < tp->irq_cnt; i++)
6938 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
/* Unregister every NAPI context added by tg3_napi_init(). */
6941 static void tg3_napi_fini(struct tg3 *tp)
6945 for (i = 0; i < tp->irq_cnt; i++)
6946 netif_napi_del(&tp->napi[i].napi);
/* Quiesce the data path: refresh trans_start so the watchdog does not
 * fire during the stop, disable NAPI polling, drop carrier and stop all
 * tx queues.
 */
6949 static inline void tg3_netif_stop(struct tg3 *tp)
6951 tp->dev->trans_start = jiffies; /* prevent tx timeout */
6952 tg3_napi_disable(tp);
6953 netif_carrier_off(tp->dev);
6954 netif_tx_disable(tp->dev);
6957 /* tp->lock must be held */
/* Restart the data path after a reset/reconfiguration: wake all tx
 * queues, restore carrier, re-enable NAPI, mark the status block dirty
 * so the first poll runs, and unmask interrupts.
 */
6958 static inline void tg3_netif_start(struct tg3 *tp)
6962 /* NOTE: unconditional netif_tx_wake_all_queues is only
6963 * appropriate so long as all callers are assured to
6964 * have free tx slots (such as after tg3_init_hw)
6966 netif_tx_wake_all_queues(tp->dev);
6969 netif_carrier_on(tp->dev);
6971 tg3_napi_enable(tp);
6972 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6973 tg3_enable_ints(tp);
/* Mark interrupts as synchronized and wait for any in-flight handlers on
 * every vector to finish.  Must not already be quiesced (BUG_ON).
 * NOTE(review): the irq_sync assignment between 6980 and 6985 is elided
 * from this listing.
 */
6976 static void tg3_irq_quiesce(struct tg3 *tp)
6980 BUG_ON(tp->irq_sync);
6985 for (i = 0; i < tp->irq_cnt; i++)
6986 synchronize_irq(tp->napi[i].irq_vec);
6989 /* Fully shutdown all tg3 driver activity elsewhere in the system.
6990 * If irq_sync is non-zero, then the IRQ handler must be synchronized
6991 * with as well. Most of the time, this is not necessary except when
6992 * shutting down the device.
6994 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6996 spin_lock_bh(&tp->lock);
/* Optionally wait out in-flight IRQ handlers too (see irq_sync arg). */
6998 tg3_irq_quiesce(tp);
/* Release the lock taken by tg3_full_lock(). */
7001 static inline void tg3_full_unlock(struct tg3 *tp)
7003 spin_unlock_bh(&tp->lock);
7006 /* One-shot MSI handler - Chip automatically disables interrupt
7007 * after sending MSI so driver doesn't have to do it.
7009 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7011 struct tg3_napi *tnapi = dev_id;
7012 struct tg3 *tp = tnapi->tp;
/* Warm the caches for the status block and next rx descriptor. */
7014 prefetch(tnapi->hw_status);
7016 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
/* Skip scheduling while tg3_irq_quiesce() has interrupts synced. */
7018 if (likely(!tg3_irq_sync(tp)))
7019 napi_schedule(&tnapi->napi);
7024 /* MSI ISR - No need to check for interrupt sharing and no need to
7025 * flush status block and interrupt mailbox. PCI ordering rules
7026 * guarantee that MSI will arrive after the status block.
7028 static irqreturn_t tg3_msi(int irq, void *dev_id)
7030 struct tg3_napi *tnapi = dev_id;
7031 struct tg3 *tp = tnapi->tp;
7033 prefetch(tnapi->hw_status);
7035 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7037 * Writing any value to intr-mbox-0 clears PCI INTA# and
7038 * chip-internal interrupt pending events.
7039 * Writing non-zero to intr-mbox-0 additional tells the
7040 * NIC to stop sending us irqs, engaging "in-intr-handler"
7043 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7044 if (likely(!tg3_irq_sync(tp)))
7045 napi_schedule(&tnapi->napi);
7047 return IRQ_RETVAL(1);
/* Legacy INTx interrupt handler (untagged status mode).  Verifies the
 * interrupt is ours via SD_STATUS_UPDATED (or the PCI state register
 * during the race window), acks via the interrupt mailbox, and schedules
 * NAPI if there is work.  Returns IRQ_NONE for shared-line interrupts
 * that are not ours.
 * NOTE(review): some lines (handled=0 paths, goto labels) are elided
 * from this listing.
 */
7050 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7052 struct tg3_napi *tnapi = dev_id;
7053 struct tg3 *tp = tnapi->tp;
7054 struct tg3_hw_status *sblk = tnapi->hw_status;
7055 unsigned int handled = 1;
7057 /* In INTx mode, it is possible for the interrupt to arrive at
7058 * the CPU before the status block posted prior to the interrupt.
7059 * Reading the PCI State register will confirm whether the
7060 * interrupt is ours and will flush the status block.
7062 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7063 if (tg3_flag(tp, CHIP_RESETTING) ||
7064 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7071 * Writing any value to intr-mbox-0 clears PCI INTA# and
7072 * chip-internal interrupt pending events.
7073 * Writing non-zero to intr-mbox-0 additional tells the
7074 * NIC to stop sending us irqs, engaging "in-intr-handler"
7077 * Flush the mailbox to de-assert the IRQ immediately to prevent
7078 * spurious interrupts. The flush impacts performance but
7079 * excessive spurious interrupts can be worse in some cases.
7081 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7082 if (tg3_irq_sync(tp))
7084 sblk->status &= ~SD_STATUS_UPDATED;
7085 if (likely(tg3_has_work(tnapi))) {
7086 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7087 napi_schedule(&tnapi->napi);
7089 /* No work, shared interrupt perhaps? re-enable
7090 * interrupts, and flush that PCI write
7092 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7096 return IRQ_RETVAL(handled);
/* Legacy INTx interrupt handler for tagged-status chips.  A repeated
 * status_tag identifies a screaming shared interrupt that is not ours;
 * otherwise ack via the mailbox, record the tag, and schedule NAPI.
 * NOTE(review): some lines (handled=0 paths, goto labels) are elided
 * from this listing.
 */
7099 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7101 struct tg3_napi *tnapi = dev_id;
7102 struct tg3 *tp = tnapi->tp;
7103 struct tg3_hw_status *sblk = tnapi->hw_status;
7104 unsigned int handled = 1;
7106 /* In INTx mode, it is possible for the interrupt to arrive at
7107 * the CPU before the status block posted prior to the interrupt.
7108 * Reading the PCI State register will confirm whether the
7109 * interrupt is ours and will flush the status block.
7111 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7112 if (tg3_flag(tp, CHIP_RESETTING) ||
7113 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7120 * writing any value to intr-mbox-0 clears PCI INTA# and
7121 * chip-internal interrupt pending events.
7122 * writing non-zero to intr-mbox-0 additional tells the
7123 * NIC to stop sending us irqs, engaging "in-intr-handler"
7126 * Flush the mailbox to de-assert the IRQ immediately to prevent
7127 * spurious interrupts. The flush impacts performance but
7128 * excessive spurious interrupts can be worse in some cases.
7130 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7133 * In a shared interrupt configuration, sometimes other devices'
7134 * interrupts will scream. We record the current status tag here
7135 * so that the above check can report that the screaming interrupts
7136 * are unhandled. Eventually they will be silenced.
7138 tnapi->last_irq_tag = sblk->status_tag;
7140 if (tg3_irq_sync(tp))
7143 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7145 napi_schedule(&tnapi->napi);
7148 return IRQ_RETVAL(handled);
7151 /* ISR for interrupt test */
/* Minimal handler used only by the self-test path: if the status block
 * was updated or INTA is asserted, disable interrupts and claim the IRQ;
 * otherwise report it unhandled.
 */
7152 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7154 struct tg3_napi *tnapi = dev_id;
7155 struct tg3 *tp = tnapi->tp;
7156 struct tg3_hw_status *sblk = tnapi->hw_status;
7158 if ((sblk->status & SD_STATUS_UPDATED) ||
7159 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7160 tg3_disable_ints(tp);
7161 return IRQ_RETVAL(1);
7163 return IRQ_RETVAL(0);
7166 #ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: manually invoke the INTx handler on every vector so the
 * device can make progress with interrupts unavailable (e.g. netconsole).
 * Does nothing while interrupts are quiesced.
 */
7167 static void tg3_poll_controller(struct net_device *dev)
7170 struct tg3 *tp = netdev_priv(dev);
7172 if (tg3_irq_sync(tp))
7175 for (i = 0; i < tp->irq_cnt; i++)
7176 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
/* ndo_tx_timeout handler: optionally log the stall, then schedule a full
 * chip reset via the reset work item.
 */
7180 static void tg3_tx_timeout(struct net_device *dev)
7182 struct tg3 *tp = netdev_priv(dev);
7184 if (netif_msg_tx_err(tp)) {
7185 netdev_err(dev, "transmit timed out, resetting\n");
7189 tg3_reset_task_schedule(tp);
7192 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
/* Returns non-zero when a DMA span of len (+8 guard bytes) starting at
 * mapping would wrap past a 4GB boundary -- a chip DMA erratum case.
 * The base > 0xffffdcc0 pre-check cheaply excludes bases that cannot
 * wrap; NOTE(review): the exact constant's derivation is a hardware
 * workaround detail not evident from this listing -- confirm against
 * tg3 errata history before changing.
 */
7193 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7195 u32 base = (u32) mapping & 0xffffffff;
7197 return (base > 0xffffdcc0) && (base + len + 8 < base);
7200 /* Test for DMA addresses > 40-bit */
/* Returns non-zero when the end of the DMA span exceeds the 40-bit
 * address limit on chips with the 40BIT_DMA_BUG erratum.  Only compiled
 * in on 64-bit HIGHMEM configs; elsewhere the (elided) fallback applies.
 */
7201 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7204 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7205 if (tg3_flag(tp, 40BIT_DMA_BUG))
7206 return ((u64) mapping + len) > DMA_BIT_MASK(40);
/* Fill one tx buffer descriptor: split the 64-bit DMA address into
 * hi/lo words, pack length and flags, and pack mss/vlan into the
 * vlan_tag word per the hardware descriptor layout.
 */
7213 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7214 dma_addr_t mapping, u32 len, u32 flags,
7217 txbd->addr_hi = ((u64) mapping >> 32);
7218 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7219 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7220 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
/* Queue one tx fragment at *entry, splitting it into multiple BDs when it
 * exceeds tp->dma_limit, and flagging hardware-erratum hits (short-DMA,
 * 4GB-crossing, 40-bit overflow) back to the caller.  *entry and *budget
 * are advanced in place; returns the hwbug/failure indication.
 * NOTE(review): the hwbug flag assignments and some loop-exit lines are
 * elided from this listing -- the exact return-value protocol cannot be
 * fully confirmed from this view.
 */
7223 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7224 dma_addr_t map, u32 len, u32 flags,
7227 struct tg3 *tp = tnapi->tp;
/* Erratum screens: tiny DMA, 4GB-boundary cross, >40-bit address. */
7230 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7233 if (tg3_4g_overflow_test(map, len))
7236 if (tg3_40bit_overflow_test(tp, map, len))
7239 if (tp->dma_limit) {
7240 u32 prvidx = *entry;
7241 u32 tmp_flag = flags & ~TXD_FLAG_END;
7242 while (len > tp->dma_limit && *budget) {
7243 u32 frag_len = tp->dma_limit;
7244 len -= tp->dma_limit;
7246 /* Avoid the 8byte DMA problem */
7248 len += tp->dma_limit / 2;
7249 frag_len = tp->dma_limit / 2;
7252 tnapi->tx_buffers[*entry].fragmented = true;
7254 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7255 frag_len, tmp_flag, mss, vlan);
7258 *entry = NEXT_TX(*entry);
/* Final (or only) piece of the split fragment. */
7265 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7266 len, flags, mss, vlan);
7268 *entry = NEXT_TX(*entry);
7271 tnapi->tx_buffers[prvidx].fragmented = false;
/* No dma_limit: emit the fragment as a single BD. */
7275 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7276 len, flags, mss, vlan);
7277 *entry = NEXT_TX(*entry);
/* Undo the DMA mappings for a queued skb starting at tx ring slot entry:
 * the head mapping first, then frags 0..last, skipping over any extra
 * slots consumed by dma_limit splitting (txb->fragmented).  last == -1
 * unmaps only the head.
 * NOTE(review): skb assignment and some brace/cleanup lines are elided
 * from this listing.
 */
7283 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7286 struct sk_buff *skb;
7287 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7292 pci_unmap_single(tnapi->tp->pdev,
7293 dma_unmap_addr(txb, mapping),
/* Skip continuation slots created by tg3_tx_frag_set() splitting. */
7297 while (txb->fragmented) {
7298 txb->fragmented = false;
7299 entry = NEXT_TX(entry);
7300 txb = &tnapi->tx_buffers[entry];
7303 for (i = 0; i <= last; i++) {
7304 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7306 entry = NEXT_TX(entry);
7307 txb = &tnapi->tx_buffers[entry];
7309 pci_unmap_page(tnapi->tp->pdev,
7310 dma_unmap_addr(txb, mapping),
7311 skb_frag_size(frag), PCI_DMA_TODEVICE);
7313 while (txb->fragmented) {
7314 txb->fragmented = false;
7315 entry = NEXT_TX(entry);
7316 txb = &tnapi->tx_buffers[entry];
7321 /* Workaround 4GB and 40-bit hardware DMA bugs. */
/* Linearize the offending skb into a fresh copy (with extra headroom for
 * 5701 alignment), map it, and re-queue it as a single BD chain via
 * tg3_tx_frag_set().  On success *pskb is replaced by the new skb; on
 * mapping/queueing failure everything is unwound and freed.
 * NOTE(review): return-value assignments and the *pskb update are elided
 * from this listing.
 */
7322 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7323 struct sk_buff **pskb,
7324 u32 *entry, u32 *budget,
7325 u32 base_flags, u32 mss, u32 vlan)
7327 struct tg3 *tp = tnapi->tp;
7328 struct sk_buff *new_skb, *skb = *pskb;
7329 dma_addr_t new_addr = 0;
7332 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7333 new_skb = skb_copy(skb, GFP_ATOMIC);
/* 5701 needs the payload 4-byte aligned: copy with extra headroom. */
7335 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7337 new_skb = skb_copy_expand(skb,
7338 skb_headroom(skb) + more_headroom,
7339 skb_tailroom(skb), GFP_ATOMIC);
7345 /* New SKB is guaranteed to be linear. */
7346 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7348 /* Make sure the mapping succeeded */
7349 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7350 dev_kfree_skb(new_skb);
7353 u32 save_entry = *entry;
7355 base_flags |= TXD_FLAG_END;
7357 tnapi->tx_buffers[*entry].skb = new_skb;
7358 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7361 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7362 new_skb->len, base_flags,
7364 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7365 dev_kfree_skb(new_skb);
/* Forward declaration: tg3_tso_bug() re-submits GSO segments through
 * tg3_start_xmit(), which is defined below.
 */
7376 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7378 /* Use GSO to workaround a rare TSO bug that may be triggered when the
7379 * TSO header is greater than 80 bytes.
7381 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7383 struct sk_buff *segs, *nskb;
/* Rough worst case: ~3 descriptors per resulting segment. */
7384 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7386 /* Estimate the number of fragments in the worst case */
7387 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7388 netif_stop_queue(tp->dev);
7390 /* netif_tx_stop_queue() must be done before checking
7391 * checking tx index in tg3_tx_avail() below, because in
7392 * tg3_tx(), we update tx index before checking for
7393 * netif_tx_queue_stopped().
7396 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7397 return NETDEV_TX_BUSY;
7399 netif_wake_queue(tp->dev);
/* Software-segment with TSO masked off, then transmit each segment. */
7402 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7404 goto tg3_tso_bug_end;
7410 tg3_start_xmit(nskb, tp->dev);
7416 return NETDEV_TX_OK;
7419 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7420 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
/* Main transmit path.  Outline:
 *   1. pick the tx queue/napi vector and verify ring space;
 *   2. compute base_flags/mss for checksum offload and the various
 *      hardware TSO generations (HW_TSO_1/2/3, plus the >80-byte-header
 *      TSO erratum that is bounced through tg3_tso_bug());
 *   3. add VLAN and hw-timestamp flags;
 *   4. map and queue the head plus each page fragment via
 *      tg3_tx_frag_set(), tracking would_hit_hwbug;
 *   5. if a DMA erratum was hit, unwind and retry through
 *      tigon3_dma_hwbug_workaround();
 *   6. kick the producer mailbox and stop the queue when nearly full.
 * NOTE(review): many lines (braces, labels such as drop/dma_error, some
 * assignments) are elided from this listing -- control flow around the
 * error paths cannot be fully confirmed from this view.
 */
7422 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7424 struct tg3 *tp = netdev_priv(dev);
7425 u32 len, entry, base_flags, mss, vlan = 0;
7427 int i = -1, would_hit_hwbug;
7429 struct tg3_napi *tnapi;
7430 struct netdev_queue *txq;
7433 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7434 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7435 if (tg3_flag(tp, ENABLE_TSS))
7438 budget = tg3_tx_avail(tnapi);
7440 /* We are running in BH disabled context with netif_tx_lock
7441 * and TX reclaim runs via tp->napi.poll inside of a software
7442 * interrupt. Furthermore, IRQ processing runs lockless so we have
7443 * no IRQ context deadlocks to worry about either. Rejoice!
7445 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7446 if (!netif_tx_queue_stopped(txq)) {
7447 netif_tx_stop_queue(txq);
7449 /* This is a hard error, log it. */
7451 "BUG! Tx Ring full when queue awake!\n");
7453 return NETDEV_TX_BUSY;
7456 entry = tnapi->tx_prod;
7458 if (skb->ip_summed == CHECKSUM_PARTIAL)
7459 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7461 mss = skb_shinfo(skb)->gso_size;
/* TSO setup: fix up the IP/TCP headers per hardware TSO generation. */
7464 u32 tcp_opt_len, hdr_len;
7466 if (skb_header_cloned(skb) &&
7467 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7471 tcp_opt_len = tcp_optlen(skb);
7473 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7475 if (!skb_is_gso_v6(skb)) {
7477 iph->tot_len = htons(mss + hdr_len);
/* >80-byte TSO headers trip a chip bug: fall back to GSO. */
7480 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7481 tg3_flag(tp, TSO_BUG))
7482 return tg3_tso_bug(tp, skb);
7484 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7485 TXD_FLAG_CPU_POST_DMA);
7487 if (tg3_flag(tp, HW_TSO_1) ||
7488 tg3_flag(tp, HW_TSO_2) ||
7489 tg3_flag(tp, HW_TSO_3)) {
7490 tcp_hdr(skb)->check = 0;
7491 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7493 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
/* Encode the header length into mss/base_flags per TSO generation. */
7498 if (tg3_flag(tp, HW_TSO_3)) {
7499 mss |= (hdr_len & 0xc) << 12;
7501 base_flags |= 0x00000010;
7502 base_flags |= (hdr_len & 0x3e0) << 5;
7503 } else if (tg3_flag(tp, HW_TSO_2))
7504 mss |= hdr_len << 9;
7505 else if (tg3_flag(tp, HW_TSO_1) ||
7506 tg3_asic_rev(tp) == ASIC_REV_5705) {
7507 if (tcp_opt_len || iph->ihl > 5) {
7510 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7511 mss |= (tsflags << 11);
7514 if (tcp_opt_len || iph->ihl > 5) {
7517 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7518 base_flags |= tsflags << 12;
7523 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7524 !mss && skb->len > VLAN_ETH_FRAME_LEN)
7525 base_flags |= TXD_FLAG_JMB_PKT;
7527 if (vlan_tx_tag_present(skb)) {
7528 base_flags |= TXD_FLAG_VLAN;
7529 vlan = vlan_tx_tag_get(skb);
7532 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7533 tg3_flag(tp, TX_TSTAMP_EN)) {
7534 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7535 base_flags |= TXD_FLAG_HWTSTAMP;
/* Map and queue the linear head. */
7538 len = skb_headlen(skb);
7540 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7541 if (pci_dma_mapping_error(tp->pdev, mapping))
7545 tnapi->tx_buffers[entry].skb = skb;
7546 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7548 would_hit_hwbug = 0;
7550 if (tg3_flag(tp, 5701_DMA_BUG))
7551 would_hit_hwbug = 1;
7553 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7554 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7556 would_hit_hwbug = 1;
7557 } else if (skb_shinfo(skb)->nr_frags > 0) {
7560 if (!tg3_flag(tp, HW_TSO_1) &&
7561 !tg3_flag(tp, HW_TSO_2) &&
7562 !tg3_flag(tp, HW_TSO_3))
7565 /* Now loop through additional data
7566 * fragments, and queue them.
7568 last = skb_shinfo(skb)->nr_frags - 1;
7569 for (i = 0; i <= last; i++) {
7570 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7572 len = skb_frag_size(frag);
7573 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7574 len, DMA_TO_DEVICE);
7576 tnapi->tx_buffers[entry].skb = NULL;
7577 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7579 if (dma_mapping_error(&tp->pdev->dev, mapping))
7583 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7585 ((i == last) ? TXD_FLAG_END : 0),
7587 would_hit_hwbug = 1;
/* Erratum hit: unwind the BDs and bounce through the copy workaround. */
7593 if (would_hit_hwbug) {
7594 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7596 /* If the workaround fails due to memory/mapping
7597 * failure, silently drop this packet.
7599 entry = tnapi->tx_prod;
7600 budget = tg3_tx_avail(tnapi);
7601 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7602 base_flags, mss, vlan))
7606 skb_tx_timestamp(skb);
7607 netdev_tx_sent_queue(txq, skb->len);
7609 /* Sync BD data before updating mailbox */
7612 /* Packets are ready, update Tx producer idx local and on card. */
7613 tw32_tx_mbox(tnapi->prodmbox, entry);
7615 tnapi->tx_prod = entry;
7616 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7617 netif_tx_stop_queue(txq);
7619 /* netif_tx_stop_queue() must be done before checking
7620 * checking tx index in tg3_tx_avail() below, because in
7621 * tg3_tx(), we update tx index before checking for
7622 * netif_tx_queue_stopped().
7625 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7626 netif_tx_wake_queue(txq);
7630 return NETDEV_TX_OK;
/* dma_error path: unmap what was queued and drop the packet. */
7633 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7634 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7639 return NETDEV_TX_OK;
/* Enable or disable internal MAC loopback by rewriting tp->mac_mode:
 * set/clear MAC_MODE_PORT_INT_LPBACK, pick MII vs GMII port mode, and
 * adjust link polarity per chip family; finally write MAC_MODE.
 * NOTE(review): the if/else braces and an enable test are elided from
 * this listing.
 */
7642 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7645 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7646 MAC_MODE_PORT_MODE_MASK);
7648 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7650 if (!tg3_flag(tp, 5705_PLUS))
7651 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7653 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7654 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7656 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
/* Disable path: drop internal loopback and fix polarity. */
7658 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7660 if (tg3_flag(tp, 5705_PLUS) ||
7661 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7662 tg3_asic_rev(tp) == ASIC_REV_5700)
7663 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7666 tw32(MAC_MODE, tp->mac_mode);
/* Put the PHY into loopback at the requested speed (internal, or external
 * when extlpbk): disable APD/auto-MDIX, build a BMCR value with loopback
 * and the right speed/duplex bits, force master mode for 1000Mb copper,
 * apply FET-specific test-register settings, reset RX on 5780-class
 * serdes, and program MAC_MODE to match the chosen speed.
 * NOTE(review): switch/case structure around the speed selection and a
 * few statements are elided from this listing.
 */
7670 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7672 u32 val, bmcr, mac_mode, ptest = 0;
7674 tg3_phy_toggle_apd(tp, false);
7675 tg3_phy_toggle_automdix(tp, 0);
7677 if (extlpbk && tg3_phy_set_extloopbk(tp))
7680 bmcr = BMCR_FULLDPLX;
7685 bmcr |= BMCR_SPEED100;
/* FET PHYs cap at 100Mb; others select 1000Mb here. */
7689 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7691 bmcr |= BMCR_SPEED100;
7694 bmcr |= BMCR_SPEED1000;
7699 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7700 tg3_readphy(tp, MII_CTRL1000, &val);
7701 val |= CTL1000_AS_MASTER |
7702 CTL1000_ENABLE_MASTER;
7703 tg3_writephy(tp, MII_CTRL1000, val);
7705 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7706 MII_TG3_FET_PTEST_TRIM_2;
7707 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7710 bmcr |= BMCR_LOOPBACK;
7712 tg3_writephy(tp, MII_BMCR, bmcr);
7714 /* The write needs to be flushed for the FETs */
7715 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7716 tg3_readphy(tp, MII_BMCR, &bmcr);
7720 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7721 tg3_asic_rev(tp) == ASIC_REV_5785) {
7722 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7723 MII_TG3_FET_PTEST_FRC_TX_LINK |
7724 MII_TG3_FET_PTEST_FRC_TX_LOCK);
7726 /* The write needs to be flushed for the AC131 */
7727 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7730 /* Reset to prevent losing 1st rx packet intermittently */
7731 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7732 tg3_flag(tp, 5780_CLASS)) {
7733 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7735 tw32_f(MAC_RX_MODE, tp->rx_mode);
7738 mac_mode = tp->mac_mode &
7739 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7740 if (speed == SPEED_1000)
7741 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7743 mac_mode |= MAC_MODE_PORT_MODE_MII;
/* 5700 quirks: polarity depends on the exact PHY, 5401 needs LED mode. */
7745 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
7746 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7748 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7749 mac_mode &= ~MAC_MODE_LINK_POLARITY;
7750 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7751 mac_mode |= MAC_MODE_LINK_POLARITY;
7753 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7754 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7757 tw32(MAC_MODE, mac_mode);
/* ethtool NETIF_F_LOOPBACK feature handler: enter or leave internal MAC
 * loopback under tp->lock, forcing carrier on when entering and a full
 * PHY link re-check when leaving.  No-ops if already in the requested
 * state (PORT_INT_LPBACK test).
 */
7763 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7765 struct tg3 *tp = netdev_priv(dev);
7767 if (features & NETIF_F_LOOPBACK) {
7768 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7771 spin_lock_bh(&tp->lock);
7772 tg3_mac_loopback(tp, true);
7773 netif_carrier_on(tp->dev);
7774 spin_unlock_bh(&tp->lock);
7775 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7777 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7780 spin_lock_bh(&tp->lock);
7781 tg3_mac_loopback(tp, false);
7782 /* Force link status check */
7783 tg3_setup_phy(tp, 1);
7784 spin_unlock_bh(&tp->lock);
7785 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
/* ndo_fix_features: 5780-class chips cannot do TSO with jumbo MTU, so
 * strip all TSO feature bits in that combination.
 */
7789 static netdev_features_t tg3_fix_features(struct net_device *dev,
7790 netdev_features_t features)
7792 struct tg3 *tp = netdev_priv(dev);
7794 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7795 features &= ~NETIF_F_ALL_TSO;
/* ndo_set_features: only NETIF_F_LOOPBACK toggles need action here, and
 * only while the interface is running.
 */
7800 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7802 netdev_features_t changed = dev->features ^ features;
7804 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7805 tg3_set_loopback(dev, features);
/* Free every rx buffer held by a producer ring set.  For a per-vector
 * set (not napi[0]'s), only the cons..prod window is populated; for the
 * primary set, walk the full std (and, when jumbo-capable and not
 * 5780-class, jumbo) rings.
 * NOTE(review): return/brace lines between the two halves are elided
 * from this listing.
 */
7810 static void tg3_rx_prodring_free(struct tg3 *tp,
7811 struct tg3_rx_prodring_set *tpr)
7815 if (tpr != &tp->napi[0].prodring) {
7816 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7817 i = (i + 1) & tp->rx_std_ring_mask)
7818 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7821 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7822 for (i = tpr->rx_jmb_cons_idx;
7823 i != tpr->rx_jmb_prod_idx;
7824 i = (i + 1) & tp->rx_jmb_ring_mask) {
7825 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
/* Primary ring set: free every slot unconditionally. */
7833 for (i = 0; i <= tp->rx_std_ring_mask; i++)
7834 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7837 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7838 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7839 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7844 /* Initialize rx rings for packet processing.
7846 * The chip has been shut down and the driver detached from
7847 * the networking, so no interrupts or new tx packets will
7848 * end up in the driver. tp->{tx,}lock are held and thus
/* Reset a producer ring set and repopulate it: zero the indices and
 * bookkeeping arrays, write the invariant descriptor fields (length,
 * flags, opaque ring/index cookie), then allocate fresh rx data for
 * rx_pending std entries and, when a jumbo ring is enabled,
 * rx_jumbo_pending jumbo entries.  Partial allocation shrinks the
 * pending count with a warning rather than failing (except when the
 * very first buffer cannot be allocated -- elided error path).
 */
7851 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7852 struct tg3_rx_prodring_set *tpr)
7854 u32 i, rx_pkt_dma_sz;
7856 tpr->rx_std_cons_idx = 0;
7857 tpr->rx_std_prod_idx = 0;
7858 tpr->rx_jmb_cons_idx = 0;
7859 tpr->rx_jmb_prod_idx = 0;
/* Per-vector sets share napi[0]'s descriptors; just clear bookkeeping. */
7861 if (tpr != &tp->napi[0].prodring) {
7862 memset(&tpr->rx_std_buffers[0], 0,
7863 TG3_RX_STD_BUFF_RING_SIZE(tp));
7864 if (tpr->rx_jmb_buffers)
7865 memset(&tpr->rx_jmb_buffers[0], 0,
7866 TG3_RX_JMB_BUFF_RING_SIZE(tp));
7870 /* Zero out all descriptors. */
7871 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7873 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7874 if (tg3_flag(tp, 5780_CLASS) &&
7875 tp->dev->mtu > ETH_DATA_LEN)
7876 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7877 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7879 /* Initialize invariants of the rings, we only set this
7880 * stuff once. This works because the card does not
7881 * write into the rx buffer posting rings.
7883 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7884 struct tg3_rx_buffer_desc *rxd;
7886 rxd = &tpr->rx_std[i];
7887 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7888 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7889 rxd->opaque = (RXD_OPAQUE_RING_STD |
7890 (i << RXD_OPAQUE_INDEX_SHIFT));
7893 /* Now allocate fresh SKBs for each rx ring. */
7894 for (i = 0; i < tp->rx_pending; i++) {
7895 unsigned int frag_size;
7897 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
7899 netdev_warn(tp->dev,
7900 "Using a smaller RX standard ring. Only "
7901 "%d out of %d buffers were allocated "
7902 "successfully\n", i, tp->rx_pending);
7910 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7913 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7915 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7918 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7919 struct tg3_rx_buffer_desc *rxd;
7921 rxd = &tpr->rx_jmb[i].std;
7922 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7923 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7925 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7926 (i << RXD_OPAQUE_INDEX_SHIFT));
7929 for (i = 0; i < tp->rx_jumbo_pending; i++) {
7930 unsigned int frag_size;
7932 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
7934 netdev_warn(tp->dev,
7935 "Using a smaller RX jumbo ring. Only %d "
7936 "out of %d buffers were allocated "
7937 "successfully\n", i, tp->rx_jumbo_pending);
7940 tp->rx_jumbo_pending = i;
/* Error path: release whatever was allocated so far. */
7949 tg3_rx_prodring_free(tp, tpr);
/* Release everything tg3_rx_prodring_init() allocated: the bookkeeping
 * arrays (kfree tolerates NULL) and the coherent descriptor rings.
 * NOTE(review): the NULL guards and pointer resets for rx_std/rx_jmb
 * around the dma_free_coherent calls are elided from this listing.
 */
7953 static void tg3_rx_prodring_fini(struct tg3 *tp,
7954 struct tg3_rx_prodring_set *tpr)
7956 kfree(tpr->rx_std_buffers);
7957 tpr->rx_std_buffers = NULL;
7958 kfree(tpr->rx_jmb_buffers);
7959 tpr->rx_jmb_buffers = NULL;
7961 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7962 tpr->rx_std, tpr->rx_std_mapping);
7966 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7967 tpr->rx_jmb, tpr->rx_jmb_mapping);
/* Allocate one producer ring set: zeroed bookkeeping arrays plus coherent
 * DMA descriptor rings (std always; jumbo only when the chip is jumbo
 * capable and not 5780-class).  On any failure, unwinds via
 * tg3_rx_prodring_fini().
 * NOTE(review): return statements and error-path goto lines are elided
 * from this listing.
 */
7972 static int tg3_rx_prodring_init(struct tg3 *tp,
7973 struct tg3_rx_prodring_set *tpr)
7975 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7977 if (!tpr->rx_std_buffers)
7980 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7981 TG3_RX_STD_RING_BYTES(tp),
7982 &tpr->rx_std_mapping,
7987 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7988 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7990 if (!tpr->rx_jmb_buffers)
7993 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7994 TG3_RX_JMB_RING_BYTES(tp),
7995 &tpr->rx_jmb_mapping,
/* Error path: undo partial allocations. */
8004 tg3_rx_prodring_fini(tp, tpr);
8008 /* Free up pending packets in all rx/tx rings.
8010 * The chip has been shut down and the driver detached from
8011 * the networking, so no interrupts or new tx packets will
8012 * end up in the driver. tp->{tx,}lock is not held and we are not
8013 * in an interrupt context and thus may sleep.
/* Free pending packets in every rx producer ring and every tx ring.
 * Per the header comment above: chip is shut down, no locks held, may
 * sleep. For each tx slot with an skb, unmap its DMA fragments and
 * free the skb, then reset the netdev tx-queue byte counters.
 */
8015 static void tg3_free_rings(struct tg3 *tp)
8019 for (j = 0; j < tp->irq_cnt; j++) {
8020 struct tg3_napi *tnapi = &tp->napi[j];
8022 tg3_rx_prodring_free(tp, &tnapi->prodring);
/* Vector may have no tx ring (e.g. rx-only vectors). */
8024 if (!tnapi->tx_buffers)
8027 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8028 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8033 tg3_tx_skb_unmap(tnapi, i,
8034 skb_shinfo(skb)->nr_frags - 1);
8036 dev_kfree_skb_any(skb);
8038 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8042 /* Initialize tx/rx rings for packet processing.
8044 * The chip has been shut down and the driver detached from
8045 * the networking, so no interrupts or new tx packets will
8046 * end up in the driver. tp->{tx,}lock are held and thus
/* (Re)initialize all tx/rx rings for packet processing: clear per-vector
 * status-block tags and contents, zero the tx and rx-return rings, then
 * repopulate each rx producer ring via tg3_rx_prodring_alloc().
 * Returns non-zero on producer-ring allocation failure (error handling
 * lines elided in this view).
 */
8049 static int tg3_init_rings(struct tg3 *tp)
8053 /* Free up all the SKBs. */
8056 for (i = 0; i < tp->irq_cnt; i++) {
8057 struct tg3_napi *tnapi = &tp->napi[i];
8059 tnapi->last_tag = 0;
8060 tnapi->last_irq_tag = 0;
8061 tnapi->hw_status->status = 0;
8062 tnapi->hw_status->status_tag = 0;
8063 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8068 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8070 tnapi->rx_rcb_ptr = 0;
8072 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8074 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
/* Release per-vector tx resources: the DMA-coherent tx descriptor ring
 * and the kmalloc'ed tx shadow buffer array. Iterates irq_max (not
 * irq_cnt) so resources of currently unused vectors are freed too.
 */
8083 static void tg3_mem_tx_release(struct tg3 *tp)
8087 for (i = 0; i < tp->irq_max; i++) {
8088 struct tg3_napi *tnapi = &tp->napi[i];
8090 if (tnapi->tx_ring) {
8091 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8092 tnapi->tx_ring, tnapi->tx_desc_mapping);
8093 tnapi->tx_ring = NULL;
8096 kfree(tnapi->tx_buffers);
8097 tnapi->tx_buffers = NULL;
/* Allocate per-queue tx resources (shadow buffer array + DMA-coherent
 * tx ring) for txq_cnt queues. With TSS enabled, vector 0 handles no tx
 * interrupts, so allocation starts at a later vector (the tnapi advance
 * is elided in this view). On failure, tg3_mem_tx_release() cleans up.
 */
8101 static int tg3_mem_tx_acquire(struct tg3 *tp)
8104 struct tg3_napi *tnapi = &tp->napi[0];
8106 /* If multivector TSS is enabled, vector 0 does not handle
8107 * tx interrupts. Don't allocate any resources for it.
8109 if (tg3_flag(tp, ENABLE_TSS))
8112 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8113 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8114 TG3_TX_RING_SIZE, GFP_KERNEL);
8115 if (!tnapi->tx_buffers)
8118 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8120 &tnapi->tx_desc_mapping,
8122 if (!tnapi->tx_ring)
8129 tg3_mem_tx_release(tp);
/* Release per-vector rx resources: each vector's producer ring set and
 * its DMA-coherent rx return (completion) ring. Iterates irq_max so
 * resources of unused vectors are freed too.
 */
8133 static void tg3_mem_rx_release(struct tg3 *tp)
8137 for (i = 0; i < tp->irq_max; i++) {
8138 struct tg3_napi *tnapi = &tp->napi[i];
8140 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8145 dma_free_coherent(&tp->pdev->dev,
8146 TG3_RX_RCB_RING_BYTES(tp),
8148 tnapi->rx_rcb_mapping);
8149 tnapi->rx_rcb = NULL;
/* Allocate per-vector rx resources: a producer ring set for each of
 * 'limit' vectors and a zeroed DMA-coherent rx return ring for every
 * vector that handles rx completions. With RSS, vector 0 still owns the
 * (true hardware) producer ring but gets no return ring of its own.
 * On failure, tg3_mem_rx_release() undoes partial allocations.
 */
8153 static int tg3_mem_rx_acquire(struct tg3 *tp)
8155 unsigned int i, limit;
8157 limit = tp->rxq_cnt;
8159 /* If RSS is enabled, we need a (dummy) producer ring
8160 * set on vector zero. This is the true hw prodring.
8162 if (tg3_flag(tp, ENABLE_RSS))
8165 for (i = 0; i < limit; i++) {
8166 struct tg3_napi *tnapi = &tp->napi[i];
8168 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8171 /* If multivector RSS is enabled, vector 0
8172 * does not handle rx or tx interrupts.
8173 * Don't allocate any resources for it.
8175 if (!i && tg3_flag(tp, ENABLE_RSS))
8178 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8179 TG3_RX_RCB_RING_BYTES(tp),
8180 &tnapi->rx_rcb_mapping,
8181 GFP_KERNEL | __GFP_ZERO);
8189 tg3_mem_rx_release(tp);
8194 * Must not be invoked with interrupt sources disabled and
8195 * the hardware shutdown down.
/* Free all DMA-consistent memory: per-vector hardware status blocks,
 * then all rx/tx ring memory, then the shared hardware statistics
 * block. Per the header comment above: must not be called with
 * interrupts disabled / hardware shut down in the wrong order.
 */
8197 static void tg3_free_consistent(struct tg3 *tp)
8201 for (i = 0; i < tp->irq_cnt; i++) {
8202 struct tg3_napi *tnapi = &tp->napi[i];
8204 if (tnapi->hw_status) {
8205 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8207 tnapi->status_mapping);
8208 tnapi->hw_status = NULL;
8212 tg3_mem_rx_release(tp);
8213 tg3_mem_tx_release(tp);
/* hw_stats free is presumably guarded by a NULL check (elided here). */
8216 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8217 tp->hw_stats, tp->stats_mapping);
8218 tp->hw_stats = NULL;
8223 * Must not be invoked with interrupt sources disabled and
8224 * the hardware shutdown down. Can sleep.
/* Allocate all DMA-consistent memory: the shared hardware stats block,
 * one zeroed hardware status block per vector, then tx and rx ring
 * memory. With RSS, the rx-return producer index for vectors 1..3 lives
 * in repurposed status-block fields (rx_jumbo_consumer / reserved /
 * rx_mini_consumer); rx_rcb_prod_idx caches a pointer to the right one.
 * Any failure funnels to tg3_free_consistent().
 */
8226 static int tg3_alloc_consistent(struct tg3 *tp)
8230 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8231 sizeof(struct tg3_hw_stats),
8233 GFP_KERNEL | __GFP_ZERO);
8237 for (i = 0; i < tp->irq_cnt; i++) {
8238 struct tg3_napi *tnapi = &tp->napi[i];
8239 struct tg3_hw_status *sblk;
8241 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8243 &tnapi->status_mapping,
8244 GFP_KERNEL | __GFP_ZERO);
8245 if (!tnapi->hw_status)
8248 sblk = tnapi->hw_status;
8250 if (tg3_flag(tp, ENABLE_RSS)) {
8251 u16 *prodptr = NULL;
8254 * When RSS is enabled, the status block format changes
8255 * slightly. The "rx_jumbo_consumer", "reserved",
8256 * and "rx_mini_consumer" members get mapped to the
8257 * other three rx return ring producer indexes.
/* Per-vector selection (switch arms elided in this view). */
8261 prodptr = &sblk->idx[0].rx_producer;
8264 prodptr = &sblk->rx_jumbo_consumer;
8267 prodptr = &sblk->reserved;
8270 prodptr = &sblk->rx_mini_consumer;
8273 tnapi->rx_rcb_prod_idx = prodptr;
8275 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8279 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8285 tg3_free_consistent(tp);
8289 #define MAX_WAIT_CNT 1000
8291 /* To stop a block, clear the enable bit and poll till it
8292 * clears. tp->lock is held.
/* Stop one hardware block: clear its enable bit and poll (up to
 * MAX_WAIT_CNT iterations) until the bit reads back clear. tp->lock is
 * held (see comment above). On 5705_PLUS chips some blocks cannot be
 * disabled and are reported as success. Returns non-zero on timeout;
 * the timeout message is suppressed when 'silent' is set.
 */
8294 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
8299 if (tg3_flag(tp, 5705_PLUS)) {
8306 /* We can't enable/disable these bits of the
8307 * 5705/5750, just say success.
8320 for (i = 0; i < MAX_WAIT_CNT; i++) {
8323 if ((val & enable_bit) == 0)
8327 if (i == MAX_WAIT_CNT && !silent) {
8328 dev_err(&tp->pdev->dev,
8329 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8337 /* tp->lock is held. */
/* Quiesce the chip: disable interrupts and the MAC receiver, then stop
 * the rx path blocks, the tx path blocks, the host coalescing / DMA /
 * buffer-manager blocks (in that order), reset the FTQ, and finally
 * clear every vector's status block. Errors from the individual
 * tg3_stop_block() calls are OR'ed into the return value. tp->lock held.
 */
8338 static int tg3_abort_hw(struct tg3 *tp, int silent)
8342 tg3_disable_ints(tp);
8344 tp->rx_mode &= ~RX_MODE_ENABLE;
8345 tw32_f(MAC_RX_MODE, tp->rx_mode);
/* Stop the receive-side blocks first so no new frames flow in. */
8348 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8349 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8350 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8351 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8352 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8353 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8355 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8356 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8357 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8358 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8359 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8360 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8361 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8363 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8364 tw32_f(MAC_MODE, tp->mac_mode);
8367 tp->tx_mode &= ~TX_MODE_ENABLE;
8368 tw32_f(MAC_TX_MODE, tp->tx_mode);
/* Poll for the MAC transmitter to actually stop. */
8370 for (i = 0; i < MAX_WAIT_CNT; i++) {
8372 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8375 if (i >= MAX_WAIT_CNT) {
8376 dev_err(&tp->pdev->dev,
8377 "%s timed out, TX_MODE_ENABLE will not clear "
8378 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8382 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8383 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8384 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
/* Pulse the flow-through queue reset. */
8386 tw32(FTQ_RESET, 0xffffffff);
8387 tw32(FTQ_RESET, 0x00000000);
8389 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8390 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8392 for (i = 0; i < tp->irq_cnt; i++) {
8393 struct tg3_napi *tnapi = &tp->napi[i];
8394 if (tnapi->hw_status)
8395 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8401 /* Save PCI command register before chip reset */
/* Snapshot the PCI command register into tp->pci_cmd; chip reset can
 * clear the memory-enable bit, so it is restored afterwards by
 * tg3_restore_pci_state().
 */
8402 static void tg3_save_pci_state(struct tg3 *tp)
8404 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8407 /* Restore PCI state after chip reset */
/* Restore PCI configuration after a chip reset: indirect register
 * access, PCISTATE (retry policy + APE access windows), the saved
 * PCI_COMMAND word, cacheline size / latency timer for non-PCIe parts,
 * PCI-X relaxed ordering, and — on 5780-class chips using MSI — the MSI
 * enable bit which the reset clears.
 */
8408 static void tg3_restore_pci_state(struct tg3 *tp)
8412 /* Re-enable indirect register accesses. */
8413 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8414 tp->misc_host_ctrl);
8416 /* Set MAX PCI retry to zero. */
8417 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8418 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8419 tg3_flag(tp, PCIX_MODE))
8420 val |= PCISTATE_RETRY_SAME_DMA;
8421 /* Allow reads and writes to the APE register and memory space. */
8422 if (tg3_flag(tp, ENABLE_APE))
8423 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8424 PCISTATE_ALLOW_APE_SHMEM_WR |
8425 PCISTATE_ALLOW_APE_PSPACE_WR;
8426 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8428 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8430 if (!tg3_flag(tp, PCI_EXPRESS)) {
8431 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8432 tp->pci_cacheline_sz);
8433 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8437 /* Make sure PCI-X relaxed ordering bit is clear. */
8438 if (tg3_flag(tp, PCIX_MODE)) {
8441 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8443 pcix_cmd &= ~PCI_X_CMD_ERO;
8444 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8448 if (tg3_flag(tp, 5780_CLASS)) {
8450 /* Chip reset on 5780 will reset MSI enable bit,
8451 * so need to restore it.
8453 if (tg3_flag(tp, USING_MSI)) {
8456 pci_read_config_word(tp->pdev,
8457 tp->msi_cap + PCI_MSI_FLAGS,
8459 pci_write_config_word(tp->pdev,
8460 tp->msi_cap + PCI_MSI_FLAGS,
8461 ctrl | PCI_MSI_FLAGS_ENABLE);
8462 val = tr32(MSGINT_MODE);
8463 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8468 /* tp->lock is held. */
/* Perform a full GRC core-clock chip reset and bring the device back to
 * a sane post-reset state. tp->lock is held. Sequence: grab the GRC APE
 * lock, save PCI state, quiesce irq handlers (CHIP_RESETTING flag +
 * per-vector status clear + synchronize_irq), issue the
 * GRC_MISC_CFG_CORECLK_RESET write with chip-specific workarounds,
 * restore PCI state, re-enable the memory arbiter, restore GRC/MAC
 * modes, wait for firmware via tg3_poll_fw(), and re-probe ASF state
 * from NIC SRAM. Returns the tg3_poll_fw() result (elided lines carry
 * the remaining control flow).
 */
8469 static int tg3_chip_reset(struct tg3 *tp)
8472 void (*write_op)(struct tg3 *, u32, u32);
8477 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8479 /* No matching tg3_nvram_unlock() after this because
8480 * chip reset below will undo the nvram lock.
8482 tp->nvram_lock_cnt = 0;
8484 /* GRC_MISC_CFG core clock reset will clear the memory
8485 * enable bit in PCI register 4 and the MSI enable bit
8486 * on some chips, so we save relevant registers here.
8488 tg3_save_pci_state(tp);
8490 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8491 tg3_flag(tp, 5755_PLUS))
8492 tw32(GRC_FASTBOOT_PC, 0);
8495 * We must avoid the readl() that normally takes place.
8496 * It locks machines, causes machine checks, and other
8497 * fun things. So, temporarily disable the 5701
8498 * hardware workaround, while we do the reset.
8500 write_op = tp->write32;
8501 if (write_op == tg3_write_flush_reg32)
8502 tp->write32 = tg3_write32;
8504 /* Prevent the irq handler from reading or writing PCI registers
8505 * during chip reset when the memory enable bit in the PCI command
8506 * register may be cleared. The chip does not generate interrupt
8507 * at this time, but the irq handler may still be called due to irq
8508 * sharing or irqpoll.
8510 tg3_flag_set(tp, CHIP_RESETTING);
8511 for (i = 0; i < tp->irq_cnt; i++) {
8512 struct tg3_napi *tnapi = &tp->napi[i];
8513 if (tnapi->hw_status) {
8514 tnapi->hw_status->status = 0;
8515 tnapi->hw_status->status_tag = 0;
8517 tnapi->last_tag = 0;
8518 tnapi->last_irq_tag = 0;
/* Ensure no irq handler is still running before the reset proper. */
8522 for (i = 0; i < tp->irq_cnt; i++)
8523 synchronize_irq(tp->napi[i].irq_vec);
8525 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8526 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8527 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8531 val = GRC_MISC_CFG_CORECLK_RESET;
8533 if (tg3_flag(tp, PCI_EXPRESS)) {
8534 /* Force PCIe 1.0a mode */
8535 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8536 !tg3_flag(tp, 57765_PLUS) &&
8537 tr32(TG3_PCIE_PHY_TSTCTL) ==
8538 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8539 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8541 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
8542 tw32(GRC_MISC_CFG, (1 << 29));
8547 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
8548 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8549 tw32(GRC_VCPU_EXT_CTRL,
8550 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8553 /* Manage gphy power for all CPMU absent PCIe devices. */
8554 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8555 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
/* This write is the reset itself. */
8557 tw32(GRC_MISC_CFG, val);
8559 /* restore 5701 hardware bug workaround write method */
8560 tp->write32 = write_op;
8562 /* Unfortunately, we have to delay before the PCI read back.
8563 * Some 575X chips even will not respond to a PCI cfg access
8564 * when the reset command is given to the chip.
8566 * How do these hardware designers expect things to work
8567 * properly if the PCI write is posted for a long period
8568 * of time? It is always necessary to have some method by
8569 * which a register read back can occur to push the write
8570 * out which does the reset.
8572 * For most tg3 variants the trick below was working.
8577 /* Flush PCI posted writes. The normal MMIO registers
8578 * are inaccessible at this time so this is the only
8579 * way to make this reliably (actually, this is no longer
8580 * the case, see above). I tried to use indirect
8581 * register read/write but this upset some 5701 variants.
8583 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8587 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8590 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
8594 /* Wait for link training to complete. */
8595 for (j = 0; j < 5000; j++)
8598 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8599 pci_write_config_dword(tp->pdev, 0xc4,
8600 cfg_val | (1 << 15));
8603 /* Clear the "no snoop" and "relaxed ordering" bits. */
8604 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8606 * Older PCIe devices only support the 128 byte
8607 * MPS setting. Enforce the restriction.
8609 if (!tg3_flag(tp, CPMU_PRESENT))
8610 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8611 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8613 /* Clear error status */
8614 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8615 PCI_EXP_DEVSTA_CED |
8616 PCI_EXP_DEVSTA_NFED |
8617 PCI_EXP_DEVSTA_FED |
8618 PCI_EXP_DEVSTA_URD);
8621 tg3_restore_pci_state(tp);
8623 tg3_flag_clear(tp, CHIP_RESETTING);
8624 tg3_flag_clear(tp, ERROR_PROCESSED);
8627 if (tg3_flag(tp, 5780_CLASS))
8628 val = tr32(MEMARB_MODE);
8629 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8631 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
8633 tw32(0x5000, 0x400);
8636 if (tg3_flag(tp, IS_SSB_CORE)) {
8638 * BCM4785: In order to avoid repercussions from using
8639 * potentially defective internal ROM, stop the Rx RISC CPU,
8640 * which is not required.
8643 tg3_halt_cpu(tp, RX_CPU_BASE);
8646 tw32(GRC_MODE, tp->grc_mode);
8648 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
8651 tw32(0xc4, val | (1 << 15));
8654 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8655 tg3_asic_rev(tp) == ASIC_REV_5705) {
8656 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8657 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
8658 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8659 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
/* Re-select the MAC port mode for serdes PHYs. */
8662 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8663 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8665 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8666 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8671 tw32_f(MAC_MODE, val);
8674 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8676 err = tg3_poll_fw(tp);
8682 if (tg3_flag(tp, PCI_EXPRESS) &&
8683 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
8684 tg3_asic_rev(tp) != ASIC_REV_5785 &&
8685 !tg3_flag(tp, 57765_PLUS)) {
8688 tw32(0x7c00, val | (1 << 25));
8691 if (tg3_asic_rev(tp) == ASIC_REV_5720) {
8692 val = tr32(TG3_CPMU_CLCK_ORIDE);
8693 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8696 /* Reprobe ASF enable state. */
8697 tg3_flag_clear(tp, ENABLE_ASF);
8698 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8699 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8700 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8703 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8704 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8705 tg3_flag_set(tp, ENABLE_ASF);
8706 tp->last_event_jiffies = jiffies;
8707 if (tg3_flag(tp, 5750_PLUS))
8708 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8715 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8716 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8718 /* tp->lock is held. */
/* Halt the chip for 'kind' of shutdown: signal firmware pre-reset,
 * abort the hardware, perform the chip reset, reprogram the MAC
 * address, and signal firmware post-reset. Statistics are snapshotted
 * into net_stats_prev/estats_prev so they survive the reset, and the
 * live hw_stats block is zeroed for a fresh sample. tp->lock is held.
 */
8719 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8725 tg3_write_sig_pre_reset(tp, kind);
8727 tg3_abort_hw(tp, silent);
8728 err = tg3_chip_reset(tp);
8730 __tg3_set_mac_addr(tp, 0);
8732 tg3_write_sig_legacy(tp, kind);
8733 tg3_write_sig_post_reset(tp, kind);
8736 /* Save the stats across chip resets... */
8737 tg3_get_nstats(tp, &tp->net_stats_prev);
8738 tg3_get_estats(tp, &tp->estats_prev);
8740 /* And make sure the next sample is new data */
8741 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
/* ndo_set_mac_address handler. Validates and copies the new address
 * into dev->dev_addr; if the interface is running, programs it into the
 * MAC under tp->lock. When ASF firmware owns MAC address slot 1 (slot 1
 * differs from slot 0 and is non-zero), that slot is left untouched via
 * skip_mac_1. Returns -EADDRNOTAVAIL for an invalid address.
 */
8750 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8752 struct tg3 *tp = netdev_priv(dev);
8753 struct sockaddr *addr = p;
8754 int err = 0, skip_mac_1 = 0;
8756 if (!is_valid_ether_addr(addr->sa_data))
8757 return -EADDRNOTAVAIL;
8759 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8761 if (!netif_running(dev))
8764 if (tg3_flag(tp, ENABLE_ASF)) {
8765 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8767 addr0_high = tr32(MAC_ADDR_0_HIGH);
8768 addr0_low = tr32(MAC_ADDR_0_LOW);
8769 addr1_high = tr32(MAC_ADDR_1_HIGH);
8770 addr1_low = tr32(MAC_ADDR_1_LOW);
8772 /* Skip MAC addr 1 if ASF is using it. */
8773 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8774 !(addr1_high == 0 && addr1_low == 0))
8777 spin_lock_bh(&tp->lock);
8778 __tg3_set_mac_addr(tp, skip_mac_1);
8779 spin_unlock_bh(&tp->lock);
8784 /* tp->lock is held. */
/* Program one buffer-descriptor info (BDINFO) structure in NIC SRAM:
 * the 64-bit host DMA address (split high/low), the maxlen/flags word,
 * and — only on pre-5705_PLUS chips — the NIC-local ring address.
 * tp->lock is held.
 */
8785 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8786 dma_addr_t mapping, u32 maxlen_flags,
8790 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8791 ((u64) mapping >> 32));
8793 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8794 ((u64) mapping & 0xffffffff));
8796 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8799 if (!tg3_flag(tp, 5705_PLUS))
8801 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
/* Program tx interrupt-coalescing parameters. Without TSS, vector 0's
 * registers get the ethtool values; with TSS, vector 0 is zeroed and
 * each tx queue's per-vector register block (stride 0x18) is
 * programmed instead. Remaining unused vector registers are zeroed.
 */
8806 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8810 if (!tg3_flag(tp, ENABLE_TSS)) {
8811 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8812 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8813 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8815 tw32(HOSTCC_TXCOL_TICKS, 0);
8816 tw32(HOSTCC_TXMAX_FRAMES, 0);
8817 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8819 for (; i < tp->txq_cnt; i++) {
8822 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8823 tw32(reg, ec->tx_coalesce_usecs);
8824 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8825 tw32(reg, ec->tx_max_coalesced_frames);
8826 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8827 tw32(reg, ec->tx_max_coalesced_frames_irq);
8831 for (; i < tp->irq_max - 1; i++) {
8832 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8833 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8834 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
/* Program rx interrupt-coalescing parameters — mirror image of
 * tg3_coal_tx_init(), keyed on ENABLE_RSS and rxq_cnt instead of
 * ENABLE_TSS/txq_cnt. Unused per-vector registers are zeroed.
 */
8838 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8841 u32 limit = tp->rxq_cnt;
8843 if (!tg3_flag(tp, ENABLE_RSS)) {
8844 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8845 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8846 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8849 tw32(HOSTCC_RXCOL_TICKS, 0);
8850 tw32(HOSTCC_RXMAX_FRAMES, 0);
8851 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8854 for (; i < limit; i++) {
8857 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8858 tw32(reg, ec->rx_coalesce_usecs);
8859 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8860 tw32(reg, ec->rx_max_coalesced_frames);
8861 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8862 tw32(reg, ec->rx_max_coalesced_frames_irq);
8865 for (; i < tp->irq_max - 1; i++) {
8866 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8867 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8868 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
/* Apply the full ethtool coalescing configuration: tx and rx vector
 * registers via the helpers above, then — on pre-5705_PLUS chips —
 * the irq tick registers and the stats-block coalescing interval.
 */
8872 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8874 tg3_coal_tx_init(tp, ec);
8875 tg3_coal_rx_init(tp, ec);
8877 if (!tg3_flag(tp, 5705_PLUS)) {
8878 u32 val = ec->stats_block_coalesce_usecs;
8880 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8881 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8886 tw32(HOSTCC_STAT_COAL_TICKS, val);
8890 /* tp->lock is held. */
/* Reset all ring state in hardware. tp->lock is held. Disables every
 * send and rx-return BDINFO beyond the first (limits depend on chip
 * family), masks interrupts, zeroes all producer/consumer mailboxes and
 * per-vector bookkeeping, clears the NIC-based send BD mailboxes on old
 * chips, then reprograms each vector's status-block DMA address and its
 * tx/rx-return BDINFO entries (vector 0 first, then vectors 1..n with
 * per-vector status block registers starting at HOSTCC_STATBLCK_RING1).
 */
8891 static void tg3_rings_reset(struct tg3 *tp)
8894 u32 stblk, txrcb, rxrcb, limit;
8895 struct tg3_napi *tnapi = &tp->napi[0];
8897 /* Disable all transmit rings but the first. */
8898 if (!tg3_flag(tp, 5705_PLUS))
8899 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8900 else if (tg3_flag(tp, 5717_PLUS))
8901 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8902 else if (tg3_flag(tp, 57765_CLASS) ||
8903 tg3_asic_rev(tp) == ASIC_REV_5762)
8904 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8906 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8908 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8909 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8910 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8911 BDINFO_FLAGS_DISABLED);
8914 /* Disable all receive return rings but the first. */
8915 if (tg3_flag(tp, 5717_PLUS))
8916 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8917 else if (!tg3_flag(tp, 5705_PLUS))
8918 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8919 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
8920 tg3_asic_rev(tp) == ASIC_REV_5762 ||
8921 tg3_flag(tp, 57765_CLASS))
8922 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8924 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8926 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8927 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8928 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8929 BDINFO_FLAGS_DISABLED);
8931 /* Disable interrupts */
8932 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8933 tp->napi[0].chk_msi_cnt = 0;
8934 tp->napi[0].last_rx_cons = 0;
8935 tp->napi[0].last_tx_cons = 0;
8937 /* Zero mailbox registers. */
8938 if (tg3_flag(tp, SUPPORT_MSIX)) {
8939 for (i = 1; i < tp->irq_max; i++) {
8940 tp->napi[i].tx_prod = 0;
8941 tp->napi[i].tx_cons = 0;
8942 if (tg3_flag(tp, ENABLE_TSS))
8943 tw32_mailbox(tp->napi[i].prodmbox, 0);
8944 tw32_rx_mbox(tp->napi[i].consmbox, 0);
8945 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8946 tp->napi[i].chk_msi_cnt = 0;
8947 tp->napi[i].last_rx_cons = 0;
8948 tp->napi[i].last_tx_cons = 0;
8950 if (!tg3_flag(tp, ENABLE_TSS))
8951 tw32_mailbox(tp->napi[0].prodmbox, 0);
8953 tp->napi[0].tx_prod = 0;
8954 tp->napi[0].tx_cons = 0;
8955 tw32_mailbox(tp->napi[0].prodmbox, 0);
8956 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8959 /* Make sure the NIC-based send BD rings are disabled. */
8960 if (!tg3_flag(tp, 5705_PLUS)) {
8961 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8962 for (i = 0; i < 16; i++)
8963 tw32_tx_mbox(mbox + i * 8, 0);
8966 txrcb = NIC_SRAM_SEND_RCB;
8967 rxrcb = NIC_SRAM_RCV_RET_RCB;
8969 /* Clear status block in ram. */
8970 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8972 /* Set status block DMA address */
8973 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8974 ((u64) tnapi->status_mapping >> 32));
8975 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8976 ((u64) tnapi->status_mapping & 0xffffffff));
8978 if (tnapi->tx_ring) {
8979 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8980 (TG3_TX_RING_SIZE <<
8981 BDINFO_FLAGS_MAXLEN_SHIFT),
8982 NIC_SRAM_TX_BUFFER_DESC);
8983 txrcb += TG3_BDINFO_SIZE;
8986 if (tnapi->rx_rcb) {
8987 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8988 (tp->rx_ret_ring_mask + 1) <<
8989 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8990 rxrcb += TG3_BDINFO_SIZE;
8993 stblk = HOSTCC_STATBLCK_RING1;
/* Remaining vectors: per-vector status block address + BDINFOs. */
8995 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8996 u64 mapping = (u64)tnapi->status_mapping;
8997 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8998 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9000 /* Clear status block in ram. */
9001 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9003 if (tnapi->tx_ring) {
9004 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9005 (TG3_TX_RING_SIZE <<
9006 BDINFO_FLAGS_MAXLEN_SHIFT),
9007 NIC_SRAM_TX_BUFFER_DESC);
9008 txrcb += TG3_BDINFO_SIZE;
9011 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9012 ((tp->rx_ret_ring_mask + 1) <<
9013 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
9016 rxrcb += TG3_BDINFO_SIZE;
/* Program the rx BD replenish thresholds: pick the per-family SRAM BD
 * cache size, then set the std (and, when jumbo applies, jumbo)
 * replenish threshold to min(cache-based limit, rx_pending/8 with a
 * floor of 1). 57765_PLUS chips also get low-water-mark registers.
 */
9020 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9022 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9024 if (!tg3_flag(tp, 5750_PLUS) ||
9025 tg3_flag(tp, 5780_CLASS) ||
9026 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9027 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9028 tg3_flag(tp, 57765_PLUS))
9029 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9030 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9031 tg3_asic_rev(tp) == ASIC_REV_5787)
9032 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9034 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9036 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9037 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9039 val = min(nic_rep_thresh, host_rep_thresh);
9040 tw32(RCVBDI_STD_THRESH, val);
9042 if (tg3_flag(tp, 57765_PLUS))
9043 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
/* Jumbo thresholds only apply to jumbo-capable non-5780-class chips. */
9045 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9048 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9050 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9052 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9053 tw32(RCVBDI_JUMBO_THRESH, val);
9055 if (tg3_flag(tp, 57765_PLUS))
9056 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
/* Bitwise CRC over 'len' bytes of 'buf' — used below to compute the
 * multicast hash-filter bit for each address. Inner loop bodies are
 * elided in this view; presumably the standard Ethernet CRC-32 —
 * confirm against the full file.
 */
9059 static inline u32 calc_crc(unsigned char *buf, int len)
9067 for (j = 0; j < len; j++) {
9070 for (k = 0; k < 8; k++) {
/* Program all four MAC multicast hash registers to accept-all
 * (0xffffffff each) or reject-all (0) multicast frames.
 */
9083 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9085 /* accept or reject all multicast frames */
9086 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9087 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9088 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9089 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
/* Apply the netdev rx filtering flags to the MAC: promiscuous mode,
 * accept-all / reject-all multicast, or a 128-bit hash filter built
 * from the multicast list via calc_crc(). VLAN tag stripping is kept
 * on for ASF (see comment below). MAC_RX_MODE is rewritten only when
 * the computed mode actually changed.
 */
9092 static void __tg3_set_rx_mode(struct net_device *dev)
9094 struct tg3 *tp = netdev_priv(dev);
9097 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9098 RX_MODE_KEEP_VLAN_TAG);
9100 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9101 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9104 if (!tg3_flag(tp, ENABLE_ASF))
9105 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9108 if (dev->flags & IFF_PROMISC) {
9109 /* Promiscuous mode. */
9110 rx_mode |= RX_MODE_PROMISC;
9111 } else if (dev->flags & IFF_ALLMULTI) {
9112 /* Accept all multicast. */
9113 tg3_set_multi(tp, 1);
9114 } else if (netdev_mc_empty(dev)) {
9115 /* Reject all multicast. */
9116 tg3_set_multi(tp, 0);
9118 /* Accept one or more multicast(s). */
9119 struct netdev_hw_addr *ha;
9120 u32 mc_filter[4] = { 0, };
/* Each address's CRC selects one bit in the 4x32-bit hash filter. */
9125 netdev_for_each_mc_addr(ha, dev) {
9126 crc = calc_crc(ha->addr, ETH_ALEN);
9128 regidx = (bit & 0x60) >> 5;
9130 mc_filter[regidx] |= (1 << bit);
9133 tw32(MAC_HASH_REG_0, mc_filter[0]);
9134 tw32(MAC_HASH_REG_1, mc_filter[1]);
9135 tw32(MAC_HASH_REG_2, mc_filter[2]);
9136 tw32(MAC_HASH_REG_3, mc_filter[3]);
9139 if (rx_mode != tp->rx_mode) {
9140 tp->rx_mode = rx_mode;
9141 tw32_f(MAC_RX_MODE, rx_mode);
/* Fill the RSS indirection table with the ethtool default spread of
 * TG3_RSS_INDIR_TBL_SIZE entries over 'qcnt' rx queues.
 */
9146 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9150 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9151 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
/* Validate the RSS indirection table against the current rx queue
 * count: zero it when only one queue is in use, and rebuild it with
 * the defaults if any entry references a queue >= rxq_cnt. No-op on
 * chips without MSI-X support.
 */
9154 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9158 if (!tg3_flag(tp, SUPPORT_MSIX))
9161 if (tp->rxq_cnt == 1) {
9162 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9166 /* Validate table against current IRQ count */
9167 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9168 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9172 if (i != TG3_RSS_INDIR_TBL_SIZE)
9173 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
/* Write the software RSS indirection table into the MAC's indirection
 * registers, packing 8 table entries per 32-bit register starting at
 * MAC_RSS_INDIR_TBL_0 (shift/OR details elided in this view).
 */
9176 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9179 u32 reg = MAC_RSS_INDIR_TBL_0;
9181 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9182 u32 val = tp->rss_ind_tbl[i];
9184 for (; i % 8; i++) {
9186 val |= tp->rss_ind_tbl[i];
9193 /* tp->lock is held. */
9194 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
9196 u32 val, rdmac_mode;
9198 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9200 tg3_disable_ints(tp);
9204 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9206 if (tg3_flag(tp, INIT_COMPLETE))
9207 tg3_abort_hw(tp, 1);
9209 /* Enable MAC control of LPI */
9210 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
9211 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
9212 TG3_CPMU_EEE_LNKIDL_UART_IDL;
9213 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9214 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
9216 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
9218 tw32_f(TG3_CPMU_EEE_CTRL,
9219 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9221 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9222 TG3_CPMU_EEEMD_LPI_IN_TX |
9223 TG3_CPMU_EEEMD_LPI_IN_RX |
9224 TG3_CPMU_EEEMD_EEE_ENABLE;
9226 if (tg3_asic_rev(tp) != ASIC_REV_5717)
9227 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9229 if (tg3_flag(tp, ENABLE_APE))
9230 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9232 tw32_f(TG3_CPMU_EEE_MODE, val);
9234 tw32_f(TG3_CPMU_EEE_DBTMR1,
9235 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9236 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9238 tw32_f(TG3_CPMU_EEE_DBTMR2,
9239 TG3_CPMU_DBTMR2_APE_TX_2047US |
9240 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9246 err = tg3_chip_reset(tp);
9250 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9252 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9253 val = tr32(TG3_CPMU_CTRL);
9254 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9255 tw32(TG3_CPMU_CTRL, val);
9257 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9258 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9259 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9260 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9262 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9263 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9264 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9265 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9267 val = tr32(TG3_CPMU_HST_ACC);
9268 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9269 val |= CPMU_HST_ACC_MACCLK_6_25;
9270 tw32(TG3_CPMU_HST_ACC, val);
9273 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9274 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9275 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9276 PCIE_PWR_MGMT_L1_THRESH_4MS;
9277 tw32(PCIE_PWR_MGMT_THRESH, val);
9279 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9280 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9282 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9284 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9285 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9288 if (tg3_flag(tp, L1PLLPD_EN)) {
9289 u32 grc_mode = tr32(GRC_MODE);
9291 /* Access the lower 1K of PL PCIE block registers. */
9292 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9293 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9295 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9296 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9297 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9299 tw32(GRC_MODE, grc_mode);
9302 if (tg3_flag(tp, 57765_CLASS)) {
9303 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9304 u32 grc_mode = tr32(GRC_MODE);
9306 /* Access the lower 1K of PL PCIE block registers. */
9307 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9308 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9310 val = tr32(TG3_PCIE_TLDLPL_PORT +
9311 TG3_PCIE_PL_LO_PHYCTL5);
9312 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9313 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9315 tw32(GRC_MODE, grc_mode);
9318 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9321 /* Fix transmit hangs */
9322 val = tr32(TG3_CPMU_PADRNG_CTL);
9323 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9324 tw32(TG3_CPMU_PADRNG_CTL, val);
9326 grc_mode = tr32(GRC_MODE);
9328 /* Access the lower 1K of DL PCIE block registers. */
9329 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9330 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9332 val = tr32(TG3_PCIE_TLDLPL_PORT +
9333 TG3_PCIE_DL_LO_FTSMAX);
9334 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9335 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9336 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9338 tw32(GRC_MODE, grc_mode);
9341 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9342 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9343 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9344 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9347 /* This works around an issue with Athlon chipsets on
9348 * B3 tigon3 silicon. This bit has no effect on any
9349 * other revision. But do not set this on PCI Express
9350 * chips and don't even touch the clocks if the CPMU is present.
9352 if (!tg3_flag(tp, CPMU_PRESENT)) {
9353 if (!tg3_flag(tp, PCI_EXPRESS))
9354 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9355 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9358 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9359 tg3_flag(tp, PCIX_MODE)) {
9360 val = tr32(TG3PCI_PCISTATE);
9361 val |= PCISTATE_RETRY_SAME_DMA;
9362 tw32(TG3PCI_PCISTATE, val);
9365 if (tg3_flag(tp, ENABLE_APE)) {
9366 /* Allow reads and writes to the
9367 * APE register and memory space.
9369 val = tr32(TG3PCI_PCISTATE);
9370 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9371 PCISTATE_ALLOW_APE_SHMEM_WR |
9372 PCISTATE_ALLOW_APE_PSPACE_WR;
9373 tw32(TG3PCI_PCISTATE, val);
9376 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9377 /* Enable some hw fixes. */
9378 val = tr32(TG3PCI_MSI_DATA);
9379 val |= (1 << 26) | (1 << 28) | (1 << 29);
9380 tw32(TG3PCI_MSI_DATA, val);
9383 /* Descriptor ring init may make accesses to the
9384 * NIC SRAM area to setup the TX descriptors, so we
9385 * can only do this after the hardware has been
9386 * successfully reset.
9388 err = tg3_init_rings(tp);
9392 if (tg3_flag(tp, 57765_PLUS)) {
9393 val = tr32(TG3PCI_DMA_RW_CTRL) &
9394 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9395 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9396 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9397 if (!tg3_flag(tp, 57765_CLASS) &&
9398 tg3_asic_rev(tp) != ASIC_REV_5717 &&
9399 tg3_asic_rev(tp) != ASIC_REV_5762)
9400 val |= DMA_RWCTRL_TAGGED_STAT_WA;
9401 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9402 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9403 tg3_asic_rev(tp) != ASIC_REV_5761) {
9404 /* This value is determined during the probe time DMA
9405 * engine test, tg3_test_dma.
9407 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9410 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9411 GRC_MODE_4X_NIC_SEND_RINGS |
9412 GRC_MODE_NO_TX_PHDR_CSUM |
9413 GRC_MODE_NO_RX_PHDR_CSUM);
9414 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9416 /* Pseudo-header checksum is done by hardware logic and not
9417 * the offload processers, so make the chip do the pseudo-
9418 * header checksums on receive. For transmit it is more
9419 * convenient to do the pseudo-header checksum in software
9420 * as Linux does that on transmit for us in all cases.
9422 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9424 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9426 tw32(TG3_RX_PTP_CTL,
9427 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9429 if (tg3_flag(tp, PTP_CAPABLE))
9430 val |= GRC_MODE_TIME_SYNC_ENABLE;
9432 tw32(GRC_MODE, tp->grc_mode | val);
9434 /* Setup the timer prescalar register. Clock is always 66Mhz. */
9435 val = tr32(GRC_MISC_CFG);
9437 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9438 tw32(GRC_MISC_CFG, val);
9440 /* Initialize MBUF/DESC pool. */
9441 if (tg3_flag(tp, 5750_PLUS)) {
9443 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9444 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9445 if (tg3_asic_rev(tp) == ASIC_REV_5704)
9446 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9448 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9449 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9450 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9451 } else if (tg3_flag(tp, TSO_CAPABLE)) {
9454 fw_len = tp->fw_len;
9455 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9456 tw32(BUFMGR_MB_POOL_ADDR,
9457 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9458 tw32(BUFMGR_MB_POOL_SIZE,
9459 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9462 if (tp->dev->mtu <= ETH_DATA_LEN) {
9463 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9464 tp->bufmgr_config.mbuf_read_dma_low_water);
9465 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9466 tp->bufmgr_config.mbuf_mac_rx_low_water);
9467 tw32(BUFMGR_MB_HIGH_WATER,
9468 tp->bufmgr_config.mbuf_high_water);
9470 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9471 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9472 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9473 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9474 tw32(BUFMGR_MB_HIGH_WATER,
9475 tp->bufmgr_config.mbuf_high_water_jumbo);
9477 tw32(BUFMGR_DMA_LOW_WATER,
9478 tp->bufmgr_config.dma_low_water);
9479 tw32(BUFMGR_DMA_HIGH_WATER,
9480 tp->bufmgr_config.dma_high_water);
9482 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9483 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9484 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9485 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9486 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9487 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9488 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9489 tw32(BUFMGR_MODE, val);
9490 for (i = 0; i < 2000; i++) {
9491 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9496 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9500 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9501 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9503 tg3_setup_rxbd_thresholds(tp);
9505 /* Initialize TG3_BDINFO's at:
9506 * RCVDBDI_STD_BD: standard eth size rx ring
9507 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
9508 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
9511 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
9512 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
9513 * ring attribute flags
9514 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
9516 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9517 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9519 * The size of each ring is fixed in the firmware, but the location is
9522 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9523 ((u64) tpr->rx_std_mapping >> 32));
9524 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9525 ((u64) tpr->rx_std_mapping & 0xffffffff));
9526 if (!tg3_flag(tp, 5717_PLUS))
9527 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9528 NIC_SRAM_RX_BUFFER_DESC);
9530 /* Disable the mini ring */
9531 if (!tg3_flag(tp, 5705_PLUS))
9532 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9533 BDINFO_FLAGS_DISABLED);
9535 /* Program the jumbo buffer descriptor ring control
9536 * blocks on those devices that have them.
9538 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9539 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9541 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9542 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9543 ((u64) tpr->rx_jmb_mapping >> 32));
9544 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9545 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9546 val = TG3_RX_JMB_RING_SIZE(tp) <<
9547 BDINFO_FLAGS_MAXLEN_SHIFT;
9548 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9549 val | BDINFO_FLAGS_USE_EXT_RECV);
9550 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9551 tg3_flag(tp, 57765_CLASS) ||
9552 tg3_asic_rev(tp) == ASIC_REV_5762)
9553 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9554 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9556 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9557 BDINFO_FLAGS_DISABLED);
9560 if (tg3_flag(tp, 57765_PLUS)) {
9561 val = TG3_RX_STD_RING_SIZE(tp);
9562 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9563 val |= (TG3_RX_STD_DMA_SZ << 2);
9565 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9567 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9569 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9571 tpr->rx_std_prod_idx = tp->rx_pending;
9572 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9574 tpr->rx_jmb_prod_idx =
9575 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9576 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9578 tg3_rings_reset(tp);
9580 /* Initialize MAC address and backoff seed. */
9581 __tg3_set_mac_addr(tp, 0);
9583 /* MTU + ethernet header + FCS + optional VLAN tag */
9584 tw32(MAC_RX_MTU_SIZE,
9585 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9587 /* The slot time is changed by tg3_setup_phy if we
9588 * run at gigabit with half duplex.
9590 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9591 (6 << TX_LENGTHS_IPG_SHIFT) |
9592 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9594 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9595 tg3_asic_rev(tp) == ASIC_REV_5762)
9596 val |= tr32(MAC_TX_LENGTHS) &
9597 (TX_LENGTHS_JMB_FRM_LEN_MSK |
9598 TX_LENGTHS_CNT_DWN_VAL_MSK);
9600 tw32(MAC_TX_LENGTHS, val);
9602 /* Receive rules. */
9603 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9604 tw32(RCVLPC_CONFIG, 0x0181);
9606 /* Calculate RDMAC_MODE setting early, we need it to determine
9607 * the RCVLPC_STATE_ENABLE mask.
9609 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9610 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9611 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9612 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9613 RDMAC_MODE_LNGREAD_ENAB);
9615 if (tg3_asic_rev(tp) == ASIC_REV_5717)
9616 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9618 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
9619 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9620 tg3_asic_rev(tp) == ASIC_REV_57780)
9621 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9622 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9623 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9625 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9626 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9627 if (tg3_flag(tp, TSO_CAPABLE) &&
9628 tg3_asic_rev(tp) == ASIC_REV_5705) {
9629 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9630 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9631 !tg3_flag(tp, IS_5788)) {
9632 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9636 if (tg3_flag(tp, PCI_EXPRESS))
9637 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9639 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9641 if (tp->dev->mtu <= ETH_DATA_LEN) {
9642 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
9643 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
9647 if (tg3_flag(tp, HW_TSO_1) ||
9648 tg3_flag(tp, HW_TSO_2) ||
9649 tg3_flag(tp, HW_TSO_3))
9650 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9652 if (tg3_flag(tp, 57765_PLUS) ||
9653 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9654 tg3_asic_rev(tp) == ASIC_REV_57780)
9655 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9657 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9658 tg3_asic_rev(tp) == ASIC_REV_5762)
9659 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9661 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
9662 tg3_asic_rev(tp) == ASIC_REV_5784 ||
9663 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9664 tg3_asic_rev(tp) == ASIC_REV_57780 ||
9665 tg3_flag(tp, 57765_PLUS)) {
9668 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9669 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
9671 tgtreg = TG3_RDMA_RSRVCTRL_REG;
9674 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9675 tg3_asic_rev(tp) == ASIC_REV_5762) {
9676 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9677 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9678 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9679 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9680 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9681 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9683 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9686 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
9687 tg3_asic_rev(tp) == ASIC_REV_5720 ||
9688 tg3_asic_rev(tp) == ASIC_REV_5762) {
9691 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9692 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
9694 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
9698 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9699 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9702 /* Receive/send statistics. */
9703 if (tg3_flag(tp, 5750_PLUS)) {
9704 val = tr32(RCVLPC_STATS_ENABLE);
9705 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9706 tw32(RCVLPC_STATS_ENABLE, val);
9707 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9708 tg3_flag(tp, TSO_CAPABLE)) {
9709 val = tr32(RCVLPC_STATS_ENABLE);
9710 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9711 tw32(RCVLPC_STATS_ENABLE, val);
9713 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9715 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9716 tw32(SNDDATAI_STATSENAB, 0xffffff);
9717 tw32(SNDDATAI_STATSCTRL,
9718 (SNDDATAI_SCTRL_ENABLE |
9719 SNDDATAI_SCTRL_FASTUPD));
9721 /* Setup host coalescing engine. */
9722 tw32(HOSTCC_MODE, 0);
9723 for (i = 0; i < 2000; i++) {
9724 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9729 __tg3_set_coalesce(tp, &tp->coal);
9731 if (!tg3_flag(tp, 5705_PLUS)) {
9732 /* Status/statistics block address. See tg3_timer,
9733 * the tg3_periodic_fetch_stats call there, and
9734 * tg3_get_stats to see how this works for 5705/5750 chips.
9736 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9737 ((u64) tp->stats_mapping >> 32));
9738 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9739 ((u64) tp->stats_mapping & 0xffffffff));
9740 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9742 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9744 /* Clear statistics and status block memory areas */
9745 for (i = NIC_SRAM_STATS_BLK;
9746 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9748 tg3_write_mem(tp, i, 0);
9753 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9755 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9756 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9757 if (!tg3_flag(tp, 5705_PLUS))
9758 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9760 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9761 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9762 /* reset to prevent losing 1st rx packet intermittently */
9763 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9767 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9768 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9769 MAC_MODE_FHDE_ENABLE;
9770 if (tg3_flag(tp, ENABLE_APE))
9771 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9772 if (!tg3_flag(tp, 5705_PLUS) &&
9773 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9774 tg3_asic_rev(tp) != ASIC_REV_5700)
9775 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9776 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9779 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9780 * If TG3_FLAG_IS_NIC is zero, we should read the
9781 * register to preserve the GPIO settings for LOMs. The GPIOs,
9782 * whether used as inputs or outputs, are set by boot code after
9785 if (!tg3_flag(tp, IS_NIC)) {
9788 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9789 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9790 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9792 if (tg3_asic_rev(tp) == ASIC_REV_5752)
9793 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9794 GRC_LCLCTRL_GPIO_OUTPUT3;
9796 if (tg3_asic_rev(tp) == ASIC_REV_5755)
9797 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9799 tp->grc_local_ctrl &= ~gpio_mask;
9800 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9802 /* GPIO1 must be driven high for eeprom write protect */
9803 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9804 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9805 GRC_LCLCTRL_GPIO_OUTPUT1);
9807 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9810 if (tg3_flag(tp, USING_MSIX)) {
9811 val = tr32(MSGINT_MODE);
9812 val |= MSGINT_MODE_ENABLE;
9813 if (tp->irq_cnt > 1)
9814 val |= MSGINT_MODE_MULTIVEC_EN;
9815 if (!tg3_flag(tp, 1SHOT_MSI))
9816 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9817 tw32(MSGINT_MODE, val);
9820 if (!tg3_flag(tp, 5705_PLUS)) {
9821 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9825 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9826 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9827 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9828 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9829 WDMAC_MODE_LNGREAD_ENAB);
9831 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9832 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9833 if (tg3_flag(tp, TSO_CAPABLE) &&
9834 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
9835 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
9837 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9838 !tg3_flag(tp, IS_5788)) {
9839 val |= WDMAC_MODE_RX_ACCEL;
9843 /* Enable host coalescing bug fix */
9844 if (tg3_flag(tp, 5755_PLUS))
9845 val |= WDMAC_MODE_STATUS_TAG_FIX;
9847 if (tg3_asic_rev(tp) == ASIC_REV_5785)
9848 val |= WDMAC_MODE_BURST_ALL_DATA;
9850 tw32_f(WDMAC_MODE, val);
9853 if (tg3_flag(tp, PCIX_MODE)) {
9856 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9858 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
9859 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9860 pcix_cmd |= PCI_X_CMD_READ_2K;
9861 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
9862 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9863 pcix_cmd |= PCI_X_CMD_READ_2K;
9865 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9869 tw32_f(RDMAC_MODE, rdmac_mode);
9872 if (tg3_asic_rev(tp) == ASIC_REV_5719) {
9873 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9874 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9877 if (i < TG3_NUM_RDMA_CHANNELS) {
9878 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9879 val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9880 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9881 tg3_flag_set(tp, 5719_RDMA_BUG);
9885 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9886 if (!tg3_flag(tp, 5705_PLUS))
9887 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9889 if (tg3_asic_rev(tp) == ASIC_REV_5761)
9891 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9893 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9895 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9896 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9897 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9898 if (tg3_flag(tp, LRG_PROD_RING_CAP))
9899 val |= RCVDBDI_MODE_LRG_RING_SZ;
9900 tw32(RCVDBDI_MODE, val);
9901 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9902 if (tg3_flag(tp, HW_TSO_1) ||
9903 tg3_flag(tp, HW_TSO_2) ||
9904 tg3_flag(tp, HW_TSO_3))
9905 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9906 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9907 if (tg3_flag(tp, ENABLE_TSS))
9908 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9909 tw32(SNDBDI_MODE, val);
9910 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9912 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
9913 err = tg3_load_5701_a0_firmware_fix(tp);
9918 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9919 /* Ignore any errors for the firmware download. If download
9920 * fails, the device will operate with EEE disabled
9922 tg3_load_57766_firmware(tp);
9925 if (tg3_flag(tp, TSO_CAPABLE)) {
9926 err = tg3_load_tso_firmware(tp);
9931 tp->tx_mode = TX_MODE_ENABLE;
9933 if (tg3_flag(tp, 5755_PLUS) ||
9934 tg3_asic_rev(tp) == ASIC_REV_5906)
9935 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9937 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9938 tg3_asic_rev(tp) == ASIC_REV_5762) {
9939 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9940 tp->tx_mode &= ~val;
9941 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9944 tw32_f(MAC_TX_MODE, tp->tx_mode);
9947 if (tg3_flag(tp, ENABLE_RSS)) {
9948 tg3_rss_write_indir_tbl(tp);
9950 /* Setup the "secret" hash key. */
9951 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9952 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9953 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9954 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9955 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9956 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9957 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9958 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9959 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9960 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9963 tp->rx_mode = RX_MODE_ENABLE;
9964 if (tg3_flag(tp, 5755_PLUS))
9965 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9967 if (tg3_flag(tp, ENABLE_RSS))
9968 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9969 RX_MODE_RSS_ITBL_HASH_BITS_7 |
9970 RX_MODE_RSS_IPV6_HASH_EN |
9971 RX_MODE_RSS_TCP_IPV6_HASH_EN |
9972 RX_MODE_RSS_IPV4_HASH_EN |
9973 RX_MODE_RSS_TCP_IPV4_HASH_EN;
9975 tw32_f(MAC_RX_MODE, tp->rx_mode);
9978 tw32(MAC_LED_CTRL, tp->led_ctrl);
9980 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9981 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9982 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9985 tw32_f(MAC_RX_MODE, tp->rx_mode);
9988 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9989 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
9990 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9991 /* Set drive transmission level to 1.2V */
9992 /* only if the signal pre-emphasis bit is not set */
9993 val = tr32(MAC_SERDES_CFG);
9996 tw32(MAC_SERDES_CFG, val);
9998 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
9999 tw32(MAC_SERDES_CFG, 0x616000);
10002 /* Prevent chip from dropping frames when flow control
10005 if (tg3_flag(tp, 57765_CLASS))
10009 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10011 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10012 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10013 /* Use hardware link auto-negotiation */
10014 tg3_flag_set(tp, HW_AUTONEG);
10017 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10018 tg3_asic_rev(tp) == ASIC_REV_5714) {
10021 tmp = tr32(SERDES_RX_CTRL);
10022 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10023 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10024 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10025 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10028 if (!tg3_flag(tp, USE_PHYLIB)) {
10029 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10030 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10032 err = tg3_setup_phy(tp, 0);
10036 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10037 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10040 /* Clear CRC stats. */
10041 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10042 tg3_writephy(tp, MII_TG3_TEST1,
10043 tmp | MII_TG3_TEST1_CRC_EN);
10044 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10049 __tg3_set_rx_mode(tp->dev);
10051 /* Initialize receive rules. */
10052 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10053 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10054 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10055 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10057 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10061 if (tg3_flag(tp, ENABLE_ASF))
10065 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10067 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10069 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10071 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10073 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10075 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10077 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10079 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10081 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10083 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10085 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10087 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10089 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10091 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10099 if (tg3_flag(tp, ENABLE_APE))
10100 /* Write our heartbeat update interval to APE. */
10101 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10102 APE_HOST_HEARTBEAT_INT_DISABLE);
10104 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10109 /* Called at device open time to get the chip ready for
10110 * packet processing. Invoked with tp->lock held.
10112 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
/* Order matters: bring the clocks into their operating configuration
 * first, then zero the PCI memory-window base so indirect accesses
 * start from a known state, and only then run the full reset/bring-up.
 */
10114 tg3_switch_clocks(tp);
10116 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
/* Propagates tg3_reset_hw()'s status to the caller. */
10118 return tg3_reset_hw(tp, reset_phy);
/* Populate ocir[0..TG3_SD_NUM_RECS-1] with fixed-size records read
 * from the APE scratchpad (record i lives at offset i * TG3_OCIR_LEN).
 * Records that fail validation are zeroed in place so callers can
 * treat an all-zero record as an empty slot.
 */
10121 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10125 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10126 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10128 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
/* A record is only valid if it carries the OCIR magic signature
 * and has its ACTIVE flag set; anything else is wiped.
 */
10131 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10132 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10133 memset(ocir, 0, TG3_OCIR_LEN);
10137 /* sysfs attributes for hwmon */
/* hwmon "show" callback: read one temperature word from the APE
 * scratchpad at the offset encoded in the sensor attribute's index,
 * and print it in decimal.
 */
10138 static ssize_t tg3_show_temp(struct device *dev,
10139 struct device_attribute *devattr, char *buf)
10141 struct pci_dev *pdev = to_pci_dev(dev);
10142 struct net_device *netdev = pci_get_drvdata(pdev);
10143 struct tg3 *tp = netdev_priv(netdev);
10144 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
/* tp->lock serializes the APE scratchpad access with the rest of
 * the driver (timer, reset paths).
 */
10147 spin_lock_bh(&tp->lock);
10148 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10149 sizeof(temperature));
10150 spin_unlock_bh(&tp->lock);
/* NOTE(review): units presumably millidegrees Celsius per hwmon
 * convention -- confirm against the APE firmware interface.
 */
10151 return sprintf(buf, "%u\n", temperature);
/* Three read-only hwmon sensors, all served by tg3_show_temp(); the
 * trailing index selects which APE scratchpad offset is read.
 */
10155 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10156 TG3_TEMP_SENSOR_OFFSET);
10157 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10158 TG3_TEMP_CAUTION_OFFSET);
10159 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10160 TG3_TEMP_MAX_OFFSET);
/* Attribute group registered under the PCI device's kobject in
 * tg3_hwmon_open() and removed in tg3_hwmon_close().
 */
10162 static struct attribute *tg3_attributes[] = {
10163 &sensor_dev_attr_temp1_input.dev_attr.attr,
10164 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10165 &sensor_dev_attr_temp1_max.dev_attr.attr,
10169 static const struct attribute_group tg3_group = {
10170 .attrs = tg3_attributes,
10173 static void tg3_hwmon_close(struct tg3 *tp)
10175 if (tp->hwmon_dev) {
10176 hwmon_device_unregister(tp->hwmon_dev);
10177 tp->hwmon_dev = NULL;
10178 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
/* Create the hwmon interface, but only if the APE scratchpad
 * advertises sensor data.  Registration failures are reported and
 * rolled back; the device then simply runs without hwmon support.
 */
10182 static void tg3_hwmon_open(struct tg3 *tp)
10186 struct pci_dev *pdev = tp->pdev;
10187 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10189 tg3_sd_scan_scratchpad(tp, ocirs);
/* Total up header+data sizes; only records that advertise data
 * contribute to the total.
 */
10191 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10192 if (!ocirs[i].src_data_length)
10195 size += ocirs[i].src_hdr_length;
10196 size += ocirs[i].src_data_length;
10202 /* Register hwmon sysfs hooks */
10203 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10205 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10209 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
/* On failure, clear the handle and remove the sysfs group so
 * tg3_hwmon_close() stays a strict inverse of this function.
 */
10210 if (IS_ERR(tp->hwmon_dev)) {
10211 tp->hwmon_dev = NULL;
10212 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10213 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
/* Fold the 32-bit hardware counter at REG into the 64-bit software
 * statistic PSTAT.  The low word accumulates with unsigned wraparound;
 * the post-add "low < addend" test detects the wrap and carries one
 * into the high word.
 */
10218 #define TG3_STAT_ADD32(PSTAT, REG) \
10219 do { u32 __val = tr32(REG); \
10220 (PSTAT)->low += __val; \
10221 if ((PSTAT)->low < __val) \
10222 (PSTAT)->high += 1; \
/* Periodically (from tg3_timer) accumulate the chip's 32-bit MAC and
 * receive-list-placement counters into the 64-bit software statistics
 * block at tp->hw_stats via TG3_STAT_ADD32.
 */
10225 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10227 struct tg3_hw_stats *sp = tp->hw_stats;
10232 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10233 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10234 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10235 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10236 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10237 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10238 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10239 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10240 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10241 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10242 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10243 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10244 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
/* 5719 RDMA workaround: once enough tx packets have gone out, the
 * TX_LENGTH_WA bit set during tg3_reset_hw() is no longer needed;
 * clear it and drop the flag so this runs only once.
 */
10245 if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
10246 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10247 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10250 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10251 val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
10252 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10253 tg3_flag_clear(tp, 5719_RDMA_BUG);
10256 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10257 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10258 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10259 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10260 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10261 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10262 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10263 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10264 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10265 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10266 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10267 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10268 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10269 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10271 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
/* Most chips have a working discard counter; the listed revisions
 * don't, so they instead count mbuf low-watermark attention events
 * (at most one per tick) as discards.
 */
10272 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10273 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10274 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10275 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10277 u32 val = tr32(HOSTCC_FLOW_ATTN);
10278 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
/* Ack the attention bit, then accumulate with manual carry
 * into the 64-bit rx_discards counter.
 */
10280 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10281 sp->rx_discards.low += val;
10282 if (sp->rx_discards.low < val)
10283 sp->rx_discards.high += 1;
10285 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10287 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
/* Detect a possibly lost MSI: a vector that has pending work but
 * whose rx/tx consumer indices have not advanced since the previous
 * check is given one grace tick (chk_msi_cnt) before recovery kicks
 * in.  Any observed progress resets the grace counter and snapshots
 * the current consumer indices for the next comparison.
 */
10290 static void tg3_chk_missed_msi(struct tg3 *tp)
10294 for (i = 0; i < tp->irq_cnt; i++) {
10295 struct tg3_napi *tnapi = &tp->napi[i];
10297 if (tg3_has_work(tnapi)) {
/* Work pending, but no movement since last tick? */
10298 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10299 tnapi->last_tx_cons == tnapi->tx_cons) {
10300 if (tnapi->chk_msi_cnt < 1) {
10301 tnapi->chk_msi_cnt++;
/* Progress was made (or recovery ran): rearm the check. */
10307 tnapi->chk_msi_cnt = 0;
10308 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10309 tnapi->last_tx_cons = tnapi->tx_cons;
/* Periodic driver timer (fires every tp->timer_offset jiffies).
 * Handles missed-MSI detection, the non-tagged-status interrupt
 * race workaround, a hardware sanity check, once-per-second link
 * polling/statistics, and the ASF firmware heartbeat.
 */
10313 static void tg3_timer(unsigned long __opaque)
10315 struct tg3 *tp = (struct tg3 *) __opaque;
/* Skip this tick entirely while interrupts are being synced or a
 * reset task is queued; just re-arm the timer.
 */
10317 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10318 goto restart_timer;
10320 spin_lock(&tp->lock);
10322 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10323 tg3_flag(tp, 57765_CLASS))
10324 tg3_chk_missed_msi(tp);
10326 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10327 /* BCM4785: Flush posted writes from GbE to host memory. */
10331 if (!tg3_flag(tp, TAGGED_STATUS)) {
10332 /* All of this garbage is because when using non-tagged
10333 * IRQ status the mailbox/status_block protocol the chip
10334 * uses with the cpu is race prone.
10336 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10337 tw32(GRC_LOCAL_CTRL,
10338 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10340 tw32(HOSTCC_MODE, tp->coalesce_mode |
10341 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
/* Sanity check: the write DMA engine should always be enabled
 * while we are up.  If it isn't, the chip is wedged -- schedule
 * a full reset.
 */
10344 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10345 spin_unlock(&tp->lock);
10346 tg3_reset_task_schedule(tp);
10347 goto restart_timer;
10351 /* This part only runs once per second. */
10352 if (!--tp->timer_counter) {
10353 if (tg3_flag(tp, 5705_PLUS))
10354 tg3_periodic_fetch_stats(tp);
10356 if (tp->setlpicnt && !--tp->setlpicnt)
10357 tg3_phy_eee_enable(tp);
10359 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10363 mac_stat = tr32(MAC_STATUS);
10366 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10367 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10369 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10373 tg3_setup_phy(tp, 0);
10374 } else if (tg3_flag(tp, POLL_SERDES)) {
10375 u32 mac_stat = tr32(MAC_STATUS);
10376 int need_setup = 0;
10379 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10382 if (!tp->link_up &&
10383 (mac_stat & (MAC_STATUS_PCS_SYNCED |
10384 MAC_STATUS_SIGNAL_DET))) {
10388 if (!tp->serdes_counter) {
10391 ~MAC_MODE_PORT_MODE_MASK));
10393 tw32_f(MAC_MODE, tp->mac_mode);
10396 tg3_setup_phy(tp, 0);
10398 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10399 tg3_flag(tp, 5780_CLASS)) {
10400 tg3_serdes_parallel_detect(tp);
/* Reload the one-second countdown. */
10403 tp->timer_counter = tp->timer_multiplier;
10406 /* Heartbeat is only sent once every 2 seconds.
10408 * The heartbeat is to tell the ASF firmware that the host
10409 * driver is still alive. In the event that the OS crashes,
10410 * ASF needs to reset the hardware to free up the FIFO space
10411 * that may be filled with rx packets destined for the host.
10412 * If the FIFO is full, ASF will no longer function properly.
10414 * Unintended resets have been reported on real time kernels
10415 * where the timer doesn't run on time. Netpoll will also have
10418 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10419 * to check the ring condition when the heartbeat is expiring
10420 * before doing the reset. This will prevent most unintended
10423 if (!--tp->asf_counter) {
10424 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10425 tg3_wait_for_event_ack(tp);
10427 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10428 FWCMD_NICDRV_ALIVE3);
10429 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10430 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10431 TG3_FW_UPDATE_TIMEOUT_SEC);
10433 tg3_generate_fw_event(tp);
10435 tp->asf_counter = tp->asf_multiplier;
10438 spin_unlock(&tp->lock);
/* Fallthrough/label target: re-arm the timer for the next tick. */
10441 tp->timer.expires = jiffies + tp->timer_offset;
10442 add_timer(&tp->timer);
/* One-time setup of the periodic tg3 service timer: choose the tick
 * interval, derive the per-second and ASF-heartbeat multipliers from
 * it, and bind tg3_timer() to this tp instance.
 * NOTE(review): this extract elides some source lines (e.g. the else
 * arm that selects the HZ/10 offset, closing brace); verify against
 * the full tg3.c before relying on the exact control flow.
 */
10445 static void tg3_timer_init(struct tg3 *tp)
10447 if (tg3_flag(tp, TAGGED_STATUS) &&
10448 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10449 !tg3_flag(tp, 57765_CLASS))
10450 tp->timer_offset = HZ;
10452 tp->timer_offset = HZ / 10;
/* A tick longer than one second would break the once-per-second work
 * done in tg3_timer(). */
10454 BUG_ON(tp->timer_offset > HZ);
/* Ticks per second, and ticks per ASF firmware heartbeat period. */
10456 tp->timer_multiplier = (HZ / tp->timer_offset);
10457 tp->asf_multiplier = (HZ / tp->timer_offset) *
10458 TG3_FW_UPDATE_FREQ_SEC;
10460 init_timer(&tp->timer);
10461 tp->timer.data = (unsigned long) tp;
10462 tp->timer.function = tg3_timer;
10465 static void tg3_timer_start(struct tg3 *tp)
10467 tp->asf_counter = tp->asf_multiplier;
10468 tp->timer_counter = tp->timer_multiplier;
10470 tp->timer.expires = jiffies + tp->timer_offset;
10471 add_timer(&tp->timer);
10474 static void tg3_timer_stop(struct tg3 *tp)
10476 del_timer_sync(&tp->timer);
10479 /* Restart hardware after configuration changes, self-test, etc.
10480 * Invoked with tp->lock held.
/* NOTE(review): this extract elides some source lines (the err
 * declaration, the if (err) test, and the return paths); verify
 * against the full tg3.c.
 */
10482 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
10483 __releases(tp->lock)
10484 __acquires(tp->lock)
10488 err = tg3_init_hw(tp, reset_phy);
/* Error path: the hardware would not re-initialize.  Halt it, and
 * temporarily drop tp->lock (per the __releases/__acquires annotations
 * above) so the timer can be stopped and the device closed, then
 * reacquire the lock. */
10490 netdev_err(tp->dev,
10491 "Failed to re-initialize device, aborting\n");
10492 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10493 tg3_full_unlock(tp);
10494 tg3_timer_stop(tp);
10496 tg3_napi_enable(tp);
10497 dev_close(tp->dev);
10498 tg3_full_lock(tp, 0);
/* Deferred reset handler, run from the workqueue (scheduled via
 * tg3_reset_task_schedule()).  Halts and re-initializes the chip under
 * tp->lock, then clears RESET_TASK_PENDING.
 * NOTE(review): this extract elides some source lines (returns, an
 * if (err) test, closing braces); verify against the full tg3.c.
 */
10503 static void tg3_reset_task(struct work_struct *work)
10505 struct tg3 *tp = container_of(work, struct tg3, reset_task);
10508 tg3_full_lock(tp, 0);
/* Device was closed while the work item was queued -- nothing to do. */
10510 if (!netif_running(tp->dev)) {
10511 tg3_flag_clear(tp, RESET_TASK_PENDING);
10512 tg3_full_unlock(tp);
10516 tg3_full_unlock(tp);
10520 tg3_netif_stop(tp);
10522 tg3_full_lock(tp, 1);
/* TX recovery: fall back to flushing mailbox writes to work around
 * write reordering (see MBOX_WRITE_REORDER). */
10524 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10525 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10526 tp->write32_rx_mbox = tg3_write_flush_reg32;
10527 tg3_flag_set(tp, MBOX_WRITE_REORDER);
10528 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10531 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10532 err = tg3_init_hw(tp, 1);
10536 tg3_netif_start(tp);
10539 tg3_full_unlock(tp);
10544 tg3_flag_clear(tp, RESET_TASK_PENDING);
/* Request the IRQ for the given NAPI instance.  Picks the handler and
 * flags according to the interrupt mode (MSI one-shot, MSI/MSI-X, or
 * shared legacy INTx with optional tagged status), and builds a
 * per-vector name "<dev>-<n>" when multiple vectors are in use.
 * Returns the request_irq() result.
 * NOTE(review): this extract elides some source lines (the name/fn
 * declarations, else arms, braces); verify against the full tg3.c.
 */
10547 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10550 unsigned long flags;
10552 struct tg3_napi *tnapi = &tp->napi[irq_num];
/* Single-vector: use the netdev name directly; multi-vector: build a
 * "<dev>-<n>" label in the per-napi buffer. */
10554 if (tp->irq_cnt == 1)
10555 name = tp->dev->name;
10557 name = &tnapi->irq_lbl[0];
10558 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10559 name[IFNAMSIZ-1] = 0;
10562 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10564 if (tg3_flag(tp, 1SHOT_MSI))
10565 fn = tg3_msi_1shot;
/* Legacy INTx path: shared line; tagged status gets its own handler. */
10568 fn = tg3_interrupt;
10569 if (tg3_flag(tp, TAGGED_STATUS))
10570 fn = tg3_interrupt_tagged;
10571 flags = IRQF_SHARED;
10574 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
/* Verify that the chip can actually deliver an interrupt: swap in the
 * tg3_test_isr handler, force an interrupt via HOSTCC_MODE_NOW, and
 * poll up to 5 times for evidence of delivery (non-zero interrupt
 * mailbox or masked PCI INT), then restore the normal handler.
 * NOTE(review): this extract elides some source lines (val/intr_ok
 * bookkeeping, returns, braces, msleep between polls); verify against
 * the full tg3.c.
 */
10577 static int tg3_test_interrupt(struct tg3 *tp)
10579 struct tg3_napi *tnapi = &tp->napi[0];
10580 struct net_device *dev = tp->dev;
10581 int err, i, intr_ok = 0;
10584 if (!netif_running(dev))
10587 tg3_disable_ints(tp);
10589 free_irq(tnapi->irq_vec, tnapi);
10592 * Turn off MSI one shot mode. Otherwise this test has no
10593 * observable way to know whether the interrupt was delivered.
10595 if (tg3_flag(tp, 57765_PLUS)) {
10596 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10597 tw32(MSGINT_MODE, val);
/* Temporarily install the test ISR on the same vector. */
10600 err = request_irq(tnapi->irq_vec, tg3_test_isr,
10601 IRQF_SHARED, dev->name, tnapi);
10605 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10606 tg3_enable_ints(tp);
/* Force an immediate coalescing-engine interrupt. */
10608 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10611 for (i = 0; i < 5; i++) {
10612 u32 int_mbox, misc_host_ctrl;
10614 int_mbox = tr32_mailbox(tnapi->int_mbox);
10615 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
/* Either a non-zero interrupt mailbox or a masked PCI INT means the
 * interrupt was delivered. */
10617 if ((int_mbox != 0) ||
10618 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10623 if (tg3_flag(tp, 57765_PLUS) &&
10624 tnapi->hw_status->status_tag != tnapi->last_tag)
10625 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
/* Restore the regular handler for vector 0. */
10630 tg3_disable_ints(tp);
10632 free_irq(tnapi->irq_vec, tnapi);
10634 err = tg3_request_irq(tp, 0);
10640 /* Reenable MSI one shot mode. */
10641 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10642 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10643 tw32(MSGINT_MODE, val);
10651 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
10652 * successfully restored
/* NOTE(review): this extract elides some source lines (pci_cmd/err
 * declarations, returns, braces); verify against the full tg3.c.
 */
10654 static int tg3_test_msi(struct tg3 *tp)
10659 if (!tg3_flag(tp, USING_MSI))
10662 /* Turn off SERR reporting in case MSI terminates with Master
/* SERR is suppressed during the test and restored right after. */
10665 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10666 pci_write_config_word(tp->pdev, PCI_COMMAND,
10667 pci_cmd & ~PCI_COMMAND_SERR);
10669 err = tg3_test_interrupt(tp);
10671 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10676 /* other failures */
10680 /* MSI test failed, go back to INTx mode */
10681 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10682 "to INTx mode. Please report this failure to the PCI "
10683 "maintainer and include system chipset information\n");
10685 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10687 pci_disable_msi(tp->pdev);
10689 tg3_flag_clear(tp, USING_MSI);
10690 tp->napi[0].irq_vec = tp->pdev->irq;
10692 err = tg3_request_irq(tp, 0);
10696 /* Need to reset the chip because the MSI cycle may have terminated
10697 * with Master Abort.
10699 tg3_full_lock(tp, 1);
10701 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10702 err = tg3_init_hw(tp, 1);
10704 tg3_full_unlock(tp);
/* Re-init failed even in INTx mode: release the vector again. */
10707 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
/* Load the firmware blob named by tp->fw_needed via request_firmware()
 * and sanity-check its advertised length against the file size.  On
 * success tp->fw holds the blob and tp->fw_needed is cleared.
 * NOTE(review): this extract elides some source lines (returns, error
 * codes, braces); verify against the full tg3.c.
 */
10712 static int tg3_request_firmware(struct tg3 *tp)
10714 const struct tg3_firmware_hdr *fw_hdr;
10716 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10717 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10722 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
10724 /* Firmware blob starts with version numbers, followed by
10725 * start address and _full_ length including BSS sections
10726 * (which must be longer than the actual data, of course
/* The header-advertised length (including BSS) must be at least the
 * payload size, or the blob is corrupt. */
10729 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
10730 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
10731 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10732 tp->fw_len, tp->fw_needed);
10733 release_firmware(tp->fw);
10738 /* We no longer need firmware; we have it. */
10739 tp->fw_needed = NULL;
/* Compute how many interrupt vectors to request: the larger of the RX
 * and TX queue counts, plus one extra vector (for link interrupts) in
 * multiqueue mode, capped at tp->irq_max.
 * NOTE(review): this extract elides some source lines (the multiqueue
 * condition and the return statement); verify against the full tg3.c.
 */
10743 static u32 tg3_irq_count(struct tg3 *tp)
10745 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10748 /* We want as many rx rings enabled as there are cpus.
10749 * In multiqueue MSI-X mode, the first MSI-X vector
10750 * only deals with link interrupts, etc, so we add
10751 * one to the number of vectors we are requesting.
10753 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
/* Try to enable MSI-X.  Sizes the RX/TX queue counts (explicit
 * requests win, otherwise the default RSS queue count capped at the
 * hardware max), requests that many vectors, and falls back to a
 * smaller vector count if the PCI core offers fewer.  Returns true on
 * success, setting ENABLE_RSS/ENABLE_TSS as appropriate.
 * NOTE(review): this extract elides some source lines (i/rc
 * declarations, else arms, returns, braces); verify against the full
 * tg3.c.
 */
10759 static bool tg3_enable_msix(struct tg3 *tp)
10762 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
10764 tp->txq_cnt = tp->txq_req;
10765 tp->rxq_cnt = tp->rxq_req;
10767 tp->rxq_cnt = netif_get_num_default_rss_queues();
10768 if (tp->rxq_cnt > tp->rxq_max)
10769 tp->rxq_cnt = tp->rxq_max;
10771 /* Disable multiple TX rings by default. Simple round-robin hardware
10772 * scheduling of the TX rings can cause starvation of rings with
10773 * small packets when other rings have TSO or jumbo packets.
10778 tp->irq_cnt = tg3_irq_count(tp);
10780 for (i = 0; i < tp->irq_max; i++) {
10781 msix_ent[i].entry = i;
10782 msix_ent[i].vector = 0;
/* A positive rc means fewer vectors are available; retry with that
 * count and shrink the queue configuration to match. */
10785 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
10788 } else if (rc != 0) {
10789 if (pci_enable_msix(tp->pdev, msix_ent, rc))
10791 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
10794 tp->rxq_cnt = max(rc - 1, 1);
10796 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
10799 for (i = 0; i < tp->irq_max; i++)
10800 tp->napi[i].irq_vec = msix_ent[i].vector;
10802 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
10803 pci_disable_msix(tp->pdev);
10807 if (tp->irq_cnt == 1)
10810 tg3_flag_set(tp, ENABLE_RSS);
10812 if (tp->txq_cnt > 1)
10813 tg3_flag_set(tp, ENABLE_TSS);
10815 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
/* Select and configure the interrupt mode for this open: prefer MSI-X,
 * then MSI, otherwise legacy INTx.  Also programs MSGINT_MODE for
 * multivector / one-shot operation and collapses to single-queue
 * settings when only one vector is in use.
 * NOTE(review): this extract elides some source lines (defio/else
 * arms, braces); verify against the full tg3.c.
 */
10820 static void tg3_ints_init(struct tg3 *tp)
10822 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
10823 !tg3_flag(tp, TAGGED_STATUS)) {
10824 /* All MSI supporting chips should support tagged
10825 * status. Assert that this is the case.
10827 netdev_warn(tp->dev,
10828 "MSI without TAGGED_STATUS? Not using MSI\n");
10832 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
10833 tg3_flag_set(tp, USING_MSIX)
10834 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
10835 tg3_flag_set(tp, USING_MSI);
10837 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10838 u32 msi_mode = tr32(MSGINT_MODE);
10839 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
10840 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
10841 if (!tg3_flag(tp, 1SHOT_MSI))
10842 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
10843 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
/* Non-MSI-X: everything runs on the single legacy/MSI vector. */
10846 if (!tg3_flag(tp, USING_MSIX)) {
10848 tp->napi[0].irq_vec = tp->pdev->irq;
10851 if (tp->irq_cnt == 1) {
10854 netif_set_real_num_tx_queues(tp->dev, 1);
10855 netif_set_real_num_rx_queues(tp->dev, 1);
10859 static void tg3_ints_fini(struct tg3 *tp)
10861 if (tg3_flag(tp, USING_MSIX))
10862 pci_disable_msix(tp->pdev);
10863 else if (tg3_flag(tp, USING_MSI))
10864 pci_disable_msi(tp->pdev);
10865 tg3_flag_clear(tp, USING_MSI);
10866 tg3_flag_clear(tp, USING_MSIX);
10867 tg3_flag_clear(tp, ENABLE_RSS);
10868 tg3_flag_clear(tp, ENABLE_TSS);
/* Bring the device up: configure interrupts, allocate DMA-consistent
 * resources, request per-vector IRQs, initialize the hardware,
 * optionally run the MSI self-test, start the service timer and
 * enable interrupts, then wake the TX queues.  Unwinds in reverse
 * order on failure.
 * NOTE(review): this extract elides some source lines (error labels,
 * if (err) tests, returns, braces); verify against the full tg3.c.
 */
10871 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
10874 struct net_device *dev = tp->dev;
10878 * Setup interrupts first so we know how
10879 * many NAPI resources to allocate
10883 tg3_rss_check_indir_tbl(tp);
10885 /* The placement of this call is tied
10886 * to the setup and use of Host TX descriptors.
10888 err = tg3_alloc_consistent(tp);
10894 tg3_napi_enable(tp);
/* Request one IRQ per vector; on failure free the ones already taken. */
10896 for (i = 0; i < tp->irq_cnt; i++) {
10897 struct tg3_napi *tnapi = &tp->napi[i];
10898 err = tg3_request_irq(tp, i);
10900 for (i--; i >= 0; i--) {
10901 tnapi = &tp->napi[i];
10902 free_irq(tnapi->irq_vec, tnapi);
10908 tg3_full_lock(tp, 0);
10910 err = tg3_init_hw(tp, reset_phy);
10912 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10913 tg3_free_rings(tp);
10916 tg3_full_unlock(tp);
/* Optional MSI delivery self-test; on failure tear the rings down. */
10921 if (test_irq && tg3_flag(tp, USING_MSI)) {
10922 err = tg3_test_msi(tp);
10925 tg3_full_lock(tp, 0);
10926 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10927 tg3_free_rings(tp);
10928 tg3_full_unlock(tp);
10933 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10934 u32 val = tr32(PCIE_TRANSACTION_CFG);
10936 tw32(PCIE_TRANSACTION_CFG,
10937 val | PCIE_TRANS_CFG_1SHOT_MSI);
10943 tg3_hwmon_open(tp);
10945 tg3_full_lock(tp, 0);
10947 tg3_timer_start(tp);
10948 tg3_flag_set(tp, INIT_COMPLETE);
10949 tg3_enable_ints(tp);
10954 tg3_ptp_resume(tp);
10957 tg3_full_unlock(tp);
10959 netif_tx_start_all_queues(dev);
10962 * Reset loopback feature if it was turned on while the device was down
10963 * make sure that it's installed properly now.
10965 if (dev->features & NETIF_F_LOOPBACK)
10966 tg3_set_loopback(dev, dev->features);
/* Error unwind: free IRQs in reverse, disable NAPI, release DMA memory. */
10971 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10972 struct tg3_napi *tnapi = &tp->napi[i];
10973 free_irq(tnapi->irq_vec, tnapi);
10977 tg3_napi_disable(tp);
10979 tg3_free_consistent(tp);
/* Bring the device down: cancel any pending reset work, stop the data
 * path and service timer, halt the chip under tp->lock, then release
 * IRQs and DMA-consistent memory.
 * NOTE(review): this extract elides some source lines (i declaration,
 * tg3_napi_* calls, braces); verify against the full tg3.c.
 */
10987 static void tg3_stop(struct tg3 *tp)
10991 tg3_reset_task_cancel(tp);
10992 tg3_netif_stop(tp);
10994 tg3_timer_stop(tp);
10996 tg3_hwmon_close(tp);
11000 tg3_full_lock(tp, 1);
11002 tg3_disable_ints(tp);
11004 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11005 tg3_free_rings(tp);
11006 tg3_flag_clear(tp, INIT_COMPLETE);
11008 tg3_full_unlock(tp);
/* Release the per-vector IRQs in reverse order of acquisition. */
11010 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11011 struct tg3_napi *tnapi = &tp->napi[i];
11012 free_irq(tnapi->irq_vec, tnapi);
11019 tg3_free_consistent(tp);
/* net_device open hook.  Loads firmware if this chip needs it (with
 * EEE/TSO capability fallbacks when the load fails), powers the chip
 * up, starts the data path via tg3_start(), and registers the PTP
 * clock on PTP-capable parts.
 * NOTE(review): this extract elides some source lines (err checks,
 * returns, braces); verify against the full tg3.c.
 */
11022 static int tg3_open(struct net_device *dev)
11024 struct tg3 *tp = netdev_priv(dev);
11027 if (tp->fw_needed) {
11028 err = tg3_request_firmware(tp);
/* 57766: firmware carries the EEE support; toggle the capability flag
 * to match whether the load succeeded. */
11029 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11031 netdev_warn(tp->dev, "EEE capability disabled\n");
11032 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11033 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11034 netdev_warn(tp->dev, "EEE capability restored\n");
11035 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
/* 5701 A0: firmware carries TSO support; same toggle for TSO_CAPABLE. */
11037 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11041 netdev_warn(tp->dev, "TSO capability disabled\n");
11042 tg3_flag_clear(tp, TSO_CAPABLE);
11043 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11044 netdev_notice(tp->dev, "TSO capability restored\n");
11045 tg3_flag_set(tp, TSO_CAPABLE);
11049 tg3_carrier_off(tp);
11051 err = tg3_power_up(tp);
11055 tg3_full_lock(tp, 0);
11057 tg3_disable_ints(tp);
11058 tg3_flag_clear(tp, INIT_COMPLETE);
11060 tg3_full_unlock(tp);
11062 err = tg3_start(tp, true, true, true);
/* Start failed: drop auxiliary power and put the device in D3hot. */
11064 tg3_frob_aux_power(tp, false);
11065 pci_set_power_state(tp->pdev, PCI_D3hot);
11068 if (tg3_flag(tp, PTP_CAPABLE)) {
11069 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
11071 if (IS_ERR(tp->ptp_clock))
11072 tp->ptp_clock = NULL;
/* net_device close hook.  Clears the cumulative stats snapshots so the
 * next open starts counting from zero, then powers the chip down and
 * drops carrier.
 * NOTE(review): this extract elides some source lines (e.g. the
 * tg3_stop()/ptp teardown calls and the return); verify against the
 * full tg3.c.
 */
11078 static int tg3_close(struct net_device *dev)
11080 struct tg3 *tp = netdev_priv(dev);
11086 /* Clear stats across close / open calls */
11087 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11088 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11090 tg3_power_down(tp);
11092 tg3_carrier_off(tp);
11097 static inline u64 get_stat64(tg3_stat64_t *val)
11099 return ((u64)val->high << 32) | ((u64)val->low);
/* Return the cumulative RX CRC error count.  On 5700/5701 copper parts
 * the count comes from the PHY's RX error counter register (with the
 * CRC counter enable bit kept set); all other parts use the MAC's
 * rx_fcs_errors hardware statistic.
 * NOTE(review): this extract elides some source lines (val declaration,
 * locking around the PHY access, braces); verify against the full
 * tg3.c.
 */
11102 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11104 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11106 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11107 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11108 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11111 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11112 tg3_writephy(tp, MII_TG3_TEST1,
11113 val | MII_TG3_TEST1_CRC_EN);
11114 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11118 tp->phy_crc_errors += val;
11120 return tp->phy_crc_errors;
11123 return get_stat64(&hw_stats->rx_fcs_errors);
/* Accumulate one ethtool statistic: the previous snapshot value plus
 * the current 64-bit hardware counter.  Relies on estats, old_estats
 * and hw_stats being in scope at the expansion site (tg3_get_estats).
 */
11126 #define ESTAT_ADD(member) \
11127 estats->member = old_estats->member + \
11128 get_stat64(&hw_stats->member)
/* Fill the ethtool statistics structure: for every counter, add the
 * saved pre-close snapshot (estats_prev) to the live hardware counter
 * so values are cumulative across close/open cycles.
 */
11130 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11132 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11133 struct tg3_hw_stats *hw_stats = tp->hw_stats;
/* RX counters */
11135 ESTAT_ADD(rx_octets);
11136 ESTAT_ADD(rx_fragments);
11137 ESTAT_ADD(rx_ucast_packets);
11138 ESTAT_ADD(rx_mcast_packets);
11139 ESTAT_ADD(rx_bcast_packets);
11140 ESTAT_ADD(rx_fcs_errors);
11141 ESTAT_ADD(rx_align_errors);
11142 ESTAT_ADD(rx_xon_pause_rcvd);
11143 ESTAT_ADD(rx_xoff_pause_rcvd);
11144 ESTAT_ADD(rx_mac_ctrl_rcvd);
11145 ESTAT_ADD(rx_xoff_entered);
11146 ESTAT_ADD(rx_frame_too_long_errors);
11147 ESTAT_ADD(rx_jabbers);
11148 ESTAT_ADD(rx_undersize_packets);
11149 ESTAT_ADD(rx_in_length_errors);
11150 ESTAT_ADD(rx_out_length_errors);
11151 ESTAT_ADD(rx_64_or_less_octet_packets);
11152 ESTAT_ADD(rx_65_to_127_octet_packets);
11153 ESTAT_ADD(rx_128_to_255_octet_packets);
11154 ESTAT_ADD(rx_256_to_511_octet_packets);
11155 ESTAT_ADD(rx_512_to_1023_octet_packets);
11156 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11157 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11158 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11159 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11160 ESTAT_ADD(rx_8192_to_9022_octet_packets);
/* TX counters */
11162 ESTAT_ADD(tx_octets);
11163 ESTAT_ADD(tx_collisions);
11164 ESTAT_ADD(tx_xon_sent);
11165 ESTAT_ADD(tx_xoff_sent);
11166 ESTAT_ADD(tx_flow_control);
11167 ESTAT_ADD(tx_mac_errors);
11168 ESTAT_ADD(tx_single_collisions);
11169 ESTAT_ADD(tx_mult_collisions);
11170 ESTAT_ADD(tx_deferred);
11171 ESTAT_ADD(tx_excessive_collisions);
11172 ESTAT_ADD(tx_late_collisions);
11173 ESTAT_ADD(tx_collide_2times);
11174 ESTAT_ADD(tx_collide_3times);
11175 ESTAT_ADD(tx_collide_4times);
11176 ESTAT_ADD(tx_collide_5times);
11177 ESTAT_ADD(tx_collide_6times);
11178 ESTAT_ADD(tx_collide_7times);
11179 ESTAT_ADD(tx_collide_8times);
11180 ESTAT_ADD(tx_collide_9times);
11181 ESTAT_ADD(tx_collide_10times);
11182 ESTAT_ADD(tx_collide_11times);
11183 ESTAT_ADD(tx_collide_12times);
11184 ESTAT_ADD(tx_collide_13times);
11185 ESTAT_ADD(tx_collide_14times);
11186 ESTAT_ADD(tx_collide_15times);
11187 ESTAT_ADD(tx_ucast_packets);
11188 ESTAT_ADD(tx_mcast_packets);
11189 ESTAT_ADD(tx_bcast_packets);
11190 ESTAT_ADD(tx_carrier_sense_errors);
11191 ESTAT_ADD(tx_discards);
11192 ESTAT_ADD(tx_errors);
/* DMA / receive-list-placement counters */
11194 ESTAT_ADD(dma_writeq_full);
11195 ESTAT_ADD(dma_write_prioq_full);
11196 ESTAT_ADD(rxbds_empty);
11197 ESTAT_ADD(rx_discards);
11198 ESTAT_ADD(rx_errors);
11199 ESTAT_ADD(rx_threshold_hit);
11201 ESTAT_ADD(dma_readq_full);
11202 ESTAT_ADD(dma_read_prioq_full);
11203 ESTAT_ADD(tx_comp_queue_full);
/* Host coalescing / interrupt counters */
11205 ESTAT_ADD(ring_set_send_prod_index);
11206 ESTAT_ADD(ring_status_update);
11207 ESTAT_ADD(nic_irqs);
11208 ESTAT_ADD(nic_avoided_irqs);
11209 ESTAT_ADD(nic_tx_threshold_hit);
11211 ESTAT_ADD(mbuf_lwm_thresh_hit);
/* Fill rtnl_link_stats64 from the hardware statistics block, adding
 * the pre-close snapshot (net_stats_prev) so counters are cumulative
 * across close/open cycles.  rx_dropped/tx_dropped come from software
 * counters maintained by the driver.
 */
11214 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11216 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11217 struct tg3_hw_stats *hw_stats = tp->hw_stats;
/* Packets are the sum of unicast, multicast and broadcast counters. */
11219 stats->rx_packets = old_stats->rx_packets +
11220 get_stat64(&hw_stats->rx_ucast_packets) +
11221 get_stat64(&hw_stats->rx_mcast_packets) +
11222 get_stat64(&hw_stats->rx_bcast_packets);
11224 stats->tx_packets = old_stats->tx_packets +
11225 get_stat64(&hw_stats->tx_ucast_packets) +
11226 get_stat64(&hw_stats->tx_mcast_packets) +
11227 get_stat64(&hw_stats->tx_bcast_packets);
11229 stats->rx_bytes = old_stats->rx_bytes +
11230 get_stat64(&hw_stats->rx_octets);
11231 stats->tx_bytes = old_stats->tx_bytes +
11232 get_stat64(&hw_stats->tx_octets);
11234 stats->rx_errors = old_stats->rx_errors +
11235 get_stat64(&hw_stats->rx_errors);
11236 stats->tx_errors = old_stats->tx_errors +
11237 get_stat64(&hw_stats->tx_errors) +
11238 get_stat64(&hw_stats->tx_mac_errors) +
11239 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11240 get_stat64(&hw_stats->tx_discards);
11242 stats->multicast = old_stats->multicast +
11243 get_stat64(&hw_stats->rx_mcast_packets);
11244 stats->collisions = old_stats->collisions +
11245 get_stat64(&hw_stats->tx_collisions);
11247 stats->rx_length_errors = old_stats->rx_length_errors +
11248 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11249 get_stat64(&hw_stats->rx_undersize_packets);
11251 stats->rx_over_errors = old_stats->rx_over_errors +
11252 get_stat64(&hw_stats->rxbds_empty);
11253 stats->rx_frame_errors = old_stats->rx_frame_errors +
11254 get_stat64(&hw_stats->rx_align_errors);
11255 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11256 get_stat64(&hw_stats->tx_discards);
11257 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11258 get_stat64(&hw_stats->tx_carrier_sense_errors);
/* CRC errors may come from the PHY on 5700/5701 -- see
 * tg3_calc_crc_errors(). */
11260 stats->rx_crc_errors = old_stats->rx_crc_errors +
11261 tg3_calc_crc_errors(tp);
11263 stats->rx_missed_errors = old_stats->rx_missed_errors +
11264 get_stat64(&hw_stats->rx_discards);
11266 stats->rx_dropped = tp->rx_dropped;
11267 stats->tx_dropped = tp->tx_dropped;
11270 static int tg3_get_regs_len(struct net_device *dev)
11272 return TG3_REG_BLK_SIZE;
/* ethtool register dump: zero the output buffer, then (unless the PHY
 * is in low-power state) dump the legacy register block under
 * tp->lock.
 * NOTE(review): this extract elides some source lines (e.g. the early
 * return for the low-power case); verify against the full tg3.c.
 */
11275 static void tg3_get_regs(struct net_device *dev,
11276 struct ethtool_regs *regs, void *_p)
11278 struct tg3 *tp = netdev_priv(dev);
11282 memset(_p, 0, TG3_REG_BLK_SIZE);
/* Registers are not accessible while the device is powered down. */
11284 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11287 tg3_full_lock(tp, 0);
11289 tg3_dump_legacy_regs(tp, (u32 *)_p);
11291 tg3_full_unlock(tp);
11294 static int tg3_get_eeprom_len(struct net_device *dev)
11296 struct tg3 *tp = netdev_priv(dev);
11298 return tp->nvram_size;
/* ethtool EEPROM read.  NVRAM is accessed in 32-bit big-endian words,
 * so the request is split into an unaligned head, whole 4-byte words,
 * and an unaligned tail.
 * NOTE(review): this extract elides some source lines (declarations of
 * ret/pd/val, error returns, len adjustments, braces); verify against
 * the full tg3.c.
 */
11301 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11303 struct tg3 *tp = netdev_priv(dev);
11306 u32 i, offset, len, b_offset, b_count;
11309 if (tg3_flag(tp, NO_NVRAM))
11312 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11315 offset = eeprom->offset;
11319 eeprom->magic = TG3_EEPROM_MAGIC;
11322 /* adjustments to start on required 4 byte boundary */
11323 b_offset = offset & 3;
11324 b_count = 4 - b_offset;
11325 if (b_count > len) {
11326 /* i.e. offset=1 len=2 */
11329 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11332 memcpy(data, ((char *)&val) + b_offset, b_count);
11335 eeprom->len += b_count;
11338 /* read bytes up to the last 4 byte boundary */
11339 pd = &data[eeprom->len];
11340 for (i = 0; i < (len - (len & 3)); i += 4) {
11341 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11346 memcpy(pd + i, &val, 4);
11351 /* read last bytes not ending on 4 byte boundary */
11352 pd = &data[eeprom->len];
11354 b_offset = offset + len - b_count;
11355 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11358 memcpy(pd, &val, b_count);
11359 eeprom->len += b_count;
/* ethtool EEPROM write.  NVRAM writes must be 4-byte aligned, so an
 * unaligned request is widened: the bordering words are read first,
 * the user data is merged into a temporary buffer, and the whole
 * aligned range is written back.
 * NOTE(review): this extract elides some source lines (ret/buf/start/
 * end declarations, error returns, kfree, braces); verify against the
 * full tg3.c.
 */
11364 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11366 struct tg3 *tp = netdev_priv(dev);
11368 u32 offset, len, b_offset, odd_len;
11372 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
/* Refuse writes unless the caller proved intent with the magic. */
11375 if (tg3_flag(tp, NO_NVRAM) ||
11376 eeprom->magic != TG3_EEPROM_MAGIC)
11379 offset = eeprom->offset;
11382 if ((b_offset = (offset & 3))) {
11383 /* adjustments to start on required 4 byte boundary */
11384 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11395 /* adjustments to end on required 4 byte boundary */
11397 len = (len + 3) & ~3;
11398 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
/* Unaligned request: merge user data between the preserved border
 * words in a scratch buffer. */
11404 if (b_offset || odd_len) {
11405 buf = kmalloc(len, GFP_KERNEL);
11409 memcpy(buf, &start, 4);
11411 memcpy(buf+len-4, &end, 4);
11412 memcpy(buf + b_offset, data, eeprom->len);
11415 ret = tg3_nvram_write_block(tp, offset, len, buf);
/* ethtool get_settings.  Delegates to phylib when USE_PHYLIB is set;
 * otherwise reports supported/advertised modes, pause advertisement,
 * and (if the link is up) the active speed/duplex/MDI-X state from
 * the driver's link_config.
 * NOTE(review): this extract elides some source lines (error returns,
 * SUPPORTED_TP/Autoneg bits, else arms, braces); verify against the
 * full tg3.c.
 */
11423 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11425 struct tg3 *tp = netdev_priv(dev);
11427 if (tg3_flag(tp, USE_PHYLIB)) {
11428 struct phy_device *phydev;
11429 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11431 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11432 return phy_ethtool_gset(phydev, cmd);
11435 cmd->supported = (SUPPORTED_Autoneg);
11437 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11438 cmd->supported |= (SUPPORTED_1000baseT_Half |
11439 SUPPORTED_1000baseT_Full);
/* Copper PHYs also support 10/100 and report a TP port; serdes parts
 * report fibre. */
11441 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11442 cmd->supported |= (SUPPORTED_100baseT_Half |
11443 SUPPORTED_100baseT_Full |
11444 SUPPORTED_10baseT_Half |
11445 SUPPORTED_10baseT_Full |
11447 cmd->port = PORT_TP;
11449 cmd->supported |= SUPPORTED_FIBRE;
11450 cmd->port = PORT_FIBRE;
11453 cmd->advertising = tp->link_config.advertising;
11454 if (tg3_flag(tp, PAUSE_AUTONEG)) {
11455 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11456 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11457 cmd->advertising |= ADVERTISED_Pause;
11459 cmd->advertising |= ADVERTISED_Pause |
11460 ADVERTISED_Asym_Pause;
11462 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11463 cmd->advertising |= ADVERTISED_Asym_Pause;
/* Active link parameters are only meaningful while up. */
11466 if (netif_running(dev) && tp->link_up) {
11467 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11468 cmd->duplex = tp->link_config.active_duplex;
11469 cmd->lp_advertising = tp->link_config.rmt_adv;
11470 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11471 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11472 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11474 cmd->eth_tp_mdix = ETH_TP_MDI;
11477 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11478 cmd->duplex = DUPLEX_UNKNOWN;
11479 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11481 cmd->phy_address = tp->phy_addr;
11482 cmd->transceiver = XCVR_INTERNAL;
11483 cmd->autoneg = tp->link_config.autoneg;
/* ethtool set_settings.  Delegates to phylib when USE_PHYLIB is set;
 * otherwise validates the autoneg/speed/duplex/advertising request
 * against the PHY's capabilities, stores it in link_config under
 * tp->lock, and re-runs PHY setup if the interface is running.
 * NOTE(review): this extract elides some source lines (-EINVAL
 * returns, serdes speed checks, braces); verify against the full
 * tg3.c.
 */
11489 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11491 struct tg3 *tp = netdev_priv(dev);
11492 u32 speed = ethtool_cmd_speed(cmd);
11494 if (tg3_flag(tp, USE_PHYLIB)) {
11495 struct phy_device *phydev;
11496 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11498 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11499 return phy_ethtool_sset(phydev, cmd);
11502 if (cmd->autoneg != AUTONEG_ENABLE &&
11503 cmd->autoneg != AUTONEG_DISABLE)
11506 if (cmd->autoneg == AUTONEG_DISABLE &&
11507 cmd->duplex != DUPLEX_FULL &&
11508 cmd->duplex != DUPLEX_HALF)
/* Autoneg: reject any advertised bit the PHY cannot support. */
11511 if (cmd->autoneg == AUTONEG_ENABLE) {
11512 u32 mask = ADVERTISED_Autoneg |
11514 ADVERTISED_Asym_Pause;
11516 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11517 mask |= ADVERTISED_1000baseT_Half |
11518 ADVERTISED_1000baseT_Full;
11520 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11521 mask |= ADVERTISED_100baseT_Half |
11522 ADVERTISED_100baseT_Full |
11523 ADVERTISED_10baseT_Half |
11524 ADVERTISED_10baseT_Full |
11527 mask |= ADVERTISED_FIBRE;
11529 if (cmd->advertising & ~mask)
/* Keep only the link-mode bits for the stored advertising mask. */
11532 mask &= (ADVERTISED_1000baseT_Half |
11533 ADVERTISED_1000baseT_Full |
11534 ADVERTISED_100baseT_Half |
11535 ADVERTISED_100baseT_Full |
11536 ADVERTISED_10baseT_Half |
11537 ADVERTISED_10baseT_Full);
11539 cmd->advertising &= mask;
/* Forced mode on serdes parts is restricted (1000/full). */
11541 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
11542 if (speed != SPEED_1000)
11545 if (cmd->duplex != DUPLEX_FULL)
11548 if (speed != SPEED_100 &&
11554 tg3_full_lock(tp, 0);
11556 tp->link_config.autoneg = cmd->autoneg;
11557 if (cmd->autoneg == AUTONEG_ENABLE) {
11558 tp->link_config.advertising = (cmd->advertising |
11559 ADVERTISED_Autoneg);
11560 tp->link_config.speed = SPEED_UNKNOWN;
11561 tp->link_config.duplex = DUPLEX_UNKNOWN;
11563 tp->link_config.advertising = 0;
11564 tp->link_config.speed = speed;
11565 tp->link_config.duplex = cmd->duplex;
11568 if (netif_running(dev))
11569 tg3_setup_phy(tp, 1);
11571 tg3_full_unlock(tp);
11576 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11578 struct tg3 *tp = netdev_priv(dev);
11580 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11581 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11582 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11583 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
/* ethtool: report Wake-on-LAN capability and current setting.  Only
 * magic-packet wake is supported, and only when both the chip (WOL_CAP)
 * and the platform (device_can_wakeup) allow it.
 * NOTE(review): this extract elides some source lines (the else arms
 * zeroing supported/wolopts); verify against the full tg3.c.
 */
11586 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11588 struct tg3 *tp = netdev_priv(dev);
11590 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11591 wol->supported = WAKE_MAGIC;
11593 wol->supported = 0;
11595 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11596 wol->wolopts = WAKE_MAGIC;
/* No SecureOn password support. */
11597 memset(&wol->sopass, 0, sizeof(wol->sopass));
/* ethtool: configure Wake-on-LAN.  Accepts only WAKE_MAGIC (or
 * nothing), and only when the chip and platform support wakeup.
 * Updates both the device wakeup state and the driver's WOL_ENABLE
 * flag under tp->lock.
 * NOTE(review): this extract elides some source lines (-EINVAL and
 * final returns); verify against the full tg3.c.
 */
11600 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11602 struct tg3 *tp = netdev_priv(dev);
11603 struct device *dp = &tp->pdev->dev;
/* Reject any wake option other than magic packet. */
11605 if (wol->wolopts & ~WAKE_MAGIC)
11607 if ((wol->wolopts & WAKE_MAGIC) &&
11608 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11611 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11613 spin_lock_bh(&tp->lock);
11614 if (device_may_wakeup(dp))
11615 tg3_flag_set(tp, WOL_ENABLE);
11617 tg3_flag_clear(tp, WOL_ENABLE);
11618 spin_unlock_bh(&tp->lock);
11623 static u32 tg3_get_msglevel(struct net_device *dev)
11625 struct tg3 *tp = netdev_priv(dev);
11626 return tp->msg_enable;
11629 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11631 struct tg3 *tp = netdev_priv(dev);
11632 tp->msg_enable = value;
/* ethtool: restart autonegotiation.  Delegates to phylib when in use;
 * otherwise restarts aneg via MII_BMCR if autoneg is enabled (or
 * parallel detect is active).  Not supported on full serdes PHYs.
 * NOTE(review): this extract elides some source lines (r declaration,
 * error returns, the ANENABLE bit in the write, braces); verify
 * against the full tg3.c.
 */
11635 static int tg3_nway_reset(struct net_device *dev)
11637 struct tg3 *tp = netdev_priv(dev);
11640 if (!netif_running(dev))
11643 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11646 if (tg3_flag(tp, USE_PHYLIB)) {
11647 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11649 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
11653 spin_lock_bh(&tp->lock);
/* BMCR is read twice back to back; only the second (checked) read's
 * value is used. */
11655 tg3_readphy(tp, MII_BMCR, &bmcr);
11656 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11657 ((bmcr & BMCR_ANENABLE) ||
11658 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
11659 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
11663 spin_unlock_bh(&tp->lock);
/* ethtool: report ring sizes -- hardware maxima and currently
 * configured pending counts for the standard RX, jumbo RX and TX
 * rings.  Jumbo values are zero when the jumbo ring is disabled.
 * NOTE(review): this extract elides the else arms zeroing the jumbo
 * fields; verify against the full tg3.c.
 */
11669 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11671 struct tg3 *tp = netdev_priv(dev);
11673 ering->rx_max_pending = tp->rx_std_ring_mask;
11674 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11675 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11677 ering->rx_jumbo_max_pending = 0;
11679 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11681 ering->rx_pending = tp->rx_pending;
11682 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11683 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11685 ering->rx_jumbo_pending = 0;
11687 ering->tx_pending = tp->napi[0].tx_pending;
/* ethtool: resize the RX/jumbo/TX rings.  Validates the requested
 * sizes (TX must exceed MAX_SKB_FRAGS, tripled on TSO_BUG parts),
 * stops the data path if running, applies the new sizes to every NAPI
 * instance, then halts and restarts the hardware.
 * NOTE(review): this extract elides some source lines (-EINVAL return,
 * irq_sync assignment, tg3_timer_start, final return, braces); verify
 * against the full tg3.c.
 */
11690 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11692 struct tg3 *tp = netdev_priv(dev);
11693 int i, irq_sync = 0, err = 0;
11695 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11696 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
11697 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11698 (ering->tx_pending <= MAX_SKB_FRAGS) ||
11699 (tg3_flag(tp, TSO_BUG) &&
11700 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
11703 if (netif_running(dev)) {
11705 tg3_netif_stop(tp);
11709 tg3_full_lock(tp, irq_sync);
11711 tp->rx_pending = ering->rx_pending;
/* Some chips cap the standard RX ring at 64 entries. */
11713 if (tg3_flag(tp, MAX_RXPEND_64) &&
11714 tp->rx_pending > 63)
11715 tp->rx_pending = 63;
11716 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
11718 for (i = 0; i < tp->irq_max; i++)
11719 tp->napi[i].tx_pending = ering->tx_pending;
11721 if (netif_running(dev)) {
11722 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11723 err = tg3_restart_hw(tp, 0);
11725 tg3_netif_start(tp);
11728 tg3_full_unlock(tp);
11730 if (irq_sync && !err)
/* ethtool: report flow-control configuration -- whether pause is
 * autonegotiated, and the current RX/TX pause enables.
 * NOTE(review): this extract elides the else arms zeroing
 * rx_pause/tx_pause; verify against the full tg3.c.
 */
11736 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11738 struct tg3 *tp = netdev_priv(dev);
11740 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11742 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11743 epause->rx_pause = 1;
11745 epause->rx_pause = 0;
11747 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11748 epause->tx_pause = 1;
11750 epause->tx_pause = 0;
/* ethtool: configure flow control.  On phylib devices, translates the
 * rx/tx pause request into Pause/Asym_Pause advertisement bits and
 * (when connected and autonegotiating) restarts aneg so the partner
 * learns the new setting.  On non-phylib devices, updates the
 * PAUSE_AUTONEG flag and flowctrl bits under tp->lock and restarts
 * the hardware if running.
 * NOTE(review): this extract elides some source lines (err/irq_sync
 * declarations, -EINVAL returns, else arms, braces); verify against
 * the full tg3.c.
 */
11753 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11755 struct tg3 *tp = netdev_priv(dev);
11758 if (tg3_flag(tp, USE_PHYLIB)) {
11760 struct phy_device *phydev;
11762 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
/* Asymmetric pause requires PHY support for Asym_Pause. */
11764 if (!(phydev->supported & SUPPORTED_Pause) ||
11765 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
11766 (epause->rx_pause != epause->tx_pause)))
11769 tp->link_config.flowctrl = 0;
11770 if (epause->rx_pause) {
11771 tp->link_config.flowctrl |= FLOW_CTRL_RX;
11773 if (epause->tx_pause) {
11774 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11775 newadv = ADVERTISED_Pause;
11777 newadv = ADVERTISED_Pause |
11778 ADVERTISED_Asym_Pause;
11779 } else if (epause->tx_pause) {
11780 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11781 newadv = ADVERTISED_Asym_Pause;
11785 if (epause->autoneg)
11786 tg3_flag_set(tp, PAUSE_AUTONEG);
11788 tg3_flag_clear(tp, PAUSE_AUTONEG);
11790 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
11791 u32 oldadv = phydev->advertising &
11792 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
11793 if (oldadv != newadv) {
11794 phydev->advertising &=
11795 ~(ADVERTISED_Pause |
11796 ADVERTISED_Asym_Pause);
11797 phydev->advertising |= newadv;
11798 if (phydev->autoneg) {
11800 * Always renegotiate the link to
11801 * inform our link partner of our
11802 * flow control settings, even if the
11803 * flow control is forced. Let
11804 * tg3_adjust_link() do the final
11805 * flow control setup.
11807 return phy_start_aneg(phydev);
11811 if (!epause->autoneg)
11812 tg3_setup_flow_control(tp, 0, 0);
11814 tp->link_config.advertising &=
11815 ~(ADVERTISED_Pause |
11816 ADVERTISED_Asym_Pause);
11817 tp->link_config.advertising |= newadv;
/* Non-phylib path below. */
11822 if (netif_running(dev)) {
11823 tg3_netif_stop(tp);
11827 tg3_full_lock(tp, irq_sync);
11829 if (epause->autoneg)
11830 tg3_flag_set(tp, PAUSE_AUTONEG);
11832 tg3_flag_clear(tp, PAUSE_AUTONEG);
11833 if (epause->rx_pause)
11834 tp->link_config.flowctrl |= FLOW_CTRL_RX;
11836 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
11837 if (epause->tx_pause)
11838 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11840 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
11842 if (netif_running(dev)) {
11843 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11844 err = tg3_restart_hw(tp, 0);
11846 tg3_netif_start(tp);
11849 tg3_full_unlock(tp);
11855 static int tg3_get_sset_count(struct net_device *dev, int sset)
11859 return TG3_NUM_TEST;
11861 return TG3_NUM_STATS;
11863 return -EOPNOTSUPP;
/* ethtool .get_rxnfc handler.
 *
 * Only supported on MSI-X capable parts.  For ETHTOOL_GRXRINGS it reports
 * the active RX queue count when running, otherwise the default queue
 * count clamped to TG3_RSS_MAX_NUM_QS.
 *
 * NOTE(review): the extraction listing dropped the lines between 11886
 * and 11892 (the adjustment for the link-interrupt vector and the
 * return/default labels) — verify against upstream before relying on the
 * exact returned value.
 */
11867 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11868 u32 *rules __always_unused)
11870 struct tg3 *tp = netdev_priv(dev);
11872 if (!tg3_flag(tp, SUPPORT_MSIX))
11873 return -EOPNOTSUPP;
11875 switch (info->cmd) {
11876 case ETHTOOL_GRXRINGS:
11877 if (netif_running(tp->dev))
11878 info->data = tp->rxq_cnt;
/* not running: report what would be used, capped at the RSS max */
11880 info->data = num_online_cpus();
11881 if (info->data > TG3_RSS_MAX_NUM_QS)
11882 info->data = TG3_RSS_MAX_NUM_QS;
11885 /* The first interrupt vector only
11886 * handles link interrupts.
11892 return -EOPNOTSUPP;
11896 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11899 struct tg3 *tp = netdev_priv(dev);
11901 if (tg3_flag(tp, SUPPORT_MSIX))
11902 size = TG3_RSS_INDIR_TBL_SIZE;
11907 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11909 struct tg3 *tp = netdev_priv(dev);
11912 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11913 indir[i] = tp->rss_ind_tbl[i];
11918 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11920 struct tg3 *tp = netdev_priv(dev);
11923 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11924 tp->rss_ind_tbl[i] = indir[i];
11926 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11929 /* It is legal to write the indirection
11930 * table while the device is running.
11932 tg3_full_lock(tp, 0);
11933 tg3_rss_write_indir_tbl(tp);
11934 tg3_full_unlock(tp);
11939 static void tg3_get_channels(struct net_device *dev,
11940 struct ethtool_channels *channel)
11942 struct tg3 *tp = netdev_priv(dev);
11943 u32 deflt_qs = netif_get_num_default_rss_queues();
11945 channel->max_rx = tp->rxq_max;
11946 channel->max_tx = tp->txq_max;
11948 if (netif_running(dev)) {
11949 channel->rx_count = tp->rxq_cnt;
11950 channel->tx_count = tp->txq_cnt;
11953 channel->rx_count = tp->rxq_req;
11955 channel->rx_count = min(deflt_qs, tp->rxq_max);
11958 channel->tx_count = tp->txq_req;
11960 channel->tx_count = min(deflt_qs, tp->txq_max);
11964 static int tg3_set_channels(struct net_device *dev,
11965 struct ethtool_channels *channel)
11967 struct tg3 *tp = netdev_priv(dev);
11969 if (!tg3_flag(tp, SUPPORT_MSIX))
11970 return -EOPNOTSUPP;
11972 if (channel->rx_count > tp->rxq_max ||
11973 channel->tx_count > tp->txq_max)
11976 tp->rxq_req = channel->rx_count;
11977 tp->txq_req = channel->tx_count;
11979 if (!netif_running(dev))
11984 tg3_carrier_off(tp);
11986 tg3_start(tp, true, false, false);
11991 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11993 switch (stringset) {
11995 memcpy(buf, ðtool_stats_keys, sizeof(ethtool_stats_keys));
11998 memcpy(buf, ðtool_test_keys, sizeof(ethtool_test_keys));
12001 WARN_ON(1); /* we need a WARN() */
12006 static int tg3_set_phys_id(struct net_device *dev,
12007 enum ethtool_phys_id_state state)
12009 struct tg3 *tp = netdev_priv(dev);
12011 if (!netif_running(tp->dev))
12015 case ETHTOOL_ID_ACTIVE:
12016 return 1; /* cycle on/off once per second */
12018 case ETHTOOL_ID_ON:
12019 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12020 LED_CTRL_1000MBPS_ON |
12021 LED_CTRL_100MBPS_ON |
12022 LED_CTRL_10MBPS_ON |
12023 LED_CTRL_TRAFFIC_OVERRIDE |
12024 LED_CTRL_TRAFFIC_BLINK |
12025 LED_CTRL_TRAFFIC_LED);
12028 case ETHTOOL_ID_OFF:
12029 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12030 LED_CTRL_TRAFFIC_OVERRIDE);
12033 case ETHTOOL_ID_INACTIVE:
12034 tw32(MAC_LED_CTRL, tp->led_ctrl);
12041 static void tg3_get_ethtool_stats(struct net_device *dev,
12042 struct ethtool_stats *estats, u64 *tmp_stats)
12044 struct tg3 *tp = netdev_priv(dev);
12047 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12049 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
/* Read the VPD (Vital Product Data) block for this NIC.
 *
 * Locates the VPD either via an extended-VPD NVRAM directory entry
 * (TG3_NVM_DIRTYPE_EXTVPD) or at the default NVRAM offset, allocates a
 * buffer and fills it from NVRAM; when NVRAM is absent it falls back to
 * PCI config-space VPD via pci_read_vpd().
 *
 * NOTE(review): the extraction listing dropped lines throughout (error
 * labels, braces, declarations of 'buf'/'val'/'i', and the entire tail
 * of the function after line 12110, including the error unwind and
 * return).  Verify against upstream before relying on this text.
 */
12052 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12056 u32 offset = 0, len = 0;
12059 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
/* Scan the NVRAM directory for an extended-VPD entry */
12062 if (magic == TG3_EEPROM_MAGIC) {
12063 for (offset = TG3_NVM_DIR_START;
12064 offset < TG3_NVM_DIR_END;
12065 offset += TG3_NVM_DIRENT_SIZE) {
12066 if (tg3_nvram_read(tp, offset, &val))
12069 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12070 TG3_NVM_DIRTYPE_EXTVPD)
12074 if (offset != TG3_NVM_DIR_END) {
12075 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12076 if (tg3_nvram_read(tp, offset + 4, &offset))
12079 offset = tg3_nvram_logical_addr(tp, offset);
/* No directory entry found: use the fixed default VPD location */
12083 if (!offset || !len) {
12084 offset = TG3_NVM_VPD_OFF;
12085 len = TG3_NVM_VPD_LEN;
12088 buf = kmalloc(len, GFP_KERNEL);
12092 if (magic == TG3_EEPROM_MAGIC) {
12093 for (i = 0; i < len; i += 4) {
12094 /* The data is in little-endian format in NVRAM.
12095 * Use the big-endian read routines to preserve
12096 * the byte order as it exists in NVRAM.
12098 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
/* Fallback: read VPD through PCI config space in up to 3 chunks */
12104 unsigned int pos = 0;
12106 ptr = (u8 *)&buf[0];
12107 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12108 cnt = pci_read_vpd(tp->pdev, pos,
12110 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12128 #define NVRAM_TEST_SIZE 0x100
12129 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12130 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12131 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12132 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12133 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12134 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12135 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12136 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
/* Self-test: verify NVRAM contents.
 *
 * Determines the NVRAM image format from its magic word, reads the
 * relevant region, then validates it: selfboot firmware images get a
 * byte checksum (rev 2 skips the MBA word), selfboot-HW images get a
 * per-byte parity check, legacy images get CRC checks on the bootstrap
 * and manufacturing blocks, and finally the VPD block's RO-section
 * checksum is verified.
 *
 * NOTE(review): the extraction listing dropped many lines (returns,
 * braces, 'goto out' error paths, kfree calls, declarations of 'buf',
 * 'l', 'msk', 'csum8') — the embedded line numbers are non-contiguous.
 * Verify against upstream before relying on control flow here.
 */
12138 static int tg3_test_nvram(struct tg3 *tp)
12140 u32 csum, magic, len;
12142 int i, j, k, err = 0, size;
12144 if (tg3_flag(tp, NO_NVRAM))
12147 if (tg3_nvram_read(tp, 0, &magic) != 0)
/* Pick the test size from the image format encoded in the magic word */
12150 if (magic == TG3_EEPROM_MAGIC)
12151 size = NVRAM_TEST_SIZE;
12152 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12153 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12154 TG3_EEPROM_SB_FORMAT_1) {
12155 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12156 case TG3_EEPROM_SB_REVISION_0:
12157 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12159 case TG3_EEPROM_SB_REVISION_2:
12160 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12162 case TG3_EEPROM_SB_REVISION_3:
12163 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12165 case TG3_EEPROM_SB_REVISION_4:
12166 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12168 case TG3_EEPROM_SB_REVISION_5:
12169 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12171 case TG3_EEPROM_SB_REVISION_6:
12172 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12179 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12180 size = NVRAM_SELFBOOT_HW_SIZE;
12184 buf = kmalloc(size, GFP_KERNEL);
/* Read the region to test, preserving NVRAM byte order */
12189 for (i = 0, j = 0; i < size; i += 4, j++) {
12190 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12197 /* Selfboot format */
12198 magic = be32_to_cpu(buf[0]);
12199 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12200 TG3_EEPROM_MAGIC_FW) {
12201 u8 *buf8 = (u8 *) buf, csum8 = 0;
12203 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12204 TG3_EEPROM_SB_REVISION_2) {
12205 /* For rev 2, the csum doesn't include the MBA. */
12206 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12208 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12211 for (i = 0; i < size; i++)
/* Selfboot-HW format: data bytes carry parity bits interleaved */
12224 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12225 TG3_EEPROM_MAGIC_HW) {
12226 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12227 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12228 u8 *buf8 = (u8 *) buf;
12230 /* Separate the parity bits and the data bytes. */
12231 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12232 if ((i == 0) || (i == 8)) {
12236 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12237 parity[k++] = buf8[i] & msk;
12239 } else if (i == 16) {
12243 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12244 parity[k++] = buf8[i] & msk;
12247 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12248 parity[k++] = buf8[i] & msk;
12251 data[j++] = buf8[i];
/* Each data byte's popcount parity must match its stored parity bit */
12255 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12256 u8 hw8 = hweight8(data[i]);
12258 if ((hw8 & 0x1) && parity[i])
12260 else if (!(hw8 & 0x1) && !parity[i])
12269 /* Bootstrap checksum at offset 0x10 */
12270 csum = calc_crc((unsigned char *) buf, 0x10);
12271 if (csum != le32_to_cpu(buf[0x10/4]))
12274 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12275 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12276 if (csum != le32_to_cpu(buf[0xfc/4]))
/* Finally validate the VPD read-only section checksum */
12281 buf = tg3_vpd_readblock(tp, &len);
12285 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12287 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12291 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12294 i += PCI_VPD_LRDT_TAG_SIZE;
12295 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12296 PCI_VPD_RO_KEYWORD_CHKSUM);
12300 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12302 for (i = 0; i <= j; i++)
12303 csum8 += ((u8 *)buf)[i];
12317 #define TG3_SERDES_TIMEOUT_SEC 2
12318 #define TG3_COPPER_TIMEOUT_SEC 6
12320 static int tg3_test_link(struct tg3 *tp)
12324 if (!netif_running(tp->dev))
12327 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12328 max = TG3_SERDES_TIMEOUT_SEC;
12330 max = TG3_COPPER_TIMEOUT_SEC;
12332 for (i = 0; i < max; i++) {
12336 if (msleep_interruptible(1000))
12343 /* Only test the commonly used registers */
/* Self-test: register read/write test.
 *
 * Walks a table of MAC / RX BD / host-coalescing / buffer-manager /
 * mailbox registers.  For each applicable entry it saves the register,
 * writes 0 and then (read_mask | write_mask), checking after each write
 * that the read-only bits are unchanged and the read/write bits took the
 * written value, then restores the original value.  Table entries are
 * filtered by chip family via the TG3_FL_* flags.
 *
 * NOTE(review): the extraction listing dropped lines (the reg_tbl struct
 * field declarations, braces, 'goto out' on failure, and the success
 * return) — embedded line numbers are non-contiguous.  Verify against
 * upstream before relying on control flow here.
 */
12344 static int tg3_test_registers(struct tg3 *tp)
12346 int i, is_5705, is_5750;
12347 u32 offset, read_mask, write_mask, val, save_val, read_val;
/* Per-entry applicability flags */
12351 #define TG3_FL_5705 0x1
12352 #define TG3_FL_NOT_5705 0x2
12353 #define TG3_FL_NOT_5788 0x4
12354 #define TG3_FL_NOT_5750 0x8
12358 /* MAC Control Registers */
12359 { MAC_MODE, TG3_FL_NOT_5705,
12360 0x00000000, 0x00ef6f8c },
12361 { MAC_MODE, TG3_FL_5705,
12362 0x00000000, 0x01ef6b8c },
12363 { MAC_STATUS, TG3_FL_NOT_5705,
12364 0x03800107, 0x00000000 },
12365 { MAC_STATUS, TG3_FL_5705,
12366 0x03800100, 0x00000000 },
12367 { MAC_ADDR_0_HIGH, 0x0000,
12368 0x00000000, 0x0000ffff },
12369 { MAC_ADDR_0_LOW, 0x0000,
12370 0x00000000, 0xffffffff },
12371 { MAC_RX_MTU_SIZE, 0x0000,
12372 0x00000000, 0x0000ffff },
12373 { MAC_TX_MODE, 0x0000,
12374 0x00000000, 0x00000070 },
12375 { MAC_TX_LENGTHS, 0x0000,
12376 0x00000000, 0x00003fff },
12377 { MAC_RX_MODE, TG3_FL_NOT_5705,
12378 0x00000000, 0x000007fc },
12379 { MAC_RX_MODE, TG3_FL_5705,
12380 0x00000000, 0x000007dc },
12381 { MAC_HASH_REG_0, 0x0000,
12382 0x00000000, 0xffffffff },
12383 { MAC_HASH_REG_1, 0x0000,
12384 0x00000000, 0xffffffff },
12385 { MAC_HASH_REG_2, 0x0000,
12386 0x00000000, 0xffffffff },
12387 { MAC_HASH_REG_3, 0x0000,
12388 0x00000000, 0xffffffff },
12390 /* Receive Data and Receive BD Initiator Control Registers. */
12391 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12392 0x00000000, 0xffffffff },
12393 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12394 0x00000000, 0xffffffff },
12395 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12396 0x00000000, 0x00000003 },
12397 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12398 0x00000000, 0xffffffff },
12399 { RCVDBDI_STD_BD+0, 0x0000,
12400 0x00000000, 0xffffffff },
12401 { RCVDBDI_STD_BD+4, 0x0000,
12402 0x00000000, 0xffffffff },
12403 { RCVDBDI_STD_BD+8, 0x0000,
12404 0x00000000, 0xffff0002 },
12405 { RCVDBDI_STD_BD+0xc, 0x0000,
12406 0x00000000, 0xffffffff },
12408 /* Receive BD Initiator Control Registers. */
12409 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12410 0x00000000, 0xffffffff },
12411 { RCVBDI_STD_THRESH, TG3_FL_5705,
12412 0x00000000, 0x000003ff },
12413 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12414 0x00000000, 0xffffffff },
12416 /* Host Coalescing Control Registers. */
12417 { HOSTCC_MODE, TG3_FL_NOT_5705,
12418 0x00000000, 0x00000004 },
12419 { HOSTCC_MODE, TG3_FL_5705,
12420 0x00000000, 0x000000f6 },
12421 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12422 0x00000000, 0xffffffff },
12423 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12424 0x00000000, 0x000003ff },
12425 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12426 0x00000000, 0xffffffff },
12427 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12428 0x00000000, 0x000003ff },
12429 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12430 0x00000000, 0xffffffff },
12431 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12432 0x00000000, 0x000000ff },
12433 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12434 0x00000000, 0xffffffff },
12435 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12436 0x00000000, 0x000000ff },
12437 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12438 0x00000000, 0xffffffff },
12439 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12440 0x00000000, 0xffffffff },
12441 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12442 0x00000000, 0xffffffff },
12443 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12444 0x00000000, 0x000000ff },
12445 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12446 0x00000000, 0xffffffff },
12447 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12448 0x00000000, 0x000000ff },
12449 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12450 0x00000000, 0xffffffff },
12451 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12452 0x00000000, 0xffffffff },
12453 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12454 0x00000000, 0xffffffff },
12455 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12456 0x00000000, 0xffffffff },
12457 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12458 0x00000000, 0xffffffff },
12459 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12460 0xffffffff, 0x00000000 },
12461 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12462 0xffffffff, 0x00000000 },
12464 /* Buffer Manager Control Registers. */
12465 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12466 0x00000000, 0x007fff80 },
12467 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12468 0x00000000, 0x007fffff },
12469 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12470 0x00000000, 0x0000003f },
12471 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12472 0x00000000, 0x000001ff },
12473 { BUFMGR_MB_HIGH_WATER, 0x0000,
12474 0x00000000, 0x000001ff },
12475 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12476 0xffffffff, 0x00000000 },
12477 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12478 0xffffffff, 0x00000000 },
12480 /* Mailbox Registers */
12481 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12482 0x00000000, 0x000001ff },
12483 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12484 0x00000000, 0x000001ff },
12485 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12486 0x00000000, 0x000007ff },
12487 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12488 0x00000000, 0x000001ff },
/* Sentinel terminating the table */
12490 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12493 is_5705 = is_5750 = 0;
12494 if (tg3_flag(tp, 5705_PLUS)) {
12496 if (tg3_flag(tp, 5750_PLUS))
/* Iterate the table, skipping entries not applicable to this chip */
12500 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12501 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12504 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12507 if (tg3_flag(tp, IS_5788) &&
12508 (reg_tbl[i].flags & TG3_FL_NOT_5788))
12511 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12514 offset = (u32) reg_tbl[i].offset;
12515 read_mask = reg_tbl[i].read_mask;
12516 write_mask = reg_tbl[i].write_mask;
12518 /* Save the original register content */
12519 save_val = tr32(offset);
12521 /* Determine the read-only value. */
12522 read_val = save_val & read_mask;
12524 /* Write zero to the register, then make sure the read-only bits
12525 * are not changed and the read/write bits are all zeros.
12529 val = tr32(offset);
12531 /* Test the read-only and read/write bits. */
12532 if (((val & read_mask) != read_val) || (val & write_mask))
12535 /* Write ones to all the bits defined by RdMask and WrMask, then
12536 * make sure the read-only bits are not changed and the
12537 * read/write bits are all ones.
12539 tw32(offset, read_mask | write_mask);
12541 val = tr32(offset);
12543 /* Test the read-only bits. */
12544 if ((val & read_mask) != read_val)
12547 /* Test the read/write bits. */
12548 if ((val & write_mask) != write_mask)
/* Restore the register before moving to the next entry */
12551 tw32(offset, save_val);
/* Failure path: log the offending offset, restore, return error */
12557 if (netif_msg_hw(tp))
12558 netdev_err(tp->dev,
12559 "Register test failed at offset %x\n", offset);
12560 tw32(offset, save_val);
12564 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12566 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12570 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12571 for (j = 0; j < len; j += 4) {
12574 tg3_write_mem(tp, offset + j, test_pattern[i]);
12575 tg3_read_mem(tp, offset + j, &val);
12576 if (val != test_pattern[i])
12583 static int tg3_test_memory(struct tg3 *tp)
12585 static struct mem_entry {
12588 } mem_tbl_570x[] = {
12589 { 0x00000000, 0x00b50},
12590 { 0x00002000, 0x1c000},
12591 { 0xffffffff, 0x00000}
12592 }, mem_tbl_5705[] = {
12593 { 0x00000100, 0x0000c},
12594 { 0x00000200, 0x00008},
12595 { 0x00004000, 0x00800},
12596 { 0x00006000, 0x01000},
12597 { 0x00008000, 0x02000},
12598 { 0x00010000, 0x0e000},
12599 { 0xffffffff, 0x00000}
12600 }, mem_tbl_5755[] = {
12601 { 0x00000200, 0x00008},
12602 { 0x00004000, 0x00800},
12603 { 0x00006000, 0x00800},
12604 { 0x00008000, 0x02000},
12605 { 0x00010000, 0x0c000},
12606 { 0xffffffff, 0x00000}
12607 }, mem_tbl_5906[] = {
12608 { 0x00000200, 0x00008},
12609 { 0x00004000, 0x00400},
12610 { 0x00006000, 0x00400},
12611 { 0x00008000, 0x01000},
12612 { 0x00010000, 0x01000},
12613 { 0xffffffff, 0x00000}
12614 }, mem_tbl_5717[] = {
12615 { 0x00000200, 0x00008},
12616 { 0x00010000, 0x0a000},
12617 { 0x00020000, 0x13c00},
12618 { 0xffffffff, 0x00000}
12619 }, mem_tbl_57765[] = {
12620 { 0x00000200, 0x00008},
12621 { 0x00004000, 0x00800},
12622 { 0x00006000, 0x09800},
12623 { 0x00010000, 0x0a000},
12624 { 0xffffffff, 0x00000}
12626 struct mem_entry *mem_tbl;
12630 if (tg3_flag(tp, 5717_PLUS))
12631 mem_tbl = mem_tbl_5717;
12632 else if (tg3_flag(tp, 57765_CLASS) ||
12633 tg3_asic_rev(tp) == ASIC_REV_5762)
12634 mem_tbl = mem_tbl_57765;
12635 else if (tg3_flag(tp, 5755_PLUS))
12636 mem_tbl = mem_tbl_5755;
12637 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
12638 mem_tbl = mem_tbl_5906;
12639 else if (tg3_flag(tp, 5705_PLUS))
12640 mem_tbl = mem_tbl_5705;
12642 mem_tbl = mem_tbl_570x;
12644 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
12645 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12653 #define TG3_TSO_MSS 500
12655 #define TG3_TSO_IP_HDR_LEN 20
12656 #define TG3_TSO_TCP_HDR_LEN 20
12657 #define TG3_TSO_TCP_OPT_LEN 12
12659 static const u8 tg3_tso_header[] = {
12661 0x45, 0x00, 0x00, 0x00,
12662 0x00, 0x00, 0x40, 0x00,
12663 0x40, 0x06, 0x00, 0x00,
12664 0x0a, 0x00, 0x00, 0x01,
12665 0x0a, 0x00, 0x00, 0x02,
12666 0x0d, 0x00, 0xe0, 0x00,
12667 0x00, 0x00, 0x01, 0x00,
12668 0x00, 0x00, 0x02, 0x00,
12669 0x80, 0x10, 0x10, 0x00,
12670 0x14, 0x09, 0x00, 0x00,
12671 0x01, 0x01, 0x08, 0x0a,
12672 0x11, 0x11, 0x11, 0x11,
12673 0x11, 0x11, 0x11, 0x11,
/* Self-test worker: transmit one loopback packet and verify it returns.
 *
 * Builds a packet of size 'pktsz' (a TSO frame when 'tso_loopback'),
 * DMA-maps and posts it on the TX ring, polls the status block for up
 * to ~350 usec until the TX consumer and RX producer advance, then
 * walks the RX return ring and byte-compares the received payload
 * against what was sent.  Returns 0 on success, nonzero on any
 * mismatch, timeout or mapping failure.
 *
 * NOTE(review): the extraction listing dropped many lines ('goto out'
 * error paths, 'err = -EIO' assignments, braces, udelay calls, the
 * final return) — embedded line numbers are non-contiguous.  Verify
 * against upstream before relying on control flow here.
 */
12676 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
12678 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
12679 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
12681 struct sk_buff *skb;
12682 u8 *tx_data, *rx_data;
12684 int num_pkts, tx_len, rx_len, i, err;
12685 struct tg3_rx_buffer_desc *desc;
12686 struct tg3_napi *tnapi, *rnapi;
12687 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
/* With RSS/TSS, vector 0 handles only link IRQs; use vector 1 */
12689 tnapi = &tp->napi[0];
12690 rnapi = &tp->napi[0];
12691 if (tp->irq_cnt > 1) {
12692 if (tg3_flag(tp, ENABLE_RSS))
12693 rnapi = &tp->napi[1];
12694 if (tg3_flag(tp, ENABLE_TSS))
12695 tnapi = &tp->napi[1];
12697 coal_now = tnapi->coal_now | rnapi->coal_now;
12702 skb = netdev_alloc_skb(tp->dev, tx_len);
/* Destination MAC = our own address so the loopback frame is accepted */
12706 tx_data = skb_put(skb, tx_len);
12707 memcpy(tx_data, tp->dev->dev_addr, 6);
12708 memset(tx_data + 6, 0x0, 8);
12710 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
12712 if (tso_loopback) {
12713 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12715 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12716 TG3_TSO_TCP_OPT_LEN;
12718 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12719 sizeof(tg3_tso_header));
12722 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12723 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12725 /* Set the total length field in the IP header */
12726 iph->tot_len = htons((u16)(mss + hdr_len));
12728 base_flags = (TXD_FLAG_CPU_PRE_DMA |
12729 TXD_FLAG_CPU_POST_DMA);
/* Header-length encoding differs per hardware TSO generation */
12731 if (tg3_flag(tp, HW_TSO_1) ||
12732 tg3_flag(tp, HW_TSO_2) ||
12733 tg3_flag(tp, HW_TSO_3)) {
12735 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
12736 th = (struct tcphdr *)&tx_data[val];
12739 base_flags |= TXD_FLAG_TCPUDP_CSUM;
12741 if (tg3_flag(tp, HW_TSO_3)) {
12742 mss |= (hdr_len & 0xc) << 12;
12743 if (hdr_len & 0x10)
12744 base_flags |= 0x00000010;
12745 base_flags |= (hdr_len & 0x3e0) << 5;
12746 } else if (tg3_flag(tp, HW_TSO_2))
12747 mss |= hdr_len << 9;
12748 else if (tg3_flag(tp, HW_TSO_1) ||
12749 tg3_asic_rev(tp) == ASIC_REV_5705) {
12750 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
12752 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
12755 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
12758 data_off = ETH_HLEN;
12760 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12761 tx_len > VLAN_ETH_FRAME_LEN)
12762 base_flags |= TXD_FLAG_JMB_PKT;
/* Fill the payload with a recognizable incrementing pattern */
12765 for (i = data_off; i < tx_len; i++)
12766 tx_data[i] = (u8) (i & 0xff);
12768 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12769 if (pci_dma_mapping_error(tp->pdev, map)) {
12770 dev_kfree_skb(skb);
12774 val = tnapi->tx_prod;
12775 tnapi->tx_buffers[val].skb = skb;
12776 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12778 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12783 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
12785 budget = tg3_tx_avail(tnapi);
12786 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
12787 base_flags | TXD_FLAG_END, mss, 0)) {
12788 tnapi->tx_buffers[val].skb = NULL;
12789 dev_kfree_skb(skb);
12795 /* Sync BD data before updating mailbox */
12798 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
12799 tr32_mailbox(tnapi->prodmbox);
12803 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
12804 for (i = 0; i < 35; i++) {
12805 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12810 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12811 rx_idx = rnapi->hw_status->idx[0].rx_producer;
12812 if ((tx_idx == tnapi->tx_prod) &&
12813 (rx_idx == (rx_start_idx + num_pkts)))
12817 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
12818 dev_kfree_skb(skb);
12820 if (tx_idx != tnapi->tx_prod)
12823 if (rx_idx != rx_start_idx + num_pkts)
/* Walk every RX return descriptor produced by the loopback */
12827 while (rx_idx != rx_start_idx) {
12828 desc = &rnapi->rx_rcb[rx_start_idx++];
12829 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12830 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
12832 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
12833 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
12836 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
12839 if (!tso_loopback) {
12840 if (rx_len != tx_len)
12843 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
12844 if (opaque_key != RXD_OPAQUE_RING_STD)
12847 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
12850 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
12851 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
12852 >> RXD_TCPCSUM_SHIFT != 0xffff) {
12856 if (opaque_key == RXD_OPAQUE_RING_STD) {
12857 rx_data = tpr->rx_std_buffers[desc_idx].data;
12858 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
12860 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
12861 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
12862 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
12867 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12868 PCI_DMA_FROMDEVICE);
/* Byte-compare received payload against the transmitted pattern */
12870 rx_data += TG3_RX_OFFSET(tp);
12871 for (i = data_off; i < rx_len; i++, val++) {
12872 if (*(rx_data + i) != (u8) (val & 0xff))
12879 /* tg3_free_rings will unmap and free the rx_data */
12884 #define TG3_STD_LOOPBACK_FAILED 1
12885 #define TG3_JMB_LOOPBACK_FAILED 2
12886 #define TG3_TSO_LOOPBACK_FAILED 4
12887 #define TG3_LOOPBACK_FAILED \
12888 (TG3_STD_LOOPBACK_FAILED | \
12889 TG3_JMB_LOOPBACK_FAILED | \
12890 TG3_TSO_LOOPBACK_FAILED)
/* Self-test driver: run MAC, PHY and (optionally) external loopback.
 *
 * Temporarily disables EEE, resets the hardware, reroutes RSS traffic
 * to queue 1, then runs tg3_run_loopback() in each applicable loopback
 * mode with standard, TSO and jumbo frame sizes, accumulating
 * TG3_*_LOOPBACK_FAILED bits into data[].  Returns -EIO if any test
 * failed, else 0.
 *
 * NOTE(review): the extraction listing dropped lines (returns, braces,
 * the 'done:' label, tw32/mdelay calls, APD-disable before loopback) —
 * embedded line numbers are non-contiguous.  Verify against upstream
 * before relying on control flow here.
 */
12892 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
12896 u32 jmb_pkt_sz = 9000;
12899 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
/* EEE interferes with loopback; save its state and mask it off */
12901 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
12902 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
12904 if (!netif_running(tp->dev)) {
12905 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12906 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12908 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12912 err = tg3_reset_hw(tp, 1);
12914 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12915 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12917 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12921 if (tg3_flag(tp, ENABLE_RSS)) {
12924 /* Reroute all rx packets to the 1st queue */
12925 for (i = MAC_RSS_INDIR_TBL_0;
12926 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
12930 /* HW errata - mac loopback fails in some cases on 5780.
12931 * Normal traffic and PHY loopback are not affected by
12932 * errata. Also, the MAC loopback test is deprecated for
12933 * all newer ASIC revisions.
12935 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
12936 !tg3_flag(tp, CPMU_PRESENT)) {
12937 tg3_mac_loopback(tp, true);
12939 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12940 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12942 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12943 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12944 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
12946 tg3_mac_loopback(tp, false);
/* Internal PHY loopback (not for serdes or phylib-managed PHYs) */
12949 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
12950 !tg3_flag(tp, USE_PHYLIB)) {
12953 tg3_phy_lpbk_set(tp, 0, false);
12955 /* Wait for link */
12956 for (i = 0; i < 100; i++) {
12957 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
12962 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12963 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12964 if (tg3_flag(tp, TSO_CAPABLE) &&
12965 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12966 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
12967 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12968 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12969 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
/* External loopback (requires a loopback plug), if requested */
12972 tg3_phy_lpbk_set(tp, 0, true);
12974 /* All link indications report up, but the hardware
12975 * isn't really ready for about 20 msec. Double it
12980 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12981 data[TG3_EXT_LOOPB_TEST] |=
12982 TG3_STD_LOOPBACK_FAILED;
12983 if (tg3_flag(tp, TSO_CAPABLE) &&
12984 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12985 data[TG3_EXT_LOOPB_TEST] |=
12986 TG3_TSO_LOOPBACK_FAILED;
12987 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12988 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12989 data[TG3_EXT_LOOPB_TEST] |=
12990 TG3_JMB_LOOPBACK_FAILED;
12993 /* Re-enable gphy autopowerdown. */
12994 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12995 tg3_phy_toggle_apd(tp, true);
12998 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
12999 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
/* Restore the saved EEE capability bit */
13002 tp->phy_flags |= eee_cap;
/* ethtool .self_test handler.
 *
 * Runs the NVRAM and link tests online; when ETH_TEST_FL_OFFLINE is
 * requested it additionally halts the chip and runs the register,
 * memory, loopback and interrupt tests, then restarts the hardware.
 * Per-test results land in data[], and ETH_TEST_FL_FAILED is set in
 * etest->flags on any failure.
 *
 * NOTE(review): the extraction listing dropped lines (braces, 'irq_sync
 * = 1', the serdes early-out body, power-up/down pairing) — embedded
 * line numbers are non-contiguous.  Verify against upstream before
 * relying on control flow here.
 */
13007 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13010 struct tg3 *tp = netdev_priv(dev);
13011 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
/* Must power the device up first; mark everything failed if we can't */
13013 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
13014 tg3_power_up(tp)) {
13015 etest->flags |= ETH_TEST_FL_FAILED;
13016 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13020 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13022 if (tg3_test_nvram(tp) != 0) {
13023 etest->flags |= ETH_TEST_FL_FAILED;
13024 data[TG3_NVRAM_TEST] = 1;
13026 if (!doextlpbk && tg3_test_link(tp)) {
13027 etest->flags |= ETH_TEST_FL_FAILED;
13028 data[TG3_LINK_TEST] = 1;
/* Offline portion: requires halting the chip */
13030 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13031 int err, err2 = 0, irq_sync = 0;
13033 if (netif_running(dev)) {
13035 tg3_netif_stop(tp);
13039 tg3_full_lock(tp, irq_sync);
13040 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13041 err = tg3_nvram_lock(tp);
13042 tg3_halt_cpu(tp, RX_CPU_BASE);
13043 if (!tg3_flag(tp, 5705_PLUS))
13044 tg3_halt_cpu(tp, TX_CPU_BASE);
13046 tg3_nvram_unlock(tp);
13048 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13051 if (tg3_test_registers(tp) != 0) {
13052 etest->flags |= ETH_TEST_FL_FAILED;
13053 data[TG3_REGISTER_TEST] = 1;
13056 if (tg3_test_memory(tp) != 0) {
13057 etest->flags |= ETH_TEST_FL_FAILED;
13058 data[TG3_MEMORY_TEST] = 1;
13062 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13064 if (tg3_test_loopback(tp, data, doextlpbk))
13065 etest->flags |= ETH_TEST_FL_FAILED;
13067 tg3_full_unlock(tp);
13069 if (tg3_test_interrupt(tp) != 0) {
13070 etest->flags |= ETH_TEST_FL_FAILED;
13071 data[TG3_INTERRUPT_TEST] = 1;
/* Bring the hardware back to its pre-test state */
13074 tg3_full_lock(tp, 0);
13076 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13077 if (netif_running(dev)) {
13078 tg3_flag_set(tp, INIT_COMPLETE);
13079 err2 = tg3_restart_hw(tp, 1);
13081 tg3_netif_start(tp);
13084 tg3_full_unlock(tp);
13086 if (irq_sync && !err2)
13089 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13090 tg3_power_down(tp);
/* SIOCSHWTSTAMP handler: configure hardware packet timestamping.
 *
 * Copies the hwtstamp_config from userspace, enables/disables the TX
 * timestamp flag, maps each supported rx_filter to the corresponding
 * TG3_RX_PTP_CTL_* register value in tp->rxptpctl, writes it to the
 * hardware if running, and copies the (possibly adjusted) config back
 * to userspace.
 *
 * NOTE(review): the extraction listing dropped lines (the -EOPNOTSUPP /
 * -EFAULT / -EINVAL / -ERANGE returns, 'break' after each case,
 * 'tp->rxptpctl = 0' for FILTER_NONE) — embedded line numbers are
 * non-contiguous.  Verify against upstream before relying on the exact
 * error paths.
 */
13094 static int tg3_hwtstamp_ioctl(struct net_device *dev,
13095 struct ifreq *ifr, int cmd)
13097 struct tg3 *tp = netdev_priv(dev);
13098 struct hwtstamp_config stmpconf;
13100 if (!tg3_flag(tp, PTP_CAPABLE))
13103 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
/* Reserved flags must be zero */
13106 if (stmpconf.flags)
13109 switch (stmpconf.tx_type) {
13110 case HWTSTAMP_TX_ON:
13111 tg3_flag_set(tp, TX_TSTAMP_EN);
13113 case HWTSTAMP_TX_OFF:
13114 tg3_flag_clear(tp, TX_TSTAMP_EN);
/* Map the requested RX filter onto the PTP control register bits */
13120 switch (stmpconf.rx_filter) {
13121 case HWTSTAMP_FILTER_NONE:
13124 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13125 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13126 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13128 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13129 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13130 TG3_RX_PTP_CTL_SYNC_EVNT;
13132 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13133 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13134 TG3_RX_PTP_CTL_DELAY_REQ;
13136 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13137 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13138 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13140 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13141 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13142 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13144 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13145 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13146 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13148 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13149 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13150 TG3_RX_PTP_CTL_SYNC_EVNT;
13152 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13153 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13154 TG3_RX_PTP_CTL_SYNC_EVNT;
13156 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13157 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13158 TG3_RX_PTP_CTL_SYNC_EVNT;
13160 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13161 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13162 TG3_RX_PTP_CTL_DELAY_REQ;
13164 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13165 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13166 TG3_RX_PTP_CTL_DELAY_REQ;
13168 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13169 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13170 TG3_RX_PTP_CTL_DELAY_REQ;
/* Push the new filter to the chip if the interface is live */
13176 if (netif_running(dev) && tp->rxptpctl)
13177 tw32(TG3_RX_PTP_CTL,
13178 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13180 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
/* tg3_ioctl() - net_device ioctl handler for MII register access
 * (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG) and hardware timestamping
 * (SIOCSHWTSTAMP).  NOTE(review): several structural lines (case labels,
 * break/return statements) are elided from this view; comments below
 * describe only what is visible.
 */
13184 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13186 struct mii_ioctl_data *data = if_mii(ifr);
13187 struct tg3 *tp = netdev_priv(dev);
/* If phylib owns the PHY, delegate all MII ioctls to it. */
13190 if (tg3_flag(tp, USE_PHYLIB)) {
13191 struct phy_device *phydev;
/* Cannot service MII ioctls before the PHY is connected. */
13192 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13194 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
13195 return phy_mii_ioctl(phydev, ifr, cmd);
/* SIOCGMIIPHY (presumably -- case label elided): report the PHY address. */
13200 data->phy_id = tp->phy_addr;
13203 case SIOCGMIIREG: {
/* Serdes boards have no MDIO-accessible copper PHY. */
13206 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13207 break; /* We have no PHY */
13209 if (!netif_running(dev))
/* Serialize the PHY register read against other accessors. */
13212 spin_lock_bh(&tp->lock);
13213 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13214 data->reg_num & 0x1f, &mii_regval);
13215 spin_unlock_bh(&tp->lock);
13217 data->val_out = mii_regval;
/* SIOCSMIIREG (presumably -- case label elided): write a PHY register. */
13223 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13224 break; /* We have no PHY */
13226 if (!netif_running(dev))
13229 spin_lock_bh(&tp->lock);
13230 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13231 data->reg_num & 0x1f, data->val_in);
13232 spin_unlock_bh(&tp->lock);
13236 case SIOCSHWTSTAMP:
13237 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
/* Unsupported ioctl command. */
13243 return -EOPNOTSUPP;
/* Ethtool get_coalesce: copy the driver's cached interrupt coalescing
 * parameters into @ec.  (Return statement elided from this view.)
 */
13246 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13248 struct tg3 *tp = netdev_priv(dev);
13250 memcpy(ec, &tp->coal, sizeof(*ec));
/* Ethtool set_coalesce: validate the requested interrupt coalescing
 * settings against per-chip hardware limits, cache the supported subset
 * in tp->coal, and program the hardware if the interface is running.
 */
13254 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13256 struct tg3 *tp = netdev_priv(dev);
13257 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13258 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
/* Only pre-5705 chips support per-IRQ tick limits and stats-block
 * coalescing; on 5705+ the limits stay 0 so any nonzero request for
 * those fields fails the range check below.
 */
13260 if (!tg3_flag(tp, 5705_PLUS)) {
13261 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13262 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13263 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13264 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
/* Reject any parameter outside the hardware's supported range.
 * (The error return is elided from this view.)
 */
13267 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13268 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13269 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13270 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13271 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13272 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13273 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13274 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13275 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13276 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13279 /* No rx interrupts will be generated if both are zero */
13280 if ((ec->rx_coalesce_usecs == 0) &&
13281 (ec->rx_max_coalesced_frames == 0))
13284 /* No tx interrupts will be generated if both are zero */
13285 if ((ec->tx_coalesce_usecs == 0) &&
13286 (ec->tx_max_coalesced_frames == 0))
13289 /* Only copy relevant parameters, ignore all others. */
13290 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13291 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13292 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13293 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13294 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13295 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13296 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13297 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13298 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
/* Push the new values to hardware under the full lock if the
 * interface is up; otherwise they take effect at next open.
 */
13300 if (netif_running(dev)) {
13301 tg3_full_lock(tp, 0);
13302 __tg3_set_coalesce(tp, &tp->coal);
13303 tg3_full_unlock(tp);
/* Ethtool operations table: wires the ethtool core's callbacks to the
 * tg3 implementations (link settings, EEPROM/register dumps, WoL,
 * ring/pause/coalesce parameters, self-test, statistics, RSS
 * indirection, channel counts, and timestamping capabilities).
 */
13308 static const struct ethtool_ops tg3_ethtool_ops = {
13309 .get_settings = tg3_get_settings,
13310 .set_settings = tg3_set_settings,
13311 .get_drvinfo = tg3_get_drvinfo,
13312 .get_regs_len = tg3_get_regs_len,
13313 .get_regs = tg3_get_regs,
13314 .get_wol = tg3_get_wol,
13315 .set_wol = tg3_set_wol,
13316 .get_msglevel = tg3_get_msglevel,
13317 .set_msglevel = tg3_set_msglevel,
13318 .nway_reset = tg3_nway_reset,
13319 .get_link = ethtool_op_get_link,
13320 .get_eeprom_len = tg3_get_eeprom_len,
13321 .get_eeprom = tg3_get_eeprom,
13322 .set_eeprom = tg3_set_eeprom,
13323 .get_ringparam = tg3_get_ringparam,
13324 .set_ringparam = tg3_set_ringparam,
13325 .get_pauseparam = tg3_get_pauseparam,
13326 .set_pauseparam = tg3_set_pauseparam,
13327 .self_test = tg3_self_test,
13328 .get_strings = tg3_get_strings,
13329 .set_phys_id = tg3_set_phys_id,
13330 .get_ethtool_stats = tg3_get_ethtool_stats,
13331 .get_coalesce = tg3_get_coalesce,
13332 .set_coalesce = tg3_set_coalesce,
13333 .get_sset_count = tg3_get_sset_count,
13334 .get_rxnfc = tg3_get_rxnfc,
13335 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
13336 .get_rxfh_indir = tg3_get_rxfh_indir,
13337 .set_rxfh_indir = tg3_set_rxfh_indir,
13338 .get_channels = tg3_get_channels,
13339 .set_channels = tg3_set_channels,
13340 .get_ts_info = tg3_get_ts_info,
/* ndo_get_stats64: fill @stats with current device statistics.  If the
 * hardware statistics block is gone (device torn down), return the last
 * snapshot saved in tp->net_stats_prev instead.  tp->lock guards
 * against the stats block disappearing mid-read.
 */
13343 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13344 struct rtnl_link_stats64 *stats)
13346 struct tg3 *tp = netdev_priv(dev);
13348 spin_lock_bh(&tp->lock);
13349 if (!tp->hw_stats) {
13350 spin_unlock_bh(&tp->lock);
13351 return &tp->net_stats_prev;
13354 tg3_get_nstats(tp, stats);
13355 spin_unlock_bh(&tp->lock);
/* ndo_set_rx_mode: reprogram the RX filters (promiscuous/multicast)
 * under the full lock.  A no-op while the interface is down; the mode
 * is applied when the device is brought up.
 */
13360 static void tg3_set_rx_mode(struct net_device *dev)
13362 struct tg3 *tp = netdev_priv(dev);
13364 if (!netif_running(dev))
13367 tg3_full_lock(tp, 0);
13368 __tg3_set_rx_mode(dev);
13369 tg3_full_unlock(tp);
/* Record the new MTU on the netdev and toggle jumbo-frame state.
 * Jumbo MTU: 5780-class chips drop TSO capability (they cannot do TSO
 * with jumbo frames), and the jumbo RX ring is enabled.  Standard MTU:
 * TSO is restored on 5780-class chips and the jumbo ring is disabled.
 * netdev_update_features() re-evaluates the feature flags with the
 * hardware.
 */
13372 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13375 dev->mtu = new_mtu;
13377 if (new_mtu > ETH_DATA_LEN) {
13378 if (tg3_flag(tp, 5780_CLASS)) {
13379 netdev_update_features(dev);
13380 tg3_flag_clear(tp, TSO_CAPABLE);
13382 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13385 if (tg3_flag(tp, 5780_CLASS)) {
13386 tg3_flag_set(tp, TSO_CAPABLE);
13387 netdev_update_features(dev);
13389 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
/* ndo_change_mtu: validate the requested MTU, then halt the chip,
 * apply the new MTU, and restart the hardware.  If the interface is
 * down, only the bookkeeping in tg3_set_mtu() is needed -- the new
 * size takes effect at next open.
 */
13393 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13395 struct tg3 *tp = netdev_priv(dev);
13396 int err, reset_phy = 0;
13398 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13401 if (!netif_running(dev)) {
13402 /* We'll just catch it later when the
13405 tg3_set_mtu(dev, tp, new_mtu);
/* Interface is up: stop traffic, halt the chip, resize, restart. */
13411 tg3_netif_stop(tp);
13413 tg3_full_lock(tp, 1);
13415 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13417 tg3_set_mtu(dev, tp, new_mtu);
13419 /* Reset PHY, otherwise the read DMA engine will be in a mode that
13420 * breaks all requests to 256 bytes.
13422 if (tg3_asic_rev(tp) == ASIC_REV_57766)
13425 err = tg3_restart_hw(tp, reset_phy);
13428 tg3_netif_start(tp);
13430 tg3_full_unlock(tp);
/* Net device operations table: connects the core networking stack's
 * entry points (open/close, transmit, stats, rx-mode, MAC address,
 * ioctl, MTU changes, feature negotiation, and netpoll) to the tg3
 * implementations.
 */
13438 static const struct net_device_ops tg3_netdev_ops = {
13439 .ndo_open = tg3_open,
13440 .ndo_stop = tg3_close,
13441 .ndo_start_xmit = tg3_start_xmit,
13442 .ndo_get_stats64 = tg3_get_stats64,
13443 .ndo_validate_addr = eth_validate_addr,
13444 .ndo_set_rx_mode = tg3_set_rx_mode,
13445 .ndo_set_mac_address = tg3_set_mac_addr,
13446 .ndo_do_ioctl = tg3_ioctl,
13447 .ndo_tx_timeout = tg3_tx_timeout,
13448 .ndo_change_mtu = tg3_change_mtu,
13449 .ndo_fix_features = tg3_fix_features,
13450 .ndo_set_features = tg3_set_features,
13451 #ifdef CONFIG_NET_POLL_CONTROLLER
13452 .ndo_poll_controller = tg3_poll_controller,
/* Determine the EEPROM size by probing: start from the default chip
 * size and read at increasing offsets until the validation signature
 * reappears, which means addressing wrapped around.  Leaves
 * tp->nvram_size at the default if the magic at offset 0 is not one of
 * the recognized signatures.
 */
13456 static void tg3_get_eeprom_size(struct tg3 *tp)
13458 u32 cursize, val, magic;
13460 tp->nvram_size = EEPROM_CHIP_SIZE;
13462 if (tg3_nvram_read(tp, 0, &magic) != 0)
13465 if ((magic != TG3_EEPROM_MAGIC) &&
13466 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13467 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13471 * Size the chip by reading offsets at increasing powers of two.
13472 * When we encounter our validation signature, we know the addressing
13473 * has wrapped around, and thus have our chip size.
/* (Initialization of cursize is elided from this view.) */
13477 while (cursize < tp->nvram_size) {
13478 if (tg3_nvram_read(tp, cursize, &val) != 0)
13487 tp->nvram_size = cursize;
/* Determine the NVRAM size.  Selfboot images (no TG3_EEPROM_MAGIC at
 * offset 0) are sized by probing via tg3_get_eeprom_size(); otherwise
 * the size is read from the 16-bit field at offset 0xf2, falling back
 * to 512KB if that read fails or yields 0 (fallback path partially
 * elided from this view).
 */
13490 static void tg3_get_nvram_size(struct tg3 *tp)
13494 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13497 /* Selfboot format */
13498 if (val != TG3_EEPROM_MAGIC) {
13499 tg3_get_eeprom_size(tp);
13503 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13505 /* This is confusing. We want to operate on the
13506 * 16-bit value at offset 0xf2. The tg3_nvram_read()
13507 * call will read from NVRAM and byteswap the data
13508 * according to the byteswapping settings for all
13509 * other register accesses. This ensures the data we
13510 * want will always reside in the lower 16-bits.
13511 * However, the data in NVRAM is in LE format, which
13512 * means the data from the NVRAM read will always be
13513 * opposite the endianness of the CPU. The 16-bit
13514 * byteswap then brings the data to CPU endianness.
13516 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13520 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
/* Decode NVRAM_CFG1 for older (5750/5780-class) chips: identify the
 * flash vendor and set tp->nvram_jedecnum, tp->nvram_pagesize, and the
 * NVRAM_BUFFERED flag accordingly.  Non-flash parts default to the
 * Atmel buffered profile at the end.  (break statements between cases
 * are elided from this view.)
 */
13523 static void tg3_get_nvram_info(struct tg3 *tp)
13527 nvcfg1 = tr32(NVRAM_CFG1);
13528 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
13529 tg3_flag_set(tp, FLASH);
/* Not a flash interface: disable compatibility bypass mode. */
13531 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13532 tw32(NVRAM_CFG1, nvcfg1);
13535 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
13536 tg3_flag(tp, 5780_CLASS)) {
13537 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
13538 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
13539 tp->nvram_jedecnum = JEDEC_ATMEL;
13540 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13541 tg3_flag_set(tp, NVRAM_BUFFERED);
13543 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
13544 tp->nvram_jedecnum = JEDEC_ATMEL;
13545 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
13547 case FLASH_VENDOR_ATMEL_EEPROM:
13548 tp->nvram_jedecnum = JEDEC_ATMEL;
13549 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13550 tg3_flag_set(tp, NVRAM_BUFFERED);
13552 case FLASH_VENDOR_ST:
13553 tp->nvram_jedecnum = JEDEC_ST;
13554 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
13555 tg3_flag_set(tp, NVRAM_BUFFERED);
13557 case FLASH_VENDOR_SAIFUN:
13558 tp->nvram_jedecnum = JEDEC_SAIFUN;
13559 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
13561 case FLASH_VENDOR_SST_SMALL:
13562 case FLASH_VENDOR_SST_LARGE:
13563 tp->nvram_jedecnum = JEDEC_SST;
13564 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
/* Fallback (else branch, presumably -- structure elided): assume a
 * buffered Atmel AT45DB0X1B part.
 */
13568 tp->nvram_jedecnum = JEDEC_ATMEL;
13569 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13570 tg3_flag_set(tp, NVRAM_BUFFERED);
/* Translate the 5752-style page-size field of NVRAM_CFG1 into a byte
 * count in tp->nvram_pagesize.  264/528 are Atmel DataFlash "power of
 * two plus extra" page sizes; the rest are standard powers of two.
 * (break statements between cases are elided from this view.)
 */
13574 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13576 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13577 case FLASH_5752PAGE_SIZE_256:
13578 tp->nvram_pagesize = 256;
13580 case FLASH_5752PAGE_SIZE_512:
13581 tp->nvram_pagesize = 512;
13583 case FLASH_5752PAGE_SIZE_1K:
13584 tp->nvram_pagesize = 1024;
13586 case FLASH_5752PAGE_SIZE_2K:
13587 tp->nvram_pagesize = 2048;
13589 case FLASH_5752PAGE_SIZE_4K:
13590 tp->nvram_pagesize = 4096;
13592 case FLASH_5752PAGE_SIZE_264:
13593 tp->nvram_pagesize = 264;
13595 case FLASH_5752PAGE_SIZE_528:
13596 tp->nvram_pagesize = 528;
/* Decode NVRAM_CFG1 for 5752-class chips: note TPM protection, set the
 * vendor/JEDEC id and buffering/flash flags per the vendor field, then
 * derive the page size (flash) or use the max EEPROM size (EEPROM
 * parts).  (break statements between cases are elided from this view.)
 */
13601 static void tg3_get_5752_nvram_info(struct tg3 *tp)
13605 nvcfg1 = tr32(NVRAM_CFG1);
13607 /* NVRAM protection for TPM */
13608 if (nvcfg1 & (1 << 27))
13609 tg3_flag_set(tp, PROTECTED_NVRAM)
13611 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13612 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
13613 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
13614 tp->nvram_jedecnum = JEDEC_ATMEL;
13615 tg3_flag_set(tp, NVRAM_BUFFERED);
13617 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13618 tp->nvram_jedecnum = JEDEC_ATMEL;
13619 tg3_flag_set(tp, NVRAM_BUFFERED);
13620 tg3_flag_set(tp, FLASH);
13622 case FLASH_5752VENDOR_ST_M45PE10:
13623 case FLASH_5752VENDOR_ST_M45PE20:
13624 case FLASH_5752VENDOR_ST_M45PE40:
13625 tp->nvram_jedecnum = JEDEC_ST;
13626 tg3_flag_set(tp, NVRAM_BUFFERED);
13627 tg3_flag_set(tp, FLASH);
13631 if (tg3_flag(tp, FLASH)) {
13632 tg3_nvram_get_pagesize(tp, nvcfg1);
13634 /* For eeprom, set pagesize to maximum eeprom size */
13635 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13637 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13638 tw32(NVRAM_CFG1, nvcfg1);
/* Decode NVRAM_CFG1 for 5755-class chips.  In addition to vendor id,
 * buffering, flash flag, and page size, the NVRAM size is chosen from
 * the specific device id; when TPM protection is active the usable
 * size is reduced (0x3e200 / 0x1f200 reserve the protected region).
 * (break statements between cases are elided from this view.)
 */
13642 static void tg3_get_5755_nvram_info(struct tg3 *tp)
13644 u32 nvcfg1, protect = 0;
13646 nvcfg1 = tr32(NVRAM_CFG1);
13648 /* NVRAM protection for TPM */
13649 if (nvcfg1 & (1 << 27)) {
13650 tg3_flag_set(tp, PROTECTED_NVRAM);
/* (protect = 1 is presumably set here -- line elided.) */
13654 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13656 case FLASH_5755VENDOR_ATMEL_FLASH_1:
13657 case FLASH_5755VENDOR_ATMEL_FLASH_2:
13658 case FLASH_5755VENDOR_ATMEL_FLASH_3:
13659 case FLASH_5755VENDOR_ATMEL_FLASH_5:
13660 tp->nvram_jedecnum = JEDEC_ATMEL;
13661 tg3_flag_set(tp, NVRAM_BUFFERED);
13662 tg3_flag_set(tp, FLASH);
13663 tp->nvram_pagesize = 264;
13664 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
13665 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
13666 tp->nvram_size = (protect ? 0x3e200 :
13667 TG3_NVRAM_SIZE_512KB);
13668 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
13669 tp->nvram_size = (protect ? 0x1f200 :
13670 TG3_NVRAM_SIZE_256KB);
13672 tp->nvram_size = (protect ? 0x1f200 :
13673 TG3_NVRAM_SIZE_128KB);
13675 case FLASH_5752VENDOR_ST_M45PE10:
13676 case FLASH_5752VENDOR_ST_M45PE20:
13677 case FLASH_5752VENDOR_ST_M45PE40:
13678 tp->nvram_jedecnum = JEDEC_ST;
13679 tg3_flag_set(tp, NVRAM_BUFFERED);
13680 tg3_flag_set(tp, FLASH);
13681 tp->nvram_pagesize = 256;
13682 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
13683 tp->nvram_size = (protect ?
13684 TG3_NVRAM_SIZE_64KB :
13685 TG3_NVRAM_SIZE_128KB);
13686 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
13687 tp->nvram_size = (protect ?
13688 TG3_NVRAM_SIZE_64KB :
13689 TG3_NVRAM_SIZE_256KB);
13691 tp->nvram_size = (protect ?
13692 TG3_NVRAM_SIZE_128KB :
13693 TG3_NVRAM_SIZE_512KB);
/* Decode NVRAM_CFG1 for 5787/5784/5785 chips: classify the part as an
 * Atmel/Micron EEPROM, an Atmel buffered flash, or an ST flash, and
 * set jedecnum, pagesize and the NVRAM flags.  (break statements
 * between cases are elided from this view.)
 */
13698 static void tg3_get_5787_nvram_info(struct tg3 *tp)
13702 nvcfg1 = tr32(NVRAM_CFG1);
13704 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13705 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
13706 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13707 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
13708 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13709 tp->nvram_jedecnum = JEDEC_ATMEL;
13710 tg3_flag_set(tp, NVRAM_BUFFERED);
13711 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
/* EEPROM parts run with compatibility bypass disabled. */
13713 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13714 tw32(NVRAM_CFG1, nvcfg1);
13716 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13717 case FLASH_5755VENDOR_ATMEL_FLASH_1:
13718 case FLASH_5755VENDOR_ATMEL_FLASH_2:
13719 case FLASH_5755VENDOR_ATMEL_FLASH_3:
13720 tp->nvram_jedecnum = JEDEC_ATMEL;
13721 tg3_flag_set(tp, NVRAM_BUFFERED);
13722 tg3_flag_set(tp, FLASH);
13723 tp->nvram_pagesize = 264;
13725 case FLASH_5752VENDOR_ST_M45PE10:
13726 case FLASH_5752VENDOR_ST_M45PE20:
13727 case FLASH_5752VENDOR_ST_M45PE40:
13728 tp->nvram_jedecnum = JEDEC_ST;
13729 tg3_flag_set(tp, NVRAM_BUFFERED);
13730 tg3_flag_set(tp, FLASH);
13731 tp->nvram_pagesize = 256;
/* Decode NVRAM_CFG1 for 5761 chips.  The first switch sets vendor,
 * flags, and page size; the second maps the specific device id to an
 * NVRAM size (2MB/1MB/512KB/256KB).  When TPM protection is active the
 * size comes from the NVRAM_ADDR_LOCKOUT register instead (guard
 * around that read is elided from this view).  (break statements
 * between cases are elided as well.)
 */
13736 static void tg3_get_5761_nvram_info(struct tg3 *tp)
13738 u32 nvcfg1, protect = 0;
13740 nvcfg1 = tr32(NVRAM_CFG1);
13742 /* NVRAM protection for TPM */
13743 if (nvcfg1 & (1 << 27)) {
13744 tg3_flag_set(tp, PROTECTED_NVRAM);
/* (protect = 1 is presumably set here -- line elided.) */
13748 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13750 case FLASH_5761VENDOR_ATMEL_ADB021D:
13751 case FLASH_5761VENDOR_ATMEL_ADB041D:
13752 case FLASH_5761VENDOR_ATMEL_ADB081D:
13753 case FLASH_5761VENDOR_ATMEL_ADB161D:
13754 case FLASH_5761VENDOR_ATMEL_MDB021D:
13755 case FLASH_5761VENDOR_ATMEL_MDB041D:
13756 case FLASH_5761VENDOR_ATMEL_MDB081D:
13757 case FLASH_5761VENDOR_ATMEL_MDB161D:
13758 tp->nvram_jedecnum = JEDEC_ATMEL;
13759 tg3_flag_set(tp, NVRAM_BUFFERED);
13760 tg3_flag_set(tp, FLASH);
13761 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13762 tp->nvram_pagesize = 256;
13764 case FLASH_5761VENDOR_ST_A_M45PE20:
13765 case FLASH_5761VENDOR_ST_A_M45PE40:
13766 case FLASH_5761VENDOR_ST_A_M45PE80:
13767 case FLASH_5761VENDOR_ST_A_M45PE16:
13768 case FLASH_5761VENDOR_ST_M_M45PE20:
13769 case FLASH_5761VENDOR_ST_M_M45PE40:
13770 case FLASH_5761VENDOR_ST_M_M45PE80:
13771 case FLASH_5761VENDOR_ST_M_M45PE16:
13772 tp->nvram_jedecnum = JEDEC_ST;
13773 tg3_flag_set(tp, NVRAM_BUFFERED);
13774 tg3_flag_set(tp, FLASH);
13775 tp->nvram_pagesize = 256;
/* Protected parts report their size via the lockout register. */
13780 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
13783 case FLASH_5761VENDOR_ATMEL_ADB161D:
13784 case FLASH_5761VENDOR_ATMEL_MDB161D:
13785 case FLASH_5761VENDOR_ST_A_M45PE16:
13786 case FLASH_5761VENDOR_ST_M_M45PE16:
13787 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
13789 case FLASH_5761VENDOR_ATMEL_ADB081D:
13790 case FLASH_5761VENDOR_ATMEL_MDB081D:
13791 case FLASH_5761VENDOR_ST_A_M45PE80:
13792 case FLASH_5761VENDOR_ST_M_M45PE80:
13793 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13795 case FLASH_5761VENDOR_ATMEL_ADB041D:
13796 case FLASH_5761VENDOR_ATMEL_MDB041D:
13797 case FLASH_5761VENDOR_ST_A_M45PE40:
13798 case FLASH_5761VENDOR_ST_M_M45PE40:
13799 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13801 case FLASH_5761VENDOR_ATMEL_ADB021D:
13802 case FLASH_5761VENDOR_ATMEL_MDB021D:
13803 case FLASH_5761VENDOR_ST_A_M45PE20:
13804 case FLASH_5761VENDOR_ST_M_M45PE20:
13805 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
/* 5906 NVRAM setup is fixed: buffered Atmel AT24C512 EEPROM, no probing
 * of NVRAM_CFG1 needed.
 */
13811 static void tg3_get_5906_nvram_info(struct tg3 *tp)
13813 tp->nvram_jedecnum = JEDEC_ATMEL;
13814 tg3_flag_set(tp, NVRAM_BUFFERED);
13815 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
/* Decode NVRAM_CFG1 for 57780-class (and 57765-class) chips.  Parts are
 * classified as EEPROM, Atmel flash, or ST flash; a nested switch on
 * the same vendor field then picks the NVRAM size.  Unknown vendor ids
 * fall through to NO_NVRAM (default arm partially elided).  Finally the
 * page size is derived, and non-DataFlash page sizes (not 264/528) need
 * no address translation.  (break statements are elided from this
 * view.)
 */
13818 static void tg3_get_57780_nvram_info(struct tg3 *tp)
13822 nvcfg1 = tr32(NVRAM_CFG1);
13824 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13825 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13826 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13827 tp->nvram_jedecnum = JEDEC_ATMEL;
13828 tg3_flag_set(tp, NVRAM_BUFFERED);
13829 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13831 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13832 tw32(NVRAM_CFG1, nvcfg1);
13834 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13835 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13836 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13837 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13838 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13839 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13840 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13841 tp->nvram_jedecnum = JEDEC_ATMEL;
13842 tg3_flag_set(tp, NVRAM_BUFFERED);
13843 tg3_flag_set(tp, FLASH);
13845 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13846 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13847 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13848 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13849 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13851 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13852 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13853 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13855 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13856 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13857 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13861 case FLASH_5752VENDOR_ST_M45PE10:
13862 case FLASH_5752VENDOR_ST_M45PE20:
13863 case FLASH_5752VENDOR_ST_M45PE40:
13864 tp->nvram_jedecnum = JEDEC_ST;
13865 tg3_flag_set(tp, NVRAM_BUFFERED);
13866 tg3_flag_set(tp, FLASH);
13868 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13869 case FLASH_5752VENDOR_ST_M45PE10:
13870 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13872 case FLASH_5752VENDOR_ST_M45PE20:
13873 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13875 case FLASH_5752VENDOR_ST_M45PE40:
13876 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
/* Unrecognized vendor id: no usable NVRAM. */
13881 tg3_flag_set(tp, NO_NVRAM);
13885 tg3_nvram_get_pagesize(tp, nvcfg1);
13886 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13887 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
/* Decode NVRAM_CFG1 for 5717/5719 chips, same overall structure as the
 * 57780 variant: classify part type (EEPROM / Atmel flash / ST flash),
 * pick the NVRAM size where the device id determines it (some ids
 * leave the size to be probed later), mark unknown ids NO_NVRAM, and
 * derive the page size.  (break statements are elided from this view.)
 */
13891 static void tg3_get_5717_nvram_info(struct tg3 *tp)
13895 nvcfg1 = tr32(NVRAM_CFG1);
13897 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13898 case FLASH_5717VENDOR_ATMEL_EEPROM:
13899 case FLASH_5717VENDOR_MICRO_EEPROM:
13900 tp->nvram_jedecnum = JEDEC_ATMEL;
13901 tg3_flag_set(tp, NVRAM_BUFFERED);
13902 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13904 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13905 tw32(NVRAM_CFG1, nvcfg1);
13907 case FLASH_5717VENDOR_ATMEL_MDB011D:
13908 case FLASH_5717VENDOR_ATMEL_ADB011B:
13909 case FLASH_5717VENDOR_ATMEL_ADB011D:
13910 case FLASH_5717VENDOR_ATMEL_MDB021D:
13911 case FLASH_5717VENDOR_ATMEL_ADB021B:
13912 case FLASH_5717VENDOR_ATMEL_ADB021D:
13913 case FLASH_5717VENDOR_ATMEL_45USPT:
13914 tp->nvram_jedecnum = JEDEC_ATMEL;
13915 tg3_flag_set(tp, NVRAM_BUFFERED);
13916 tg3_flag_set(tp, FLASH);
13918 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13919 case FLASH_5717VENDOR_ATMEL_MDB021D:
13920 /* Detect size with tg3_nvram_get_size() */
13922 case FLASH_5717VENDOR_ATMEL_ADB021B:
13923 case FLASH_5717VENDOR_ATMEL_ADB021D:
13924 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
/* Default arm (presumably -- label elided): smallest supported size. */
13927 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13931 case FLASH_5717VENDOR_ST_M_M25PE10:
13932 case FLASH_5717VENDOR_ST_A_M25PE10:
13933 case FLASH_5717VENDOR_ST_M_M45PE10:
13934 case FLASH_5717VENDOR_ST_A_M45PE10:
13935 case FLASH_5717VENDOR_ST_M_M25PE20:
13936 case FLASH_5717VENDOR_ST_A_M25PE20:
13937 case FLASH_5717VENDOR_ST_M_M45PE20:
13938 case FLASH_5717VENDOR_ST_A_M45PE20:
13939 case FLASH_5717VENDOR_ST_25USPT:
13940 case FLASH_5717VENDOR_ST_45USPT:
13941 tp->nvram_jedecnum = JEDEC_ST;
13942 tg3_flag_set(tp, NVRAM_BUFFERED);
13943 tg3_flag_set(tp, FLASH);
13945 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13946 case FLASH_5717VENDOR_ST_M_M25PE20:
13947 case FLASH_5717VENDOR_ST_M_M45PE20:
13948 /* Detect size with tg3_nvram_get_size() */
13950 case FLASH_5717VENDOR_ST_A_M25PE20:
13951 case FLASH_5717VENDOR_ST_A_M45PE20:
13952 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13955 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
/* Unrecognized vendor id: no usable NVRAM. */
13960 tg3_flag_set(tp, NO_NVRAM);
13964 tg3_nvram_get_pagesize(tp, nvcfg1);
13965 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13966 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
/* Decode NVRAM_CFG1 for 5720/5762 chips.  On 5762 a missing vendor
 * field means no NVRAM, and some 5762 pinstraps are remapped to their
 * 5720 equivalents first.  The main switch then classifies the part
 * (EEPROM / Atmel flash / ST flash) and picks the size; 5762 skips the
 * 128KB fallback because its size is read from NVRAM contents.  A final
 * 5762-only sanity read validates the NVRAM magic.  (break statements
 * are elided from this view.)
 */
13969 static void tg3_get_5720_nvram_info(struct tg3 *tp)
13971 u32 nvcfg1, nvmpinstrp;
13973 nvcfg1 = tr32(NVRAM_CFG1);
13974 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
13976 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
13977 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
13978 tg3_flag_set(tp, NO_NVRAM);
/* Remap 5762-specific pinstraps onto the 5720 decoding below. */
13982 switch (nvmpinstrp) {
13983 case FLASH_5762_EEPROM_HD:
13984 nvmpinstrp = FLASH_5720_EEPROM_HD;
13986 case FLASH_5762_EEPROM_LD:
13987 nvmpinstrp = FLASH_5720_EEPROM_LD;
13989 case FLASH_5720VENDOR_M_ST_M45PE20:
13990 /* This pinstrap supports multiple sizes, so force it
13991 * to read the actual size from location 0xf0.
13993 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
13998 switch (nvmpinstrp) {
13999 case FLASH_5720_EEPROM_HD:
14000 case FLASH_5720_EEPROM_LD:
14001 tp->nvram_jedecnum = JEDEC_ATMEL;
14002 tg3_flag_set(tp, NVRAM_BUFFERED);
14004 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14005 tw32(NVRAM_CFG1, nvcfg1);
14006 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14007 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14009 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14011 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14012 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14013 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14014 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14015 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14016 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14017 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14018 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14019 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14020 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14021 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14022 case FLASH_5720VENDOR_ATMEL_45USPT:
14023 tp->nvram_jedecnum = JEDEC_ATMEL;
14024 tg3_flag_set(tp, NVRAM_BUFFERED);
14025 tg3_flag_set(tp, FLASH);
14027 switch (nvmpinstrp) {
14028 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14029 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14030 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14031 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14033 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14034 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14035 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14036 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14038 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14039 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14040 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
/* 5762 reads its size from NVRAM; only 5720 uses the 128KB default. */
14043 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14044 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14048 case FLASH_5720VENDOR_M_ST_M25PE10:
14049 case FLASH_5720VENDOR_M_ST_M45PE10:
14050 case FLASH_5720VENDOR_A_ST_M25PE10:
14051 case FLASH_5720VENDOR_A_ST_M45PE10:
14052 case FLASH_5720VENDOR_M_ST_M25PE20:
14053 case FLASH_5720VENDOR_M_ST_M45PE20:
14054 case FLASH_5720VENDOR_A_ST_M25PE20:
14055 case FLASH_5720VENDOR_A_ST_M45PE20:
14056 case FLASH_5720VENDOR_M_ST_M25PE40:
14057 case FLASH_5720VENDOR_M_ST_M45PE40:
14058 case FLASH_5720VENDOR_A_ST_M25PE40:
14059 case FLASH_5720VENDOR_A_ST_M45PE40:
14060 case FLASH_5720VENDOR_M_ST_M25PE80:
14061 case FLASH_5720VENDOR_M_ST_M45PE80:
14062 case FLASH_5720VENDOR_A_ST_M25PE80:
14063 case FLASH_5720VENDOR_A_ST_M45PE80:
14064 case FLASH_5720VENDOR_ST_25USPT:
14065 case FLASH_5720VENDOR_ST_45USPT:
14066 tp->nvram_jedecnum = JEDEC_ST;
14067 tg3_flag_set(tp, NVRAM_BUFFERED);
14068 tg3_flag_set(tp, FLASH);
14070 switch (nvmpinstrp) {
14071 case FLASH_5720VENDOR_M_ST_M25PE20:
14072 case FLASH_5720VENDOR_M_ST_M45PE20:
14073 case FLASH_5720VENDOR_A_ST_M25PE20:
14074 case FLASH_5720VENDOR_A_ST_M45PE20:
14075 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14077 case FLASH_5720VENDOR_M_ST_M25PE40:
14078 case FLASH_5720VENDOR_M_ST_M45PE40:
14079 case FLASH_5720VENDOR_A_ST_M25PE40:
14080 case FLASH_5720VENDOR_A_ST_M45PE40:
14081 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14083 case FLASH_5720VENDOR_M_ST_M25PE80:
14084 case FLASH_5720VENDOR_M_ST_M45PE80:
14085 case FLASH_5720VENDOR_A_ST_M25PE80:
14086 case FLASH_5720VENDOR_A_ST_M45PE80:
14087 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14090 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14091 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
/* Unrecognized pinstrap: no usable NVRAM. */
14096 tg3_flag_set(tp, NO_NVRAM);
14100 tg3_nvram_get_pagesize(tp, nvcfg1);
14101 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14102 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
/* 5762: verify the NVRAM contents carry a recognized magic; otherwise
 * treat the device as having no NVRAM.
 */
14104 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14107 if (tg3_nvram_read(tp, 0, &val))
14110 if (val != TG3_EEPROM_MAGIC &&
14111 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14112 tg3_flag_set(tp, NO_NVRAM)
14116 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
/* tg3_nvram_init() - probe and configure NVRAM access for this chip.
 * SSB-embedded cores have no NVRAM at all.  Otherwise the EEPROM state
 * machine is reset, serial-EEPROM access is enabled, and on 5705+
 * chips the per-ASIC decoder above is dispatched to fill in jedecnum,
 * page size, flags, and size.  5700/5701 fall back to raw EEPROM
 * sizing.  (Some intermediate lines are elided from this view.)
 */
14117 static void tg3_nvram_init(struct tg3 *tp)
14119 if (tg3_flag(tp, IS_SSB_CORE)) {
14120 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14121 tg3_flag_clear(tp, NVRAM);
14122 tg3_flag_clear(tp, NVRAM_BUFFERED);
14123 tg3_flag_set(tp, NO_NVRAM);
/* Reset the EEPROM state machine and set the default clock period. */
14127 tw32_f(GRC_EEPROM_ADDR,
14128 (EEPROM_ADDR_FSM_RESET |
14129 (EEPROM_DEFAULT_CLOCK_PERIOD <<
14130 EEPROM_ADDR_CLKPERD_SHIFT)));
14134 /* Enable seeprom accesses. */
14135 tw32_f(GRC_LOCAL_CTRL,
14136 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14139 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14140 tg3_asic_rev(tp) != ASIC_REV_5701) {
14141 tg3_flag_set(tp, NVRAM);
14143 if (tg3_nvram_lock(tp)) {
14144 netdev_warn(tp->dev,
14145 "Cannot get nvram lock, %s failed\n",
14149 tg3_enable_nvram_access(tp);
14151 tp->nvram_size = 0;
/* Dispatch to the ASIC-specific NVRAM decoder. */
14153 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14154 tg3_get_5752_nvram_info(tp);
14155 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14156 tg3_get_5755_nvram_info(tp);
14157 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14158 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14159 tg3_asic_rev(tp) == ASIC_REV_5785)
14160 tg3_get_5787_nvram_info(tp);
14161 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14162 tg3_get_5761_nvram_info(tp);
14163 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14164 tg3_get_5906_nvram_info(tp);
14165 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14166 tg3_flag(tp, 57765_CLASS))
14167 tg3_get_57780_nvram_info(tp);
14168 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14169 tg3_asic_rev(tp) == ASIC_REV_5719)
14170 tg3_get_5717_nvram_info(tp);
14171 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14172 tg3_asic_rev(tp) == ASIC_REV_5762)
14173 tg3_get_5720_nvram_info(tp);
14175 tg3_get_nvram_info(tp);
/* If the decoder did not fix the size, probe for it. */
14177 if (tp->nvram_size == 0)
14178 tg3_get_nvram_size(tp);
14180 tg3_disable_nvram_access(tp);
14181 tg3_nvram_unlock(tp);
/* 5700/5701 (else branch, presumably -- structure elided): plain
 * EEPROM, no NVRAM interface.
 */
14184 tg3_flag_clear(tp, NVRAM);
14185 tg3_flag_clear(tp, NVRAM_BUFFERED);
14187 tg3_get_eeprom_size(tp);
/* Table entry mapping a PCI subsystem (vendor, device) pair to a PHY id.
 * NOTE(review): the PHY-id member of this struct is elided from this
 * view -- confirm against the full source.
 */
14191 struct subsys_tbl_ent {
14192 u16 subsys_vendor, subsys_devid;
/* Static table of known board subsystem IDs and their PHY ids, used by
 * tg3_lookup_by_subsys() when the PHY id cannot be read directly.  An
 * entry of 0 means the board has no copper PHY (e.g. fiber variants).
 */
14196 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14197 /* Broadcom boards. */
14198 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14199 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14200 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14201 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14202 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14203 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14204 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14205 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14206 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14207 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14208 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14209 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14210 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14211 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14212 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14213 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14214 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14215 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14216 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14217 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14218 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14219 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
/* 3com boards. */
14222 { TG3PCI_SUBVENDOR_ID_3COM,
14223 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14224 { TG3PCI_SUBVENDOR_ID_3COM,
14225 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14226 { TG3PCI_SUBVENDOR_ID_3COM,
14227 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14228 { TG3PCI_SUBVENDOR_ID_3COM,
14229 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14230 { TG3PCI_SUBVENDOR_ID_3COM,
14231 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
/* Dell boards. */
14234 { TG3PCI_SUBVENDOR_ID_DELL,
14235 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14236 { TG3PCI_SUBVENDOR_ID_DELL,
14237 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14238 { TG3PCI_SUBVENDOR_ID_DELL,
14239 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14240 { TG3PCI_SUBVENDOR_ID_DELL,
14241 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14243 /* Compaq boards. */
14244 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14245 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14246 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14247 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14248 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14249 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14250 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14251 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14252 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14253 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
/* IBM boards. */
14256 { TG3PCI_SUBVENDOR_ID_IBM,
14257 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
/* Linear scan of subsys_id_to_phy_id for an entry matching this
 * device's PCI subsystem vendor/device ids; returns the entry on a hit
 * (the no-match return value is elided from this view -- presumably
 * NULL; confirm against the full source).
 */
14260 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14264 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14265 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14266 tp->pdev->subsystem_vendor) &&
14267 (subsys_id_to_phy_id[i].subsys_devid ==
14268 tp->pdev->subsystem_device))
14269 return &subsys_id_to_phy_id[i];
/* Parse the hardware configuration stored by bootcode in NIC SRAM
 * (NIC_SRAM_DATA_*) and set up the corresponding tg3 flags and fields:
 * PHY id/serdes type, LED mode, EEPROM write protection, ASF/APE
 * enablement, WoL capability and various per-chip workarounds.
 * Defaults are applied first; they are refined only when the SRAM
 * signature magic is present.
 * NOTE(review): several intermediate lines (braces, breaks, switch
 * headers) are missing from this extract — code kept byte-identical.
 */
14274 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14278 tp->phy_id = TG3_PHY_ID_INVALID;
14279 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14281 /* Assume an onboard device and WOL capable by default. */
14282 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14283 tg3_flag_set(tp, WOL_CAP);
14285 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14286 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14287 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14288 tg3_flag_set(tp, IS_NIC);
14290 val = tr32(VCPU_CFGSHDW);
14291 if (val & VCPU_CFGSHDW_ASPM_DBNC)
14292 tg3_flag_set(tp, ASPM_WORKAROUND);
14293 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14294 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14295 tg3_flag_set(tp, WOL_ENABLE);
14296 device_set_wakeup_enable(&tp->pdev->dev, true);
/* Only trust the SRAM config block when the signature magic matches. */
14301 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14302 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14303 u32 nic_cfg, led_cfg;
14304 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
14305 int eeprom_phy_serdes = 0;
14307 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14308 tp->nic_sram_data_cfg = nic_cfg;
14310 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14311 ver >>= NIC_SRAM_DATA_VER_SHIFT;
14312 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14313 tg3_asic_rev(tp) != ASIC_REV_5701 &&
14314 tg3_asic_rev(tp) != ASIC_REV_5703 &&
14315 (ver > 0) && (ver < 0x100))
14316 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14318 if (tg3_asic_rev(tp) == ASIC_REV_5785)
14319 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14321 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14322 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14323 eeprom_phy_serdes = 1;
/* Reconstruct the 32-bit PHY id from the two SRAM halves. */
14325 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14326 if (nic_phy_id != 0) {
14327 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14328 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14330 eeprom_phy_id = (id1 >> 16) << 10;
14331 eeprom_phy_id |= (id2 & 0xfc00) << 16;
14332 eeprom_phy_id |= (id2 & 0x03ff) << 0;
14336 tp->phy_id = eeprom_phy_id;
14337 if (eeprom_phy_serdes) {
14338 if (!tg3_flag(tp, 5705_PLUS))
14339 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14341 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14344 if (tg3_flag(tp, 5750_PLUS))
14345 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14346 SHASTA_EXT_LED_MODE_MASK);
14348 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
/* Translate the LED mode selector into the LED_CTRL register value. */
14352 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14353 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14356 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14357 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14360 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14361 tp->led_ctrl = LED_CTRL_MODE_MAC;
14363 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
14364 * read on some older 5700/5701 bootcode.
14366 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
14367 tg3_asic_rev(tp) == ASIC_REV_5701)
14368 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14372 case SHASTA_EXT_LED_SHARED:
14373 tp->led_ctrl = LED_CTRL_MODE_SHARED;
14374 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
14375 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
14376 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14377 LED_CTRL_MODE_PHY_2);
14380 case SHASTA_EXT_LED_MAC:
14381 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14384 case SHASTA_EXT_LED_COMBO:
14385 tp->led_ctrl = LED_CTRL_MODE_COMBO;
14386 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
14387 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14388 LED_CTRL_MODE_PHY_2);
/* Board-specific LED overrides (Dell 5700/5701, 5784 AX). */
14393 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
14394 tg3_asic_rev(tp) == ASIC_REV_5701) &&
14395 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14396 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14398 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
14399 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14401 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14402 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14403 if ((tp->pdev->subsystem_vendor ==
14404 PCI_VENDOR_ID_ARIMA) &&
14405 (tp->pdev->subsystem_device == 0x205a ||
14406 tp->pdev->subsystem_device == 0x2063))
14407 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14409 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14410 tg3_flag_set(tp, IS_NIC);
14413 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14414 tg3_flag_set(tp, ENABLE_ASF);
14415 if (tg3_flag(tp, 5750_PLUS))
14416 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14419 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14420 tg3_flag(tp, 5750_PLUS))
14421 tg3_flag_set(tp, ENABLE_APE);
14423 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14424 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14425 tg3_flag_clear(tp, WOL_CAP);
14427 if (tg3_flag(tp, WOL_CAP) &&
14428 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14429 tg3_flag_set(tp, WOL_ENABLE);
14430 device_set_wakeup_enable(&tp->pdev->dev, true);
14433 if (cfg2 & (1 << 17))
14434 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14436 /* serdes signal pre-emphasis in register 0x590 set by */
14437 /* bootcode if bit 18 is set */
14438 if (cfg2 & (1 << 18))
14439 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14441 if ((tg3_flag(tp, 57765_PLUS) ||
14442 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
14443 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
14444 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14445 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
14447 if (tg3_flag(tp, PCI_EXPRESS) &&
14448 tg3_asic_rev(tp) != ASIC_REV_5785 &&
14449 !tg3_flag(tp, 57765_PLUS)) {
14452 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14453 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
14454 tg3_flag_set(tp, ASPM_WORKAROUND);
14457 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
14458 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
14459 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
14460 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
14461 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
14462 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
/* Propagate the final WoL state to the PM core. */
14465 if (tg3_flag(tp, WOL_CAP))
14466 device_set_wakeup_enable(&tp->pdev->dev,
14467 tg3_flag(tp, WOL_ENABLE));
14469 device_set_wakeup_capable(&tp->pdev->dev, false);
/* Read one 32-bit word from the APE OTP region at @offset (in words;
 * converted to a bit address via offset * 8).  Takes the NVRAM lock,
 * issues an OTP read command through the APE registers, then polls
 * APE_OTP_STATUS for completion (up to 100 iterations).
 * Returns 0 and fills *val on success; error paths (lock failure,
 * timeout) are partially dropped from this extract — verify upstream.
 */
14472 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
14475 u32 val2, off = offset * 8;
14477 err = tg3_nvram_lock(tp);
14481 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
14482 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
14483 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
14484 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
14487 for (i = 0; i < 100; i++) {
14488 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
14489 if (val2 & APE_OTP_STATUS_CMD_DONE) {
14490 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
14496 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
14498 tg3_nvram_unlock(tp);
14499 if (val2 & APE_OTP_STATUS_CMD_DONE)
/* Issue an OTP controller command via the GRC-mapped OTP_CTRL register
 * and poll OTP_STATUS for completion.  Returns 0 when the command
 * completed, -EBUSY on timeout.
 */
14505 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14510 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14511 tw32(OTP_CTRL, cmd);
14513 /* Wait for up to 1 ms for command to execute. */
14514 for (i = 0; i < 100; i++) {
14515 val = tr32(OTP_STATUS);
14516 if (val & OTP_STATUS_CMD_DONE)
14521 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14524 /* Read the gphy configuration from the OTP region of the chip. The gphy
14525 * configuration is a 32-bit value that straddles the alignment boundary.
14526 * We do two 32-bit reads and then shift and merge the results.
/* Returns the merged 32-bit config; error paths (returning 0 when an
 * OTP command fails) are not fully visible in this extract.
 */
14528 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
14530 u32 bhalf_otp, thalf_otp;
14532 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
14534 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
14537 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
14539 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14542 thalf_otp = tr32(OTP_READ_DATA);
14544 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
14546 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14549 bhalf_otp = tr32(OTP_READ_DATA);
/* Merge: low half of the first word becomes the high half of the result. */
14551 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
/* Initialize link_config to full autonegotiation defaults: advertise
 * every speed/duplex the PHY supports (gigabit unless 10/100-only,
 * 10/100 unless serdes, FIBRE for serdes) and mark current/active
 * speed and duplex as unknown until a link is negotiated.
 */
14554 static void tg3_phy_init_link_config(struct tg3 *tp)
14556 u32 adv = ADVERTISED_Autoneg;
14558 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14559 adv |= ADVERTISED_1000baseT_Half |
14560 ADVERTISED_1000baseT_Full;
14562 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14563 adv |= ADVERTISED_100baseT_Half |
14564 ADVERTISED_100baseT_Full |
14565 ADVERTISED_10baseT_Half |
14566 ADVERTISED_10baseT_Full |
14569 adv |= ADVERTISED_FIBRE;
14571 tp->link_config.advertising = adv;
14572 tp->link_config.speed = SPEED_UNKNOWN;
14573 tp->link_config.duplex = DUPLEX_UNKNOWN;
14574 tp->link_config.autoneg = AUTONEG_ENABLE;
14575 tp->link_config.active_speed = SPEED_UNKNOWN;
14576 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
/* Probe and identify the PHY attached to this device.  Determines the
 * PHY id (from MII registers, the hard-coded subsystem table, or the
 * EEPROM value already read in tg3_get_eeprom_hw_cfg()), selects the
 * APE PHY lock for multi-function parts, sets EEE capability flags,
 * initializes link_config, and optionally resets/autonegotiates the
 * PHY.  Returns 0 on success or a negative errno.
 * NOTE(review): several lines (case labels, closing braces, tail) are
 * missing from this extract — code kept byte-identical.
 */
14581 static int tg3_phy_probe(struct tg3 *tp)
14583 u32 hw_phy_id_1, hw_phy_id_2;
14584 u32 hw_phy_id, hw_phy_id_masked;
14587 /* flow control autonegotiation is default behavior */
14588 tg3_flag_set(tp, PAUSE_AUTONEG);
14589 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
/* Pick the per-function APE PHY lock when the APE firmware is active. */
14591 if (tg3_flag(tp, ENABLE_APE)) {
14592 switch (tp->pci_fn) {
14594 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
14597 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
14600 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
14603 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
14608 if (tg3_flag(tp, USE_PHYLIB))
14609 return tg3_phy_init(tp);
14611 /* Reading the PHY ID register can conflict with ASF
14612 * firmware access to the PHY hardware.
14615 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
14616 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
14618 /* Now read the physical PHY_ID from the chip and verify
14619 * that it is sane. If it doesn't look good, we fall back
14620 * to either the hard-coded table based PHY_ID and failing
14621 * that the value found in the eeprom area.
14623 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
14624 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
14626 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
14627 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
14628 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
14630 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
14633 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
14634 tp->phy_id = hw_phy_id;
14635 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
14636 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14638 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
14640 if (tp->phy_id != TG3_PHY_ID_INVALID) {
14641 /* Do nothing, phy ID already set up in
14642 * tg3_get_eeprom_hw_cfg().
14645 struct subsys_tbl_ent *p;
14647 /* No eeprom signature? Try the hardcoded
14648 * subsys device table.
14650 p = tg3_lookup_by_subsys(tp);
14652 tp->phy_id = p->phy_id;
14653 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
14654 /* For now we saw the IDs 0xbc050cd0,
14655 * 0xbc050f80 and 0xbc050c30 on devices
14656 * connected to an BCM4785 and there are
14657 * probably more. Just assume that the phy is
14658 * supported when it is connected to a SSB core
14665 tp->phy_id == TG3_PHY_ID_BCM8002)
14666 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
/* EEE is supported on these copper-PHY ASIC revisions only. */
14670 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14671 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
14672 tg3_asic_rev(tp) == ASIC_REV_5720 ||
14673 tg3_asic_rev(tp) == ASIC_REV_57766 ||
14674 tg3_asic_rev(tp) == ASIC_REV_5762 ||
14675 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
14676 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
14677 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
14678 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
14679 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
14681 tg3_phy_init_link_config(tp);
14683 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14684 !tg3_flag(tp, ENABLE_APE) &&
14685 !tg3_flag(tp, ENABLE_ASF)) {
/* BMSR is read twice because link status is latched-low. */
14688 tg3_readphy(tp, MII_BMSR, &bmsr);
14689 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
14690 (bmsr & BMSR_LSTATUS))
14691 goto skip_phy_reset;
14693 err = tg3_phy_reset(tp);
14697 tg3_phy_set_wirespeed(tp);
14699 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
14700 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
14701 tp->link_config.flowctrl);
14703 tg3_writephy(tp, MII_BMCR,
14704 BMCR_ANENABLE | BMCR_ANRESTART);
14709 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
14710 err = tg3_init_5401phy_dsp(tp);
14714 err = tg3_init_5401phy_dsp(tp);
/* Extract the board part number (and, for Dell boards, a firmware
 * version prefix) from the PCI VPD read-only section.  Falls back to
 * hard-coded part-number strings keyed by device id when VPD is absent
 * or malformed.  vpd_data is allocated by tg3_vpd_readblock(); the
 * kfree on the out_not_found path is not visible in this extract.
 */
14720 static void tg3_read_vpd(struct tg3 *tp)
14723 unsigned int block_end, rosize, len;
14727 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
14731 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
14733 goto out_not_found;
14735 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
14736 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
14737 i += PCI_VPD_LRDT_TAG_SIZE;
14739 if (block_end > vpdlen)
14740 goto out_not_found;
/* Dell boards (MFR_ID "1028") embed a vendor firmware version string. */
14742 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14743 PCI_VPD_RO_KEYWORD_MFR_ID);
14745 len = pci_vpd_info_field_size(&vpd_data[j]);
14747 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14748 if (j + len > block_end || len != 4 ||
14749 memcmp(&vpd_data[j], "1028", 4))
14752 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14753 PCI_VPD_RO_KEYWORD_VENDOR0);
14757 len = pci_vpd_info_field_size(&vpd_data[j]);
14759 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14760 if (j + len > block_end)
14763 if (len >= sizeof(tp->fw_ver))
14764 len = sizeof(tp->fw_ver) - 1;
14765 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
14766 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
14771 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14772 PCI_VPD_RO_KEYWORD_PARTNO);
14774 goto out_not_found;
14776 len = pci_vpd_info_field_size(&vpd_data[i]);
14778 i += PCI_VPD_INFO_FLD_HDR_SIZE;
14779 if (len > TG3_BPN_SIZE ||
14780 (len + i) > vpdlen)
14781 goto out_not_found;
14783 memcpy(tp->board_part_number, &vpd_data[i], len);
14787 if (tp->board_part_number[0])
/* No usable VPD part number: synthesize one from the PCI device id. */
14791 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
14792 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14793 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
14794 strcpy(tp->board_part_number, "BCM5717");
14795 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
14796 strcpy(tp->board_part_number, "BCM5718");
14799 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
14800 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
14801 strcpy(tp->board_part_number, "BCM57780");
14802 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
14803 strcpy(tp->board_part_number, "BCM57760");
14804 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
14805 strcpy(tp->board_part_number, "BCM57790");
14806 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
14807 strcpy(tp->board_part_number, "BCM57788");
14810 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
14811 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
14812 strcpy(tp->board_part_number, "BCM57761");
14813 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
14814 strcpy(tp->board_part_number, "BCM57765");
14815 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
14816 strcpy(tp->board_part_number, "BCM57781");
14817 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
14818 strcpy(tp->board_part_number, "BCM57785");
14819 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
14820 strcpy(tp->board_part_number, "BCM57791");
14821 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
14822 strcpy(tp->board_part_number, "BCM57795");
14825 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
14826 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
14827 strcpy(tp->board_part_number, "BCM57762");
14828 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
14829 strcpy(tp->board_part_number, "BCM57766");
14830 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
14831 strcpy(tp->board_part_number, "BCM57782");
14832 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14833 strcpy(tp->board_part_number, "BCM57786");
14836 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14837 strcpy(tp->board_part_number, "BCM95906");
14840 strcpy(tp->board_part_number, "none");
/* Sanity-check a firmware image header in NVRAM at @offset: the first
 * word must carry the 0x0c000000 signature in its top bits, and the
 * following word must also be readable.  Return value lines are not
 * visible in this extract — presumably nonzero when valid.
 */
14844 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
14848 if (tg3_nvram_read(tp, offset, &val) ||
14849 (val & 0xfc000000) != 0x0c000000 ||
14850 tg3_nvram_read(tp, offset + 4, &val) ||
/* Append the bootcode version to tp->fw_ver.  Newer images carry an
 * ASCII version string inside the image (16 bytes copied in 4-byte
 * NVRAM reads); older images store a packed major/minor word at
 * TG3_NVM_PTREV_BCVER, formatted as "vM.mm".
 */
14857 static void tg3_read_bc_ver(struct tg3 *tp)
14859 u32 val, offset, start, ver_offset;
14861 bool newver = false;
14863 if (tg3_nvram_read(tp, 0xc, &offset) ||
14864 tg3_nvram_read(tp, 0x4, &start))
14867 offset = tg3_nvram_logical_addr(tp, offset);
14869 if (tg3_nvram_read(tp, offset, &val))
14872 if ((val & 0xfc000000) == 0x0c000000) {
14873 if (tg3_nvram_read(tp, offset + 4, &val))
14880 dst_off = strlen(tp->fw_ver);
/* Need room for the 16-byte embedded version string. */
14883 if (TG3_VER_SIZE - dst_off < 16 ||
14884 tg3_nvram_read(tp, offset + 8, &ver_offset))
14887 offset = offset + ver_offset - start;
14888 for (i = 0; i < 16; i += 4) {
14890 if (tg3_nvram_read_be32(tp, offset + i, &v))
14893 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
14898 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
14901 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
14902 TG3_NVM_BCVER_MAJSFT;
14903 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
14904 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
14905 "v%d.%02d", major, minor);
/* Format the hardware selfboot version ("sb vM.mm") into tp->fw_ver
 * from the packed major/minor fields of the HWSB_CFG1 NVRAM word.
 */
14909 static void tg3_read_hwsb_ver(struct tg3 *tp)
14911 u32 val, major, minor;
14913 /* Use native endian representation */
14914 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
14917 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
14918 TG3_NVM_HWSB_CFG1_MAJSFT;
14919 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
14920 TG3_NVM_HWSB_CFG1_MINSFT;
14922 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
/* Append the selfboot (EEPROM format 1) version to tp->fw_ver as
 * "sb vM.mm" plus an optional build-letter suffix ('a'..'z').
 * @val is the format/revision word already read by the caller; the
 * revision selects where the edh (version) word lives.
 */
14925 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
14927 u32 offset, major, minor, build;
14929 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
14931 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
14934 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
14935 case TG3_EEPROM_SB_REVISION_0:
14936 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
14938 case TG3_EEPROM_SB_REVISION_2:
14939 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
14941 case TG3_EEPROM_SB_REVISION_3:
14942 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
14944 case TG3_EEPROM_SB_REVISION_4:
14945 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
14947 case TG3_EEPROM_SB_REVISION_5:
14948 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
14950 case TG3_EEPROM_SB_REVISION_6:
14951 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
14957 if (tg3_nvram_read(tp, offset, &val))
14960 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
14961 TG3_EEPROM_SB_EDH_BLD_SHFT;
14962 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
14963 TG3_EEPROM_SB_EDH_MAJ_SHFT;
14964 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
/* Reject values that cannot be rendered ("%02d" minor, a-z build). */
14966 if (minor > 99 || build > 26)
14969 offset = strlen(tp->fw_ver);
14970 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
14971 " v%d.%02d", major, minor);
14974 offset = strlen(tp->fw_ver);
14975 if (offset < TG3_VER_SIZE - 1)
14976 tp->fw_ver[offset] = 'a' + build - 1;
/* Locate the ASF management-firmware image via the NVRAM directory and
 * append its embedded version string (up to 16 bytes, read in 4-byte
 * chunks) to tp->fw_ver, separated by ", ".
 * NOTE(review): at 15022 the visible comparison `vlen > TG3_VER_SIZE -
 * sizeof(v)` truncates-and-stops — upstream uses the same guard; kept
 * byte-identical.
 */
14980 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
14982 u32 val, offset, start;
/* Scan directory entries for the ASF-init image type. */
14985 for (offset = TG3_NVM_DIR_START;
14986 offset < TG3_NVM_DIR_END;
14987 offset += TG3_NVM_DIRENT_SIZE) {
14988 if (tg3_nvram_read(tp, offset, &val))
14991 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
14995 if (offset == TG3_NVM_DIR_END)
14998 if (!tg3_flag(tp, 5705_PLUS))
14999 start = 0x08000000;
15000 else if (tg3_nvram_read(tp, offset - 4, &start))
15003 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15004 !tg3_fw_img_is_valid(tp, offset) ||
15005 tg3_nvram_read(tp, offset + 8, &val))
15008 offset += val - start;
15010 vlen = strlen(tp->fw_ver);
15012 tp->fw_ver[vlen++] = ',';
15013 tp->fw_ver[vlen++] = ' ';
15015 for (i = 0; i < 4; i++) {
15017 if (tg3_nvram_read_be32(tp, offset, &v))
15020 offset += sizeof(v);
15022 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15023 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15027 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
/* Detect NCSI support in the APE firmware: requires a valid APE
 * segment signature, a READY firmware status, and the NCSI feature
 * bit; sets the APE_HAS_NCSI flag when all three hold.
 */
15032 static void tg3_probe_ncsi(struct tg3 *tp)
15036 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15037 if (apedata != APE_SEG_SIG_MAGIC)
15040 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15041 if (!(apedata & APE_FW_STATUS_READY))
15044 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15045 tg3_flag_set(tp, APE_HAS_NCSI)<;
/* Append the APE/DASH firmware version (" <type> vA.B.C.D") to
 * tp->fw_ver.  The firmware-type string selection (NCSI vs DASH vs
 * 5725-specific) is partially dropped from this extract.
 */
15048 static void tg3_read_dash_ver(struct tg3 *tp)
15054 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15056 if (tg3_flag(tp, APE_HAS_NCSI))
15058 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15063 vlen = strlen(tp->fw_ver);
15065 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15067 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15068 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15069 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15070 (apedata & APE_FW_VERSION_BLDMSK))<;
/* 5762 only: read two OTP words, validate the MAGIC0 marker, and scan
 * the combined 64-bit value for the last nonzero byte, which is the
 * OTP version; append it to tp->fw_ver as " .NN".
 */
15073 static void tg3_read_otp_ver(struct tg3 *tp)
15077 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15080 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15081 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15082 TG3_OTP_MAGIC0_VALID(val)) {
15083 u64 val64 = (u64) val << 32 | val2;
15087 for (i = 0; i < 7; i++) {
15088 if ((val64 & 0xff) == 0)
15090 ver = val64 & 0xff;
15093 vlen = strlen(tp->fw_ver);
15094 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
/* Top-level firmware version collector.  Dispatches on the NVRAM magic
 * to the bootcode/selfboot/hardware-selfboot readers, then appends the
 * management (ASF/APE) firmware version.  Always NUL-terminates
 * tp->fw_ver.  Skips everything if a VPD-derived version was already
 * stored (fw_ver[0] != 0).
 */
15098 static void tg3_read_fw_ver(struct tg3 *tp)
15101 bool vpd_vers = false;
15103 if (tp->fw_ver[0] != 0)
15106 if (tg3_flag(tp, NO_NVRAM)) {
15107 strcat(tp->fw_ver, "sb");
15108 tg3_read_otp_ver(tp);
15112 if (tg3_nvram_read(tp, 0, &val))
15115 if (val == TG3_EEPROM_MAGIC)
15116 tg3_read_bc_ver(tp);
15117 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15118 tg3_read_sb_ver(tp, val);
15119 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15120 tg3_read_hwsb_ver(tp);
15122 if (tg3_flag(tp, ENABLE_ASF)) {
15123 if (tg3_flag(tp, ENABLE_APE)) {
15124 tg3_probe_ncsi(tp);
15126 tg3_read_dash_ver(tp);
15127 } else if (!vpd_vers) {
15128 tg3_read_mgmtfw_ver(tp);
15132 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
/* Return the RX return ring size for this chip family: the large
 * 5717-class size when the large-production-ring flag is set, the 5700
 * size for jumbo-capable non-5780 parts, else the 5705 default.
 */
15135 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15137 if (tg3_flag(tp, LRG_PROD_RING_CAP))
15138 return TG3_RX_RET_MAX_SIZE_5717;
15139 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15140 return TG3_RX_RET_MAX_SIZE_5700;
15142 return TG3_RX_RET_MAX_SIZE_5705;
/* Host bridges known to reorder posted writes to the mailbox
 * registers; presence of one of these triggers the MBOX_WRITE_REORDER
 * workaround.  The terminating empty entry is not visible in this
 * extract.
 */
15145 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15146 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15147 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15148 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
/* For dual-port devices (5704/5714), find the other PCI function on
 * the same slot (devfn with the function bits masked off).  Falls back
 * to tp->pdev itself for single-port configurations; the fallback and
 * pci_dev_put lines are not visible in this extract.
 */
15152 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15154 struct pci_dev *peer;
15155 unsigned int func, devnr = tp->pdev->devfn & ~7;
15157 for (func = 0; func < 8; func++) {
15158 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15159 if (peer && peer != tp->pdev)
15163 /* 5704 can be configured in single-port mode, set peer to
15164 * tp->pdev in that case.
15172 * We don't need to keep the refcount elevated; there's no way
15173 * to remove one half of this device without removing the other
/* Determine the chip revision id.  Newer devices report
 * ASIC_REV_USE_PROD_ID_REG in MISC_HOST_CTRL and must be re-read from
 * a product-id config register selected by PCI device id.  Then derive
 * the family flags (5717_PLUS, 57765_CLASS/PLUS, 5755_PLUS,
 * 5780_CLASS, 5750_PLUS, 5705_PLUS) that gate features elsewhere.
 */
15180 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15182 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15183 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15186 /* All devices that use the alternate
15187 * ASIC REV location have a CPMU.
15189 tg3_flag_set(tp, CPMU_PRESENT);
15191 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15192 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15193 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15194 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15195 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15196 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15197 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15198 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
15199 reg = TG3PCI_GEN2_PRODID_ASICREV;
15200 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15201 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15202 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15203 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15204 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15205 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15206 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15207 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15208 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15209 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15210 reg = TG3PCI_GEN15_PRODID_ASICREV;
15212 reg = TG3PCI_PRODID_ASICREV;
15214 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15217 /* Wrong chip ID in 5752 A0. This code can be removed later
15218 * as A0 is not in production.
15220 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15221 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15223 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15224 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
/* Family flags build on each other from newest to oldest. */
15226 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15227 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15228 tg3_asic_rev(tp) == ASIC_REV_5720)
15229 tg3_flag_set(tp, 5717_PLUS);
15231 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15232 tg3_asic_rev(tp) == ASIC_REV_57766)
15233 tg3_flag_set(tp, 57765_CLASS);
15235 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15236 tg3_asic_rev(tp) == ASIC_REV_5762)
15237 tg3_flag_set(tp, 57765_PLUS);
15239 /* Intentionally exclude ASIC_REV_5906 */
15240 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15241 tg3_asic_rev(tp) == ASIC_REV_5787 ||
15242 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15243 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15244 tg3_asic_rev(tp) == ASIC_REV_5785 ||
15245 tg3_asic_rev(tp) == ASIC_REV_57780 ||
15246 tg3_flag(tp, 57765_PLUS))
15247 tg3_flag_set(tp, 5755_PLUS);
15249 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15250 tg3_asic_rev(tp) == ASIC_REV_5714)
15251 tg3_flag_set(tp, 5780_CLASS);
15253 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15254 tg3_asic_rev(tp) == ASIC_REV_5752 ||
15255 tg3_asic_rev(tp) == ASIC_REV_5906 ||
15256 tg3_flag(tp, 5755_PLUS) ||
15257 tg3_flag(tp, 5780_CLASS))
15258 tg3_flag_set(tp, 5750_PLUS);
15260 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15261 tg3_flag(tp, 5750_PLUS))
15262 tg3_flag_set(tp, 5705_PLUS);
/* Decide whether this device is limited to 10/100 Mbps: true for
 * certain 5703 board ids, FET-style PHYs, or entries flagged
 * 10_100_ONLY in the PCI id table (with a 5705-specific sub-check).
 * The tail (final return) is not visible in this extract.
 */
15265 static bool tg3_10_100_only_device(struct tg3 *tp,
15266 const struct pci_device_id *ent)
15268 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15270 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15271 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15272 (tp->phy_flags & TG3_PHYFLG_IS_FET))
15275 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15276 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15277 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15287 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15290 u32 pci_state_reg, grc_misc_cfg;
15295 /* Force memory write invalidate off. If we leave it on,
15296 * then on 5700_BX chips we have to enable a workaround.
15297 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15298 * to match the cacheline size. The Broadcom driver have this
15299 * workaround but turns MWI off all the times so never uses
15300 * it. This seems to suggest that the workaround is insufficient.
15302 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15303 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15304 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15306 /* Important! -- Make sure register accesses are byteswapped
15307 * correctly. Also, for those chips that require it, make
15308 * sure that indirect register accesses are enabled before
15309 * the first operation.
15311 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15313 tp->misc_host_ctrl |= (misc_ctrl_reg &
15314 MISC_HOST_CTRL_CHIPREV);
15315 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15316 tp->misc_host_ctrl);
15318 tg3_detect_asic_rev(tp, misc_ctrl_reg);
15320 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15321 * we need to disable memory and use config. cycles
15322 * only to access all registers. The 5702/03 chips
15323 * can mistakenly decode the special cycles from the
15324 * ICH chipsets as memory write cycles, causing corruption
15325 * of register and memory space. Only certain ICH bridges
15326 * will drive special cycles with non-zero data during the
15327 * address phase which can fall within the 5703's address
15328 * range. This is not an ICH bug as the PCI spec allows
15329 * non-zero address during special cycles. However, only
15330 * these ICH bridges are known to drive non-zero addresses
15331 * during special cycles.
15333 * Since special cycles do not cross PCI bridges, we only
15334 * enable this workaround if the 5703 is on the secondary
15335 * bus of these ICH bridges.
15337 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15338 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15339 static struct tg3_dev_id {
15343 } ich_chipsets[] = {
15344 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15346 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15348 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15350 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15354 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15355 struct pci_dev *bridge = NULL;
15357 while (pci_id->vendor != 0) {
15358 bridge = pci_get_device(pci_id->vendor, pci_id->device,
15364 if (pci_id->rev != PCI_ANY_ID) {
15365 if (bridge->revision > pci_id->rev)
15368 if (bridge->subordinate &&
15369 (bridge->subordinate->number ==
15370 tp->pdev->bus->number)) {
15371 tg3_flag_set(tp, ICH_WORKAROUND);
15372 pci_dev_put(bridge);
15378 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15379 static struct tg3_dev_id {
15382 } bridge_chipsets[] = {
15383 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15384 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15387 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15388 struct pci_dev *bridge = NULL;
15390 while (pci_id->vendor != 0) {
15391 bridge = pci_get_device(pci_id->vendor,
15398 if (bridge->subordinate &&
15399 (bridge->subordinate->number <=
15400 tp->pdev->bus->number) &&
15401 (bridge->subordinate->busn_res.end >=
15402 tp->pdev->bus->number)) {
15403 tg3_flag_set(tp, 5701_DMA_BUG);
15404 pci_dev_put(bridge);
15410 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15411 * DMA addresses > 40-bit. This bridge may have other additional
15412 * 57xx devices behind it in some 4-port NIC designs for example.
15413 * Any tg3 device found behind the bridge will also need the 40-bit
15416 if (tg3_flag(tp, 5780_CLASS)) {
15417 tg3_flag_set(tp, 40BIT_DMA_BUG);
15418 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15420 struct pci_dev *bridge = NULL;
15423 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15424 PCI_DEVICE_ID_SERVERWORKS_EPB,
15426 if (bridge && bridge->subordinate &&
15427 (bridge->subordinate->number <=
15428 tp->pdev->bus->number) &&
15429 (bridge->subordinate->busn_res.end >=
15430 tp->pdev->bus->number)) {
15431 tg3_flag_set(tp, 40BIT_DMA_BUG);
15432 pci_dev_put(bridge);
15438 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15439 tg3_asic_rev(tp) == ASIC_REV_5714)
15440 tp->pdev_peer = tg3_find_peer(tp);
15442 /* Determine TSO capabilities */
15443 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
15444 ; /* Do nothing. HW bug. */
15445 else if (tg3_flag(tp, 57765_PLUS))
15446 tg3_flag_set(tp, HW_TSO_3);
15447 else if (tg3_flag(tp, 5755_PLUS) ||
15448 tg3_asic_rev(tp) == ASIC_REV_5906)
15449 tg3_flag_set(tp, HW_TSO_2);
15450 else if (tg3_flag(tp, 5750_PLUS)) {
15451 tg3_flag_set(tp, HW_TSO_1);
15452 tg3_flag_set(tp, TSO_BUG);
15453 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
15454 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
15455 tg3_flag_clear(tp, TSO_BUG);
15456 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15457 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15458 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
15459 tg3_flag_set(tp, FW_TSO);
15460 tg3_flag_set(tp, TSO_BUG);
15461 if (tg3_asic_rev(tp) == ASIC_REV_5705)
15462 tp->fw_needed = FIRMWARE_TG3TSO5;
15464 tp->fw_needed = FIRMWARE_TG3TSO;
15467 /* Selectively allow TSO based on operating conditions */
15468 if (tg3_flag(tp, HW_TSO_1) ||
15469 tg3_flag(tp, HW_TSO_2) ||
15470 tg3_flag(tp, HW_TSO_3) ||
15471 tg3_flag(tp, FW_TSO)) {
15472 /* For firmware TSO, assume ASF is disabled.
15473 * We'll disable TSO later if we discover ASF
15474 * is enabled in tg3_get_eeprom_hw_cfg().
15476 tg3_flag_set(tp, TSO_CAPABLE);
15478 tg3_flag_clear(tp, TSO_CAPABLE);
15479 tg3_flag_clear(tp, TSO_BUG);
15480 tp->fw_needed = NULL;
15483 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
15484 tp->fw_needed = FIRMWARE_TG3;
15486 if (tg3_asic_rev(tp) == ASIC_REV_57766)
15487 tp->fw_needed = FIRMWARE_TG357766;
15491 if (tg3_flag(tp, 5750_PLUS)) {
15492 tg3_flag_set(tp, SUPPORT_MSI);
15493 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
15494 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
15495 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
15496 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
15497 tp->pdev_peer == tp->pdev))
15498 tg3_flag_clear(tp, SUPPORT_MSI);
15500 if (tg3_flag(tp, 5755_PLUS) ||
15501 tg3_asic_rev(tp) == ASIC_REV_5906) {
15502 tg3_flag_set(tp, 1SHOT_MSI);
15505 if (tg3_flag(tp, 57765_PLUS)) {
15506 tg3_flag_set(tp, SUPPORT_MSIX);
15507 tp->irq_max = TG3_IRQ_MAX_VECS;
15513 if (tp->irq_max > 1) {
15514 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15515 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15517 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15518 tg3_asic_rev(tp) == ASIC_REV_5720)
15519 tp->txq_max = tp->irq_max - 1;
15522 if (tg3_flag(tp, 5755_PLUS) ||
15523 tg3_asic_rev(tp) == ASIC_REV_5906)
15524 tg3_flag_set(tp, SHORT_DMA_BUG);
15526 if (tg3_asic_rev(tp) == ASIC_REV_5719)
15527 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15529 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15530 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15531 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15532 tg3_asic_rev(tp) == ASIC_REV_5762)
15533 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15535 if (tg3_flag(tp, 57765_PLUS) &&
15536 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
15537 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15539 if (!tg3_flag(tp, 5705_PLUS) ||
15540 tg3_flag(tp, 5780_CLASS) ||
15541 tg3_flag(tp, USE_JUMBO_BDFLAG))
15542 tg3_flag_set(tp, JUMBO_CAPABLE);
15544 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15547 if (pci_is_pcie(tp->pdev)) {
15550 tg3_flag_set(tp, PCI_EXPRESS);
15552 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15553 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15554 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15555 tg3_flag_clear(tp, HW_TSO_2);
15556 tg3_flag_clear(tp, TSO_CAPABLE);
15558 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
15559 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15560 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
15561 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
15562 tg3_flag_set(tp, CLKREQ_BUG);
15563 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
15564 tg3_flag_set(tp, L1PLLPD_EN);
15566 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
15567 /* BCM5785 devices are effectively PCIe devices, and should
15568 * follow PCIe codepaths, but do not have a PCIe capabilities
15571 tg3_flag_set(tp, PCI_EXPRESS);
15572 } else if (!tg3_flag(tp, 5705_PLUS) ||
15573 tg3_flag(tp, 5780_CLASS)) {
15574 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15575 if (!tp->pcix_cap) {
15576 dev_err(&tp->pdev->dev,
15577 "Cannot find PCI-X capability, aborting\n");
15581 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15582 tg3_flag_set(tp, PCIX_MODE);
15585 /* If we have an AMD 762 or VIA K8T800 chipset, write
15586 * reordering to the mailbox registers done by the host
15587 * controller can cause major troubles. We read back from
15588 * every mailbox register write to force the writes to be
15589 * posted to the chip in order.
15591 if (pci_dev_present(tg3_write_reorder_chipsets) &&
15592 !tg3_flag(tp, PCI_EXPRESS))
15593 tg3_flag_set(tp, MBOX_WRITE_REORDER);
15595 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15596 &tp->pci_cacheline_sz);
15597 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15598 &tp->pci_lat_timer);
15599 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
15600 tp->pci_lat_timer < 64) {
15601 tp->pci_lat_timer = 64;
15602 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15603 tp->pci_lat_timer);
15606 /* Important! -- It is critical that the PCI-X hw workaround
15607 * situation is decided before the first MMIO register access.
15609 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
15610 /* 5700 BX chips need to have their TX producer index
15611 * mailboxes written twice to workaround a bug.
15613 tg3_flag_set(tp, TXD_MBOX_HWBUG);
15615 /* If we are in PCI-X mode, enable register write workaround.
15617 * The workaround is to use indirect register accesses
15618 * for all chip writes not to mailbox registers.
15620 if (tg3_flag(tp, PCIX_MODE)) {
15623 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15625 /* The chip can have it's power management PCI config
15626 * space registers clobbered due to this bug.
15627 * So explicitly force the chip into D0 here.
15629 pci_read_config_dword(tp->pdev,
15630 tp->pm_cap + PCI_PM_CTRL,
15632 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
15633 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
15634 pci_write_config_dword(tp->pdev,
15635 tp->pm_cap + PCI_PM_CTRL,
15638 /* Also, force SERR#/PERR# in PCI command. */
15639 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15640 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
15641 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15645 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
15646 tg3_flag_set(tp, PCI_HIGH_SPEED);
15647 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
15648 tg3_flag_set(tp, PCI_32BIT);
15650 /* Chip-specific fixup from Broadcom driver */
15651 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
15652 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
15653 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
15654 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
15657 /* Default fast path register access methods */
15658 tp->read32 = tg3_read32;
15659 tp->write32 = tg3_write32;
15660 tp->read32_mbox = tg3_read32;
15661 tp->write32_mbox = tg3_write32;
15662 tp->write32_tx_mbox = tg3_write32;
15663 tp->write32_rx_mbox = tg3_write32;
15665 /* Various workaround register access methods */
15666 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
15667 tp->write32 = tg3_write_indirect_reg32;
15668 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
15669 (tg3_flag(tp, PCI_EXPRESS) &&
15670 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
15672 * Back to back register writes can cause problems on these
15673 * chips, the workaround is to read back all reg writes
15674 * except those to mailbox regs.
15676 * See tg3_write_indirect_reg32().
15678 tp->write32 = tg3_write_flush_reg32;
15681 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
15682 tp->write32_tx_mbox = tg3_write32_tx_mbox;
15683 if (tg3_flag(tp, MBOX_WRITE_REORDER))
15684 tp->write32_rx_mbox = tg3_write_flush_reg32;
15687 if (tg3_flag(tp, ICH_WORKAROUND)) {
15688 tp->read32 = tg3_read_indirect_reg32;
15689 tp->write32 = tg3_write_indirect_reg32;
15690 tp->read32_mbox = tg3_read_indirect_mbox;
15691 tp->write32_mbox = tg3_write_indirect_mbox;
15692 tp->write32_tx_mbox = tg3_write_indirect_mbox;
15693 tp->write32_rx_mbox = tg3_write_indirect_mbox;
15698 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15699 pci_cmd &= ~PCI_COMMAND_MEMORY;
15700 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15702 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15703 tp->read32_mbox = tg3_read32_mbox_5906;
15704 tp->write32_mbox = tg3_write32_mbox_5906;
15705 tp->write32_tx_mbox = tg3_write32_mbox_5906;
15706 tp->write32_rx_mbox = tg3_write32_mbox_5906;
15709 if (tp->write32 == tg3_write_indirect_reg32 ||
15710 (tg3_flag(tp, PCIX_MODE) &&
15711 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15712 tg3_asic_rev(tp) == ASIC_REV_5701)))
15713 tg3_flag_set(tp, SRAM_USE_CONFIG);
15715 /* The memory arbiter has to be enabled in order for SRAM accesses
15716 * to succeed. Normally on powerup the tg3 chip firmware will make
15717 * sure it is enabled, but other entities such as system netboot
15718 * code might disable it.
15720 val = tr32(MEMARB_MODE);
15721 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
15723 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
15724 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15725 tg3_flag(tp, 5780_CLASS)) {
15726 if (tg3_flag(tp, PCIX_MODE)) {
15727 pci_read_config_dword(tp->pdev,
15728 tp->pcix_cap + PCI_X_STATUS,
15730 tp->pci_fn = val & 0x7;
15732 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15733 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15734 tg3_asic_rev(tp) == ASIC_REV_5720) {
15735 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
15736 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
15737 val = tr32(TG3_CPMU_STATUS);
15739 if (tg3_asic_rev(tp) == ASIC_REV_5717)
15740 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
15742 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
15743 TG3_CPMU_STATUS_FSHFT_5719;
15746 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
15747 tp->write32_tx_mbox = tg3_write_flush_reg32;
15748 tp->write32_rx_mbox = tg3_write_flush_reg32;
15751 /* Get eeprom hw config before calling tg3_set_power_state().
15752 * In particular, the TG3_FLAG_IS_NIC flag must be
15753 * determined before calling tg3_set_power_state() so that
15754 * we know whether or not to switch out of Vaux power.
15755 * When the flag is set, it means that GPIO1 is used for eeprom
15756 * write protect and also implies that it is a LOM where GPIOs
15757 * are not used to switch power.
15759 tg3_get_eeprom_hw_cfg(tp);
15761 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
15762 tg3_flag_clear(tp, TSO_CAPABLE);
15763 tg3_flag_clear(tp, TSO_BUG);
15764 tp->fw_needed = NULL;
15767 if (tg3_flag(tp, ENABLE_APE)) {
15768 /* Allow reads and writes to the
15769 * APE register and memory space.
15771 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
15772 PCISTATE_ALLOW_APE_SHMEM_WR |
15773 PCISTATE_ALLOW_APE_PSPACE_WR;
15774 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
15777 tg3_ape_lock_init(tp);
15780 /* Set up tp->grc_local_ctrl before calling
15781 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
15782 * will bring 5700's external PHY out of reset.
15783 * It is also used as eeprom write protect on LOMs.
15785 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
15786 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15787 tg3_flag(tp, EEPROM_WRITE_PROT))
15788 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
15789 GRC_LCLCTRL_GPIO_OUTPUT1);
15790 /* Unused GPIO3 must be driven as output on 5752 because there
15791 * are no pull-up resistors on unused GPIO pins.
15793 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
15794 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
15796 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15797 tg3_asic_rev(tp) == ASIC_REV_57780 ||
15798 tg3_flag(tp, 57765_CLASS))
15799 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15801 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15802 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
15803 /* Turn off the debug UART. */
15804 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15805 if (tg3_flag(tp, IS_NIC))
15806 /* Keep VMain power. */
15807 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
15808 GRC_LCLCTRL_GPIO_OUTPUT0;
15811 if (tg3_asic_rev(tp) == ASIC_REV_5762)
15812 tp->grc_local_ctrl |=
15813 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
15815 /* Switch out of Vaux if it is a NIC */
15816 tg3_pwrsrc_switch_to_vmain(tp);
15818 /* Derive initial jumbo mode from MTU assigned in
15819 * ether_setup() via the alloc_etherdev() call
15821 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
15822 tg3_flag_set(tp, JUMBO_RING_ENABLE);
15824 /* Determine WakeOnLan speed to use. */
15825 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15826 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
15827 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
15828 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
15829 tg3_flag_clear(tp, WOL_SPEED_100MB);
15831 tg3_flag_set(tp, WOL_SPEED_100MB);
15834 if (tg3_asic_rev(tp) == ASIC_REV_5906)
15835 tp->phy_flags |= TG3_PHYFLG_IS_FET;
15837 /* A few boards don't want Ethernet@WireSpeed phy feature */
15838 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15839 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
15840 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
15841 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
15842 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
15843 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15844 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
15846 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
15847 tg3_chip_rev(tp) == CHIPREV_5704_AX)
15848 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
15849 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
15850 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
15852 if (tg3_flag(tp, 5705_PLUS) &&
15853 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
15854 tg3_asic_rev(tp) != ASIC_REV_5785 &&
15855 tg3_asic_rev(tp) != ASIC_REV_57780 &&
15856 !tg3_flag(tp, 57765_PLUS)) {
15857 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15858 tg3_asic_rev(tp) == ASIC_REV_5787 ||
15859 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15860 tg3_asic_rev(tp) == ASIC_REV_5761) {
15861 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
15862 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
15863 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
15864 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
15865 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
15867 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
15870 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15871 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
15872 tp->phy_otp = tg3_read_otp_phycfg(tp);
15873 if (tp->phy_otp == 0)
15874 tp->phy_otp = TG3_OTP_DEFAULT;
15877 if (tg3_flag(tp, CPMU_PRESENT))
15878 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
15880 tp->mi_mode = MAC_MI_MODE_BASE;
15882 tp->coalesce_mode = 0;
15883 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
15884 tg3_chip_rev(tp) != CHIPREV_5700_BX)
15885 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
15887 /* Set these bits to enable statistics workaround. */
15888 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15889 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
15890 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
15891 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
15892 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
15895 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
15896 tg3_asic_rev(tp) == ASIC_REV_57780)
15897 tg3_flag_set(tp, USE_PHYLIB);
15899 err = tg3_mdio_init(tp);
15903 /* Initialize data/descriptor byte/word swapping. */
15904 val = tr32(GRC_MODE);
15905 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15906 tg3_asic_rev(tp) == ASIC_REV_5762)
15907 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
15908 GRC_MODE_WORD_SWAP_B2HRX_DATA |
15909 GRC_MODE_B2HRX_ENABLE |
15910 GRC_MODE_HTX2B_ENABLE |
15911 GRC_MODE_HOST_STACKUP);
15913 val &= GRC_MODE_HOST_STACKUP;
15915 tw32(GRC_MODE, val | tp->grc_mode);
15917 tg3_switch_clocks(tp);
15919 /* Clear this out for sanity. */
15920 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
15922 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15924 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
15925 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
15926 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
15927 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
15928 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
15929 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
15930 void __iomem *sram_base;
15932 /* Write some dummy words into the SRAM status block
15933 * area, see if it reads back correctly. If the return
15934 * value is bad, force enable the PCIX workaround.
15936 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
15938 writel(0x00000000, sram_base);
15939 writel(0x00000000, sram_base + 4);
15940 writel(0xffffffff, sram_base + 4);
15941 if (readl(sram_base) != 0x00000000)
15942 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15947 tg3_nvram_init(tp);
15949 /* If the device has an NVRAM, no need to load patch firmware */
15950 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
15951 !tg3_flag(tp, NO_NVRAM))
15952 tp->fw_needed = NULL;
15954 grc_misc_cfg = tr32(GRC_MISC_CFG);
15955 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
15957 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
15958 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
15959 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
15960 tg3_flag_set(tp, IS_5788);
15962 if (!tg3_flag(tp, IS_5788) &&
15963 tg3_asic_rev(tp) != ASIC_REV_5700)
15964 tg3_flag_set(tp, TAGGED_STATUS);
15965 if (tg3_flag(tp, TAGGED_STATUS)) {
15966 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
15967 HOSTCC_MODE_CLRTICK_TXBD);
15969 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
15970 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15971 tp->misc_host_ctrl);
15974 /* Preserve the APE MAC_MODE bits */
15975 if (tg3_flag(tp, ENABLE_APE))
15976 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
15980 if (tg3_10_100_only_device(tp, ent))
15981 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
15983 err = tg3_phy_probe(tp);
15985 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
15986 /* ... but do not return immediately ... */
15991 tg3_read_fw_ver(tp);
15993 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
15994 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15996 if (tg3_asic_rev(tp) == ASIC_REV_5700)
15997 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15999 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16002 /* 5700 {AX,BX} chips have a broken status block link
16003 * change bit implementation, so we must use the
16004 * status register in those cases.
16006 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16007 tg3_flag_set(tp, USE_LINKCHG_REG);
16009 tg3_flag_clear(tp, USE_LINKCHG_REG);
16011 /* The led_ctrl is set during tg3_phy_probe, here we might
16012 * have to force the link status polling mechanism based
16013 * upon subsystem IDs.
16015 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16016 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16017 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16018 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16019 tg3_flag_set(tp, USE_LINKCHG_REG);
16022 /* For all SERDES we poll the MAC status register. */
16023 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16024 tg3_flag_set(tp, POLL_SERDES);
16026 tg3_flag_clear(tp, POLL_SERDES);
16028 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16029 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16030 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16031 tg3_flag(tp, PCIX_MODE)) {
16032 tp->rx_offset = NET_SKB_PAD;
16033 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16034 tp->rx_copy_thresh = ~(u16)0;
16038 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16039 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16040 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16042 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16044 /* Increment the rx prod index on the rx std ring by at most
16045 * 8 for these chips to workaround hw errata.
16047 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16048 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16049 tg3_asic_rev(tp) == ASIC_REV_5755)
16050 tp->rx_std_max_post = 8;
16052 if (tg3_flag(tp, ASPM_WORKAROUND))
16053 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16054 PCIE_PWR_MGMT_L1_THRESH_MSK;
16059 #ifdef CONFIG_SPARC
16060 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16062 struct net_device *dev = tp->dev;
16063 struct pci_dev *pdev = tp->pdev;
16064 struct device_node *dp = pci_device_to_OF_node(pdev);
16065 const unsigned char *addr;
16068 addr = of_get_property(dp, "local-mac-address", &len);
16069 if (addr && len == 6) {
16070 memcpy(dev->dev_addr, addr, 6);
16076 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16078 struct net_device *dev = tp->dev;
16080 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
/* Determine the device's MAC address.
 *
 * Sources are tried in decreasing order of preference: the SPARC
 * OpenFirmware property, the SSB host (for SSB-attached cores), the
 * bootcode's MAC address mailbox in NIC SRAM, NVRAM, and finally the
 * MAC_ADDR_0 hardware registers.  Returns 0 on success, -EINVAL if no
 * valid address could be found anywhere.
 */
static int tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;
	int err;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	if (tg3_flag(tp, IS_SSB_CORE)) {
		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
			return 0;
	}

	/* NVRAM offset of the MAC address.  Dual-MAC (5704/5780-class)
	 * and multi-function (5717+) parts keep per-port copies at
	 * different offsets; the 5906 uses its own layout.
	 */
	mac_offset = 0x7c;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	/* 0x484b ("HK") in the upper half marks a populated mailbox. */
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >> 8) & 0xff;
		dev->dev_addr[1] = (hi >> 0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >> 8) & 0xff;
		dev->dev_addr[5] = (lo >> 0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			/* Big-endian words: address bytes are the low two
			 * bytes of 'hi' followed by all four of 'lo'.
			 */
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	return 0;
}
16167 #define BOUNDARY_SINGLE_CACHELINE 1
16168 #define BOUNDARY_MULTI_CACHELINE 2
/* Compute the DMA read/write boundary bits to merge into the
 * TG3PCI_DMA_RW_CTRL value 'val'.
 *
 * The host's PCI cache line size (a config-space value of 0 is treated
 * as 1024 bytes) and a per-architecture burst 'goal' (single vs. multi
 * cache line, selected by the #ifdefs below) pick the boundary encoding
 * appropriate for PCI-X, PCI Express, or conventional PCI.
 * Returns the updated register value.
 */
static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		/* Conventional PCI: each case falls through to the next
		 * larger boundary unless a single-cache-line goal matched.
		 */
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
/* Run one DMA transaction of 'size' bytes between the host buffer
 * ('buf' at bus address 'buf_dma') and NIC on-chip buffer memory at
 * offset 0x2100, driven by a descriptor written into the NIC's internal
 * DMA descriptor pool via the PCI memory window.  'to_device' selects
 * the read-DMA engine (host -> NIC) versus the write-DMA engine
 * (NIC -> host).  Returns 0 once the engine's completion FIFO reports
 * the descriptor consumed, or -ENODEV after ~4ms of polling.
 */
static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
			   int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce completion FIFOs and DMA status before the test. */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor word-by-word into NIC SRAM through the
	 * PCI memory window, then restore the window base.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick the descriptor into the appropriate engine's FIFO. */
	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	/* Poll the completion FIFO: up to 40 tries, 100us apart. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
16392 #define TEST_BUFFER_SIZE 0x2000
16394 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16395 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
/* Program TG3PCI_DMA_RW_CTRL and probe for the 5700/5701 write-DMA
 * posting bug.
 *
 * Builds tp->dma_rwctrl from chip- and bus-specific command/watermark
 * values, then, on 5700/5701 only, runs a write+read DMA loop over an
 * 8KB pattern buffer at the maximum write burst size.  If corruption is
 * seen on read-back, the write boundary is tightened to 16 bytes and
 * the test retried.  Returns 0 on success or a negative errno.
 */
static int tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
		    tg3_asic_rev(tp) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
		    tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    tg3_asic_rev(tp) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (tg3_asic_rev(tp) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				0x00800000;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);
#endif

	/* Only the 5700/5701 write-DMA bug needs the loop below. */
	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			/* First mismatch: tighten the write boundary to
			 * 16 bytes and retry; a mismatch after that is
			 * a fatal error.
			 */
			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}

	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
/* Populate tp->bufmgr_config with the default buffer-manager MBUF and
 * DMA watermarks for this chip family (57765+, 5705+, or the original
 * 5700 family), including the jumbo-frame variants.  The 5906 gets its
 * own RX watermark overrides within the 5705+ branch.
 */
static void tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		/* The 5906 overrides the generic 5705+ RX watermarks. */
		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
/* tg3_phy_string - map the masked PHY ID to a human-readable chip name
 * for the probe-time banner.  An ID of 0 indicates a serdes interface
 * with no recognizable PHY; any unmatched ID is reported as "unknown".
 */
16647 static char *tg3_phy_string(struct tg3 *tp)
16649 switch (tp->phy_id & TG3_PHY_ID_MASK) {
16650 case TG3_PHY_ID_BCM5400: return "5400";
16651 case TG3_PHY_ID_BCM5401: return "5401";
16652 case TG3_PHY_ID_BCM5411: return "5411";
16653 case TG3_PHY_ID_BCM5701: return "5701";
16654 case TG3_PHY_ID_BCM5703: return "5703";
16655 case TG3_PHY_ID_BCM5704: return "5704";
16656 case TG3_PHY_ID_BCM5705: return "5705";
16657 case TG3_PHY_ID_BCM5750: return "5750";
16658 case TG3_PHY_ID_BCM5752: return "5752";
16659 case TG3_PHY_ID_BCM5714: return "5714";
16660 case TG3_PHY_ID_BCM5780: return "5780";
16661 case TG3_PHY_ID_BCM5755: return "5755";
16662 case TG3_PHY_ID_BCM5787: return "5787";
16663 case TG3_PHY_ID_BCM5784: return "5784";
16664 case TG3_PHY_ID_BCM5756: return "5722/5756";
16665 case TG3_PHY_ID_BCM5906: return "5906";
16666 case TG3_PHY_ID_BCM5761: return "5761";
16667 case TG3_PHY_ID_BCM5718C: return "5718C";
16668 case TG3_PHY_ID_BCM5718S: return "5718S";
16669 case TG3_PHY_ID_BCM57765: return "57765";
16670 case TG3_PHY_ID_BCM5719C: return "5719C";
16671 case TG3_PHY_ID_BCM5720C: return "5720C";
16672 case TG3_PHY_ID_BCM5762: return "5762C";
16673 case TG3_PHY_ID_BCM8002: return "8002/serdes";
16674 case 0: return "serdes";
16675 default: return "unknown";
/* tg3_bus_string - format the bus type, speed and width into the
 * caller-supplied buffer @str (e.g. "PCI Express", "PCIX:133MHz",
 * "PCI:33MHz:64-bit") and return it.  For PCI-X the speed is decoded
 * from the low bits of TG3PCI_CLOCK_CTRL.
 */
16679 static char *tg3_bus_string(struct tg3 *tp, char *str)
16681 if (tg3_flag(tp, PCI_EXPRESS)) {
16682 strcpy(str, "PCI Express");
16684 } else if (tg3_flag(tp, PCIX_MODE)) {
16685 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
16687 strcpy(str, "PCIX:");
/* A 5704 CIOBE board always runs at 133MHz regardless of clock_ctrl. */
16689 if ((clock_ctrl == 7) ||
16690 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
16691 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
16692 strcat(str, "133MHz");
16693 else if (clock_ctrl == 0)
16694 strcat(str, "33MHz");
16695 else if (clock_ctrl == 2)
16696 strcat(str, "50MHz");
16697 else if (clock_ctrl == 4)
16698 strcat(str, "66MHz");
16699 else if (clock_ctrl == 6)
16700 strcat(str, "100MHz");
/* Conventional PCI: speed comes from the PCI_HIGH_SPEED flag. */
16702 strcpy(str, "PCI:");
16703 if (tg3_flag(tp, PCI_HIGH_SPEED))
16704 strcat(str, "66MHz");
16706 strcat(str, "33MHz");
16708 if (tg3_flag(tp, PCI_32BIT))
16709 strcat(str, ":32-bit");
16711 strcat(str, ":64-bit");
/* tg3_init_coal - populate tp->coal with the driver's default interrupt
 * coalescing parameters.  Chips operating in CLRTICK mode get the
 * *_CLRTCKS variants for the tick values; 5705+ chips do not support
 * the per-IRQ tick and stats-block settings, so those are zeroed.
 */
16715 static void tg3_init_coal(struct tg3 *tp)
16717 struct ethtool_coalesce *ec = &tp->coal;
16719 memset(ec, 0, sizeof(*ec));
16720 ec->cmd = ETHTOOL_GCOALESCE;
16721 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
16722 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
16723 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
16724 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
16725 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
16726 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
16727 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
16728 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
16729 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
/* CLRTICK mode uses different tick defaults. */
16731 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
16732 HOSTCC_MODE_CLRTICK_TXBD)) {
16733 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
16734 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
16735 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
16736 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
/* 5705+ hardware lacks the per-IRQ and stats coalescing knobs. */
16739 if (tg3_flag(tp, 5705_PLUS)) {
16740 ec->rx_coalesce_usecs_irq = 0;
16741 ec->tx_coalesce_usecs_irq = 0;
16742 ec->stats_block_coalesce_usecs = 0;
/* tg3_init_one - PCI probe callback.
 *
 * Brings the device from cold PCI state to a registered netdev:
 * enables and claims the PCI device, powers it to D0, allocates the
 * multi-queue net_device, maps the register (and, for APE-capable
 * chips, APE) BARs, reads chip invariants, configures DMA masks and
 * netdev feature flags, fetches the MAC address, resets/tests the DMA
 * engine, sets up per-vector mailbox registers, and finally registers
 * the netdev and prints the probe banner.
 *
 * Returns 0 on success or a negative errno; on failure, unwinds via
 * the err_out_* labels in reverse order of acquisition.
 */
16746 static int tg3_init_one(struct pci_dev *pdev,
16747 const struct pci_device_id *ent)
16749 struct net_device *dev;
16751 int i, err, pm_cap;
16752 u32 sndmbx, rcvmbx, intmbx;
16754 u64 dma_mask, persist_dma_mask;
16755 netdev_features_t features = 0;
16757 printk_once(KERN_INFO "%s\n", version);
/* Enable the device and claim its BARs before touching any registers. */
16759 err = pci_enable_device(pdev);
16761 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
16765 err = pci_request_regions(pdev, DRV_MODULE_NAME);
16767 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
16768 goto err_out_disable_pdev;
16771 pci_set_master(pdev);
16773 /* Find power-management capability. */
16774 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
16776 dev_err(&pdev->dev,
16777 "Cannot find Power Management capability, aborting\n");
16779 goto err_out_free_res;
/* Make sure the chip is fully powered before register access. */
16782 err = pci_set_power_state(pdev, PCI_D0);
16784 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
16785 goto err_out_free_res;
/* Allocate the multi-queue net_device with the tg3 private area. */
16788 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
16791 goto err_out_power_down;
16794 SET_NETDEV_DEV(dev, &pdev->dev);
16796 tp = netdev_priv(dev);
16799 tp->pm_cap = pm_cap;
16800 tp->rx_mode = TG3_DEF_RX_MODE;
16801 tp->tx_mode = TG3_DEF_TX_MODE;
16805 tp->msg_enable = tg3_debug;
16807 tp->msg_enable = TG3_DEF_MSG_ENABLE;
/* SSB-embedded GigE cores need several behavioral quirks flagged. */
16809 if (pdev_is_ssb_gige_core(pdev)) {
16810 tg3_flag_set(tp, IS_SSB_CORE);
16811 if (ssb_gige_must_flush_posted_writes(pdev))
16812 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
16813 if (ssb_gige_one_dma_at_once(pdev))
16814 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
16815 if (ssb_gige_have_roboswitch(pdev))
16816 tg3_flag_set(tp, ROBOSWITCH);
16817 if (ssb_gige_is_rgmii(pdev))
16818 tg3_flag_set(tp, RGMII_MODE);
16821 /* The word/byte swap controls here control register access byte
16822 * swapping. DMA data byte swapping is controlled in the GRC_MODE
16825 tp->misc_host_ctrl =
16826 MISC_HOST_CTRL_MASK_PCI_INT |
16827 MISC_HOST_CTRL_WORD_SWAP |
16828 MISC_HOST_CTRL_INDIR_ACCESS |
16829 MISC_HOST_CTRL_PCISTATE_RW;
16831 /* The NONFRM (non-frame) byte/word swap controls take effect
16832 * on descriptor entries, anything which isn't packet data.
16834 * The StrongARM chips on the board (one for tx, one for rx)
16835 * are running in big-endian mode.
16837 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
16838 GRC_MODE_WSWAP_NONFRM_DATA);
16839 #ifdef __BIG_ENDIAN
16840 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
16842 spin_lock_init(&tp->lock);
16843 spin_lock_init(&tp->indirect_lock);
16844 INIT_WORK(&tp->reset_task, tg3_reset_task);
/* Map the main register BAR. */
16846 tp->regs = pci_ioremap_bar(pdev, BAR_0);
16848 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
16850 goto err_out_free_dev;
/* These device IDs carry an APE (Application Processing Engine);
 * map its BAR as well.
 */
16853 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16854 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
16855 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
16856 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
16857 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16858 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16859 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16860 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16861 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16862 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16863 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16864 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
16865 tg3_flag_set(tp, ENABLE_APE);
16866 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
16867 if (!tp->aperegs) {
16868 dev_err(&pdev->dev,
16869 "Cannot map APE registers, aborting\n");
16871 goto err_out_iounmap;
16875 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
16876 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
16878 dev->ethtool_ops = &tg3_ethtool_ops;
16879 dev->watchdog_timeo = TG3_TX_TIMEOUT;
16880 dev->netdev_ops = &tg3_netdev_ops;
16881 dev->irq = pdev->irq;
/* Read chip revision/capability invariants; everything below depends
 * on the flags this sets.
 */
16883 err = tg3_get_invariants(tp, ent);
16885 dev_err(&pdev->dev,
16886 "Problem fetching invariants of chip, aborting\n");
16887 goto err_out_apeunmap;
16890 /* The EPB bridge inside 5714, 5715, and 5780 and any
16891 * device behind the EPB cannot support DMA addresses > 40-bit.
16892 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
16893 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
16894 * do DMA address check in tg3_start_xmit().
16896 if (tg3_flag(tp, IS_5788))
16897 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
16898 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
16899 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
16900 #ifdef CONFIG_HIGHMEM
16901 dma_mask = DMA_BIT_MASK(64);
16904 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
16906 /* Configure DMA attributes. */
16907 if (dma_mask > DMA_BIT_MASK(32)) {
16908 err = pci_set_dma_mask(pdev, dma_mask);
16910 features |= NETIF_F_HIGHDMA;
16911 err = pci_set_consistent_dma_mask(pdev,
16914 dev_err(&pdev->dev, "Unable to obtain 64 bit "
16915 "DMA for consistent allocations\n");
16916 goto err_out_apeunmap;
/* Fall back to a 32-bit mask if the wide mask was refused. */
16920 if (err || dma_mask == DMA_BIT_MASK(32)) {
16921 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
16923 dev_err(&pdev->dev,
16924 "No usable DMA configuration, aborting\n");
16925 goto err_out_apeunmap;
16929 tg3_init_bufmgr_config(tp);
/* Build up the netdev feature set based on chip capabilities. */
16931 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
16933 /* 5700 B0 chips do not support checksumming correctly due
16934 * to hardware bugs.
16936 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
16937 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
16939 if (tg3_flag(tp, 5755_PLUS))
16940 features |= NETIF_F_IPV6_CSUM;
16943 /* TSO is on by default on chips that support hardware TSO.
16944 * Firmware TSO on older chips gives lower performance, so it
16945 * is off by default, but can be enabled using ethtool.
16947 if ((tg3_flag(tp, HW_TSO_1) ||
16948 tg3_flag(tp, HW_TSO_2) ||
16949 tg3_flag(tp, HW_TSO_3)) &&
16950 (features & NETIF_F_IP_CSUM))
16951 features |= NETIF_F_TSO;
16952 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
16953 if (features & NETIF_F_IPV6_CSUM)
16954 features |= NETIF_F_TSO6;
16955 if (tg3_flag(tp, HW_TSO_3) ||
16956 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16957 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16958 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
16959 tg3_asic_rev(tp) == ASIC_REV_5785 ||
16960 tg3_asic_rev(tp) == ASIC_REV_57780)
16961 features |= NETIF_F_TSO_ECN;
16964 dev->features |= features;
16965 dev->vlan_features |= features;
16968 * Add loopback capability only for a subset of devices that support
16969 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
16970 * loopback for the remaining devices.
16972 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
16973 !tg3_flag(tp, CPMU_PRESENT))
16974 /* Add the loopback capability */
16975 features |= NETIF_F_LOOPBACK;
16977 dev->hw_features |= features;
/* Slow-bus 5705 A1 without TSO must cap the rx ring at 64 entries. */
16979 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
16980 !tg3_flag(tp, TSO_CAPABLE) &&
16981 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
16982 tg3_flag_set(tp, MAX_RXPEND_64);
16983 tp->rx_pending = 63;
16986 err = tg3_get_device_address(tp);
16988 dev_err(&pdev->dev,
16989 "Could not obtain valid ethernet address, aborting\n");
16990 goto err_out_apeunmap;
16994 * Reset chip in case UNDI or EFI driver did not shutdown
16995 * DMA self test will enable WDMAC and we'll see (spurious)
16996 * pending DMA on the PCI bus at that point.
16998 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
16999 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17000 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17001 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
/* Probe DMA boundaries/latency; this also programs TG3PCI_DMA_RW_CTRL. */
17004 err = tg3_test_dma(tp);
17006 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17007 goto err_out_apeunmap;
/* Assign interrupt/rx-return/tx-producer mailbox registers to each
 * NAPI vector.
 */
17010 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17011 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17012 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17013 for (i = 0; i < tp->irq_max; i++) {
17014 struct tg3_napi *tnapi = &tp->napi[i];
17017 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17019 tnapi->int_mbox = intmbx;
17025 tnapi->consmbox = rcvmbx;
17026 tnapi->prodmbox = sndmbx;
17029 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17031 tnapi->coal_now = HOSTCC_MODE_NOW;
17033 if (!tg3_flag(tp, SUPPORT_MSIX))
17037 * If we support MSIX, we'll be using RSS. If we're using
17038 * RSS, the first vector only handles link interrupts and the
17039 * remaining vectors handle rx and tx interrupts. Reuse the
17040 * mailbox values for the next iteration. The values we setup
17041 * above are still useful for the single vectored mode.
17056 pci_set_drvdata(pdev, dev);
/* These ASICs have hardware timestamping (PTP) support. */
17058 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17059 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17060 tg3_asic_rev(tp) == ASIC_REV_5762)
17061 tg3_flag_set(tp, PTP_CAPABLE);
17063 if (tg3_flag(tp, 5717_PLUS)) {
17064 /* Resume a low-power mode */
17065 tg3_frob_aux_power(tp, false);
17068 tg3_timer_init(tp);
/* Report no-carrier until the link comes up. */
17070 tg3_carrier_off(tp);
17072 err = register_netdev(dev);
17074 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17075 goto err_out_apeunmap;
/* Probe banner: board, bus, MAC address and capability summary. */
17078 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17079 tp->board_part_number,
17080 tg3_chip_rev_id(tp),
17081 tg3_bus_string(tp, str),
17084 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17085 struct phy_device *phydev;
17086 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
17088 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17089 phydev->drv->name, dev_name(&phydev->dev));
17093 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17094 ethtype = "10/100Base-TX";
17095 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17096 ethtype = "1000Base-SX";
17098 ethtype = "10/100/1000Base-T";
17100 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17101 "(WireSpeed[%d], EEE[%d])\n",
17102 tg3_phy_string(tp), ethtype,
17103 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17104 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17107 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17108 (dev->features & NETIF_F_RXCSUM) != 0,
17109 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17110 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17111 tg3_flag(tp, ENABLE_ASF) != 0,
17112 tg3_flag(tp, TSO_CAPABLE) != 0);
17113 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17115 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17116 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17118 pci_save_state(pdev);
/* Error unwind: release resources in reverse order of acquisition. */
17124 iounmap(tp->aperegs);
17125 tp->aperegs = NULL;
17137 err_out_power_down:
17138 pci_set_power_state(pdev, PCI_D3hot);
17141 pci_release_regions(pdev);
17143 err_out_disable_pdev:
17144 pci_disable_device(pdev);
17145 pci_set_drvdata(pdev, NULL);
/* tg3_remove_one - PCI removal callback.
 *
 * Tears down what tg3_init_one() set up: releases firmware, cancels the
 * pending reset task, unregisters the netdev, unmaps the APE registers
 * and releases/disables the PCI device.
 */
17149 static void tg3_remove_one(struct pci_dev *pdev)
17151 struct net_device *dev = pci_get_drvdata(pdev);
17154 struct tg3 *tp = netdev_priv(dev);
17156 release_firmware(tp->fw);
/* Make sure no reset work runs while we dismantle the device. */
17158 tg3_reset_task_cancel(tp);
17160 if (tg3_flag(tp, USE_PHYLIB)) {
17165 unregister_netdev(dev);
17167 iounmap(tp->aperegs);
17168 tp->aperegs = NULL;
17175 pci_release_regions(pdev);
17176 pci_disable_device(pdev);
17177 pci_set_drvdata(pdev, NULL);
17181 #ifdef CONFIG_PM_SLEEP
/* tg3_suspend - system-sleep (CONFIG_PM_SLEEP) suspend handler.
 *
 * If the interface is running: stops NAPI/NIC activity and the timer,
 * disables interrupts, detaches the netdev, halts the chip, and calls
 * tg3_power_down_prepare().  If that fails, the hardware is restarted
 * and the interface re-attached so the device remains usable.
 */
17182 static int tg3_suspend(struct device *device)
17184 struct pci_dev *pdev = to_pci_dev(device);
17185 struct net_device *dev = pci_get_drvdata(pdev);
17186 struct tg3 *tp = netdev_priv(dev);
17189 if (!netif_running(dev))
17192 tg3_reset_task_cancel(tp);
17194 tg3_netif_stop(tp);
17196 tg3_timer_stop(tp);
17198 tg3_full_lock(tp, 1);
17199 tg3_disable_ints(tp);
17200 tg3_full_unlock(tp);
17202 netif_device_detach(dev);
17204 tg3_full_lock(tp, 0);
17205 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17206 tg3_flag_clear(tp, INIT_COMPLETE);
17207 tg3_full_unlock(tp);
17209 err = tg3_power_down_prepare(tp);
/* Power-down preparation failed: bring the hardware back up. */
17213 tg3_full_lock(tp, 0);
17215 tg3_flag_set(tp, INIT_COMPLETE);
17216 err2 = tg3_restart_hw(tp, 1);
17220 tg3_timer_start(tp);
17222 netif_device_attach(dev);
17223 tg3_netif_start(tp);
17226 tg3_full_unlock(tp);
/* tg3_resume - system-sleep (CONFIG_PM_SLEEP) resume handler.
 *
 * If the interface was running before suspend: re-attaches the netdev,
 * restarts the hardware and the driver timer, and resumes NAPI/queues.
 */
17235 static int tg3_resume(struct device *device)
17237 struct pci_dev *pdev = to_pci_dev(device);
17238 struct net_device *dev = pci_get_drvdata(pdev);
17239 struct tg3 *tp = netdev_priv(dev);
17242 if (!netif_running(dev))
17245 netif_device_attach(dev);
17247 tg3_full_lock(tp, 0);
17249 tg3_flag_set(tp, INIT_COMPLETE);
17250 err = tg3_restart_hw(tp, 1);
17254 tg3_timer_start(tp);
17256 tg3_netif_start(tp);
17259 tg3_full_unlock(tp);
/* Wire suspend/resume into dev_pm_ops when CONFIG_PM_SLEEP is enabled;
 * otherwise the driver advertises no PM ops (TG3_PM_OPS == NULL).
 */
17267 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17268 #define TG3_PM_OPS (&tg3_pm_ops)
17272 #define TG3_PM_OPS NULL
17274 #endif /* CONFIG_PM_SLEEP */
17277 * tg3_io_error_detected - called when PCI error is detected
17278 * @pdev: Pointer to PCI device
17279 * @state: The current pci connection state
17281 * This function is called after a PCI bus error affecting
17282 * this device has been detected.
/* Quiesces the driver on a PCI channel error: stops NAPI and the timer,
 * cancels the reset task, detaches the netdev and halts the chip so no
 * further MMIO is issued.  A permanent channel failure maps to
 * PCI_ERS_RESULT_DISCONNECT; otherwise a slot reset is requested.
 */
17284 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17285 pci_channel_state_t state)
17287 struct net_device *netdev = pci_get_drvdata(pdev);
17288 struct tg3 *tp = netdev_priv(netdev);
17289 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17291 netdev_info(netdev, "PCI I/O error detected\n");
17295 if (!netif_running(netdev))
17300 tg3_netif_stop(tp);
17302 tg3_timer_stop(tp);
17304 /* Want to make sure that the reset task doesn't run */
17305 tg3_reset_task_cancel(tp);
17307 netif_device_detach(netdev);
17309 /* Clean up software state, even if MMIO is blocked */
17310 tg3_full_lock(tp, 0);
17311 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17312 tg3_full_unlock(tp);
17315 if (state == pci_channel_io_perm_failure)
17316 err = PCI_ERS_RESULT_DISCONNECT;
17318 pci_disable_device(pdev);
17326 * tg3_io_slot_reset - called after the pci bus has been reset.
17327 * @pdev: Pointer to PCI device
17329 * Restart the card from scratch, as if from a cold-boot.
17330 * At this point, the card has experienced a hard reset,
17331 * followed by fixups by BIOS, and has its config space
17332 * set up identically to what it was at cold boot.
/* Re-enables the device after a slot reset, restores its saved config
 * space, and (if the interface was running) powers the chip back up.
 * Returns PCI_ERS_RESULT_RECOVERED on success, DISCONNECT otherwise.
 */
17334 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17336 struct net_device *netdev = pci_get_drvdata(pdev);
17337 struct tg3 *tp = netdev_priv(netdev);
17338 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17343 if (pci_enable_device(pdev)) {
17344 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
17348 pci_set_master(pdev);
/* Restore config space saved at probe, then re-save for the next error. */
17349 pci_restore_state(pdev);
17350 pci_save_state(pdev);
17352 if (!netif_running(netdev)) {
17353 rc = PCI_ERS_RESULT_RECOVERED;
17357 err = tg3_power_up(tp);
17361 rc = PCI_ERS_RESULT_RECOVERED;
17370 * tg3_io_resume - called when traffic can start flowing again.
17371 * @pdev: Pointer to PCI device
17373 * This callback is called when the error recovery driver tells
17374 * us that it's OK to resume normal operation.
/* Final stage of PCI error recovery: restarts the hardware, re-attaches
 * the netdev and resumes the timer and NAPI/queues.  Logs and bails out
 * if the hardware cannot be restarted.
 */
17376 static void tg3_io_resume(struct pci_dev *pdev)
17378 struct net_device *netdev = pci_get_drvdata(pdev);
17379 struct tg3 *tp = netdev_priv(netdev);
17384 if (!netif_running(netdev))
17387 tg3_full_lock(tp, 0);
17388 tg3_flag_set(tp, INIT_COMPLETE);
17389 err = tg3_restart_hw(tp, 1);
17391 tg3_full_unlock(tp);
17392 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17396 netif_device_attach(netdev);
17398 tg3_timer_start(tp);
17400 tg3_netif_start(tp);
17402 tg3_full_unlock(tp);
/* PCI error recovery (AER) callbacks: detect -> slot reset -> resume. */
17410 static const struct pci_error_handlers tg3_err_handler = {
17411 .error_detected = tg3_io_error_detected,
17412 .slot_reset = tg3_io_slot_reset,
17413 .resume = tg3_io_resume
/* PCI driver glue: probe/remove entry points, PM ops and error handlers. */
17416 static struct pci_driver tg3_driver = {
17417 .name = DRV_MODULE_NAME,
17418 .id_table = tg3_pci_tbl,
17419 .probe = tg3_init_one,
17420 .remove = tg3_remove_one,
17421 .err_handler = &tg3_err_handler,
17422 .driver.pm = TG3_PM_OPS,
/* Module entry point: register the PCI driver. */
17425 static int __init tg3_init(void)
17427 return pci_register_driver(&tg3_driver);
/* Module exit point: unregister the PCI driver. */
17430 static void __exit tg3_cleanup(void)
17432 pci_unregister_driver(&tg3_driver);
17435 module_init(tg3_init);
17436 module_exit(tg3_cleanup);