/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/byteorder.h>
#include <linux/uaccess.h>
#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
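
/* Illustrative expansion (comment only, not driver logic):
 * tg3_flag(tp, ENABLE_APE) becomes
 * _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags), i.e. an atomic
 * test_bit() on the flag bitmap.  Because TG3_FLAG_##flag must name a
 * member of enum TG3_FLAGS (from tg3.h), a typo in a flag name fails
 * to compile instead of silently testing the wrong bit.
 */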

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			133
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"Jul 29, 2013"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
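
/* Worked example of the masking trick described above: TG3_TX_RING_SIZE
 * is 512, a power of two, so NEXT_TX(511) == (511 + 1) & 511 == 0.
 * This is the same result as (511 + 1) % 512 but compiles to a single
 * AND instead of a hardware divide/modulo.
 */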

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif
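
/* Net effect of the conditionals above: when unaligned access is cheap
 * (NET_IP_ALIGN == 0 or CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS),
 * TG3_RX_COPY_THRESH(tp) folds to the constant 256 and the rx path
 * compares against an immediate; otherwise it reads
 * (tp)->rx_copy_thresh at runtime so the 5701 double-copy workaround
 * can be tuned per device.
 */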

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
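
/* Example usage (illustrative): tg3_debug is a netif_msg bitmask, so
 *
 *	modprobe tg3 tg3_debug=0x3
 *
 * would enable only NETIF_MSG_DRV | NETIF_MSG_PROBE messages, while
 * the default of -1 selects TG3_DEF_MSG_ENABLE above.
 */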

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
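
/* These keys are what userspace sees: "ethtool -S <ifname>" prints one
 * "<key>: <counter>" line per entry, in the array order above.
 */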

#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
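
/* The (online)/(offline) suffixes mirror ethtool self-test semantics:
 * e.g. "ethtool -t <ifname> online" runs only the nvram and link
 * tests, while an offline run may take the interface down to execute
 * the register, memory, loopback and interrupt tests as well.
 */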

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
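
/* Typical use (illustrative), via the tw32_wait_f() wrapper defined
 * below: tg3_switch_clocks() issues
 *
 *	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 *
 * which writes the register, delays 40 usec, reads the register back
 * to force the posted write to complete, then delays another 40 usec
 * so the clock switch has settled before the next access.
 */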

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
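
/* Accessor summary: tw32()/tr32() dispatch through function pointers
 * chosen at probe time (direct MMIO or the indirect config-space
 * methods above, depending on chip bugs), tw32_f() additionally
 * flushes the posted write by reading the register back, and
 * tw32_wait_f() adds a settling delay.  A common pattern in this
 * driver is:
 *
 *	tw32_f(MAC_MODE, tp->mac_mode);
 *	udelay(40);
 */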

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}
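
/* The arithmetic above converts the remaining jiffies to usecs, caps
 * the wait at TG3_FW_EVENT_TIMEOUT_USEC, then polls in ~8 usec slices:
 * (delay_cnt >> 3) + 1 iterations of udelay(8) always covers at least
 * delay_cnt microseconds.
 */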

/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}

	tp->link_up = netif_carrier_ok(tp->dev);
}

static u32 tg3_decode_flowctrl_1000T(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_PAUSE_CAP) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_PAUSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_PAUSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u32 tg3_decode_flowctrl_1000X(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_1000XPAUSE) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_1000XPSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_1000XPSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
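
/* Resolution table implied by tg3_resolve_flowctrl_1000X() above
 * (1000BASE-X pause bits -> resulting flow control):
 *
 *	lcladv		rmtadv		cap
 *	PAUSE		PAUSE		TX | RX
 *	PAUSE|ASYM	ASYM		RX
 *	ASYM		PAUSE|ASYM	TX
 *	otherwise			0 (none)
 */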

static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 tg3_asic_rev(tp) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}

static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		phydev->advertising = tp->link_config.advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}

static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}

static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}

static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}
2297 static void tg3_phy_apply_otp(struct tg3 *tp)
2306 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2309 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2310 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2311 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2313 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2314 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2315 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2317 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2318 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2319 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2321 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2322 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2324 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2325 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2327 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2328 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2329 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2331 tg3_phy_toggle_auxctl_smdsp(tp, false);
2334 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2337 struct ethtool_eee *dest = &tp->eee;
2339 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2345 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2348 /* Pull eee_active */
2349 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2350 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2351 dest->eee_active = 1;
2353 dest->eee_active = 0;
2355 /* Pull lp advertised settings */
2356 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2358 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2360 /* Pull advertised and eee_enabled settings */
2361 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2363 dest->eee_enabled = !!val;
2364 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2366 /* Pull tx_lpi_enabled */
2367 val = tr32(TG3_CPMU_EEE_MODE);
2368 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2370 /* Pull lpi timer value */
2371 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
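/* Illustrative note (an addition, not from the original source): the
 * raw MDIO_AN_EEE_ADV register value is translated to ethtool bits by
 * mmd_eee_adv_to_ethtool_adv_t().  For example, a register value with
 * MDIO_AN_EEE_ADV_100TX and MDIO_AN_EEE_ADV_1000T set decodes to
 * ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full, which is also
 * why a non-zero register value doubles as the eee_enabled indication
 * above.
 */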
2374 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2378 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2383 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2385 tp->link_config.active_duplex == DUPLEX_FULL &&
2386 (tp->link_config.active_speed == SPEED_100 ||
2387 tp->link_config.active_speed == SPEED_1000)) {
2390 if (tp->link_config.active_speed == SPEED_1000)
2391 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2393 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2395 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2397 tg3_eee_pull_config(tp, NULL);
2398 if (tp->eee.eee_active)
2402 if (!tp->setlpicnt) {
2403 if (current_link_up &&
2404 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2405 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2406 tg3_phy_toggle_auxctl_smdsp(tp, false);
2409 val = tr32(TG3_CPMU_EEE_MODE);
2410 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2414 static void tg3_phy_eee_enable(struct tg3 *tp)
2418 if (tp->link_config.active_speed == SPEED_1000 &&
2419 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2420 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2421 tg3_flag(tp, 57765_CLASS)) &&
2422 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2423 val = MII_TG3_DSP_TAP26_ALNOKO |
2424 MII_TG3_DSP_TAP26_RMRXSTO;
2425 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2426 tg3_phy_toggle_auxctl_smdsp(tp, false);
2429 val = tr32(TG3_CPMU_EEE_MODE);
2430 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2433 static int tg3_wait_macro_done(struct tg3 *tp)
2440 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2441 if ((tmp32 & 0x1000) == 0)
2451 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2453 static const u32 test_pat[4][6] = {
2454 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2455 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2456 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2457 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2461 for (chan = 0; chan < 4; chan++) {
2464 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2465 (chan * 0x2000) | 0x0200);
2466 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2468 for (i = 0; i < 6; i++)
2469 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2472 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2473 if (tg3_wait_macro_done(tp)) {
2478 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2479 (chan * 0x2000) | 0x0200);
2480 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2481 if (tg3_wait_macro_done(tp)) {
2486 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2487 if (tg3_wait_macro_done(tp)) {
2492 for (i = 0; i < 6; i += 2) {
2495 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2496 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2497 tg3_wait_macro_done(tp)) {
2503 if (low != test_pat[chan][i] ||
2504 high != test_pat[chan][i+1]) {
2505 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2506 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2507 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2517 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2521 for (chan = 0; chan < 4; chan++) {
2524 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2525 (chan * 0x2000) | 0x0200);
2526 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2527 for (i = 0; i < 6; i++)
2528 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2529 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2530 if (tg3_wait_macro_done(tp))
2537 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2539 u32 reg32, phy9_orig;
2540 int retries, do_phy_reset, err;
2546 err = tg3_bmcr_reset(tp);
2552 /* Disable transmitter and interrupt. */
2553 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2557 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2559 /* Set full-duplex, 1000 Mbps. */
2560 tg3_writephy(tp, MII_BMCR,
2561 BMCR_FULLDPLX | BMCR_SPEED1000);
2563 /* Set to master mode. */
2564 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2567 tg3_writephy(tp, MII_CTRL1000,
2568 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2570 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2574 /* Block the PHY control access. */
2575 tg3_phydsp_write(tp, 0x8005, 0x0800);
2577 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2580 } while (--retries);
2582 err = tg3_phy_reset_chanpat(tp);
2586 tg3_phydsp_write(tp, 0x8005, 0x0000);
2588 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2589 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2591 tg3_phy_toggle_auxctl_smdsp(tp, false);
2593 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2595 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2597 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2604 static void tg3_carrier_off(struct tg3 *tp)
2606 netif_carrier_off(tp->dev);
2607 tp->link_up = false;
2610 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2612 if (tg3_flag(tp, ENABLE_ASF))
2613 netdev_warn(tp->dev,
2614 "Management side-band traffic will be interrupted during phy settings change\n");
2617 /* Reset the tigon3 PHY and reapply the chip- and PHY-specific
2618 * workarounds.
2620 static int tg3_phy_reset(struct tg3 *tp)
2625 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2626 val = tr32(GRC_MISC_CFG);
2627 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2630 err = tg3_readphy(tp, MII_BMSR, &val);
2631 err |= tg3_readphy(tp, MII_BMSR, &val);
2635 if (netif_running(tp->dev) && tp->link_up) {
2636 netif_carrier_off(tp->dev);
2637 tg3_link_report(tp);
2640 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2641 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2642 tg3_asic_rev(tp) == ASIC_REV_5705) {
2643 err = tg3_phy_reset_5703_4_5(tp);
2650 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2651 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2652 cpmuctrl = tr32(TG3_CPMU_CTRL);
2653 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2655 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2658 err = tg3_bmcr_reset(tp);
2662 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2663 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2664 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2666 tw32(TG3_CPMU_CTRL, cpmuctrl);
2669 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2670 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2671 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2672 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2673 CPMU_LSPD_1000MB_MACCLK_12_5) {
2674 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2676 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2680 if (tg3_flag(tp, 5717_PLUS) &&
2681 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2684 tg3_phy_apply_otp(tp);
2686 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2687 tg3_phy_toggle_apd(tp, true);
2689 tg3_phy_toggle_apd(tp, false);
2692 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2693 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2694 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2695 tg3_phydsp_write(tp, 0x000a, 0x0323);
2696 tg3_phy_toggle_auxctl_smdsp(tp, false);
2699 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2700 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2701 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2704 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2705 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2706 tg3_phydsp_write(tp, 0x000a, 0x310b);
2707 tg3_phydsp_write(tp, 0x201f, 0x9506);
2708 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2709 tg3_phy_toggle_auxctl_smdsp(tp, false);
2711 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2712 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2713 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2714 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2715 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2716 tg3_writephy(tp, MII_TG3_TEST1,
2717 MII_TG3_TEST1_TRIM_EN | 0x4);
2719 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2721 tg3_phy_toggle_auxctl_smdsp(tp, false);
2725 /* Set Extended packet length bit (bit 14) on all chips that
2726 * support jumbo frames. */
2727 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2728 /* Cannot do read-modify-write on 5401 */
2729 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2730 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2731 /* Set bit 14 with read-modify-write to preserve other bits */
2732 err = tg3_phy_auxctl_read(tp,
2733 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2735 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2736 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2739 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2740 * jumbo frame transmission.
2742 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2743 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2744 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2745 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2748 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2749 /* adjust output voltage */
2750 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2753 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2754 tg3_phydsp_write(tp, 0xffb, 0x4000);
2756 tg3_phy_toggle_automdix(tp, true);
2757 tg3_phy_set_wirespeed(tp);
2761 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2762 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2763 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2764 TG3_GPIO_MSG_NEED_VAUX)
2765 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2766 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2767 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2768 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2769 (TG3_GPIO_MSG_DRVR_PRES << 12))
2771 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2772 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2773 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2774 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2775 (TG3_GPIO_MSG_NEED_VAUX << 12))
2777 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2781 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2782 tg3_asic_rev(tp) == ASIC_REV_5719)
2783 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2785 status = tr32(TG3_CPMU_DRV_STATUS);
2787 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2788 status &= ~(TG3_GPIO_MSG_MASK << shift);
2789 status |= (newstat << shift);
2791 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2792 tg3_asic_rev(tp) == ASIC_REV_5719)
2793 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2795 tw32(TG3_CPMU_DRV_STATUS, status);
2797 return status >> TG3_APE_GPIO_MSG_SHIFT;
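/* Worked example (illustrative; assumes TG3_APE_GPIO_MSG_SHIFT is 4 as
 * in tg3.h): each PCI function owns a 4-bit field in the message word,
 * with DRVR_PRES in bit 0 and NEED_VAUX in bit 1 of that field.  A
 * device at pci_fn 2 reporting TG3_GPIO_MSG_DRVR_PRES therefore sets
 * bit 4 + 4 * 2 = 12 of the register, which lands in bit 8 of the
 * value returned above and matches the << 8 term in
 * TG3_GPIO_MSG_ALL_DRVR_PRES_MASK.
 */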
2800 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2802 if (!tg3_flag(tp, IS_NIC))
2805 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2806 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2807 tg3_asic_rev(tp) == ASIC_REV_5720) {
2808 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2811 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2813 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2814 TG3_GRC_LCLCTL_PWRSW_DELAY);
2816 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2818 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2819 TG3_GRC_LCLCTL_PWRSW_DELAY);
2825 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2829 if (!tg3_flag(tp, IS_NIC) ||
2830 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2831 tg3_asic_rev(tp) == ASIC_REV_5701)
2834 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2836 tw32_wait_f(GRC_LOCAL_CTRL,
2837 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2838 TG3_GRC_LCLCTL_PWRSW_DELAY);
2840 tw32_wait_f(GRC_LOCAL_CTRL,
2842 TG3_GRC_LCLCTL_PWRSW_DELAY);
2844 tw32_wait_f(GRC_LOCAL_CTRL,
2845 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2846 TG3_GRC_LCLCTL_PWRSW_DELAY);
2849 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2851 if (!tg3_flag(tp, IS_NIC))
2854 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2855 tg3_asic_rev(tp) == ASIC_REV_5701) {
2856 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2857 (GRC_LCLCTRL_GPIO_OE0 |
2858 GRC_LCLCTRL_GPIO_OE1 |
2859 GRC_LCLCTRL_GPIO_OE2 |
2860 GRC_LCLCTRL_GPIO_OUTPUT0 |
2861 GRC_LCLCTRL_GPIO_OUTPUT1),
2862 TG3_GRC_LCLCTL_PWRSW_DELAY);
2863 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2864 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2865 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2866 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2867 GRC_LCLCTRL_GPIO_OE1 |
2868 GRC_LCLCTRL_GPIO_OE2 |
2869 GRC_LCLCTRL_GPIO_OUTPUT0 |
2870 GRC_LCLCTRL_GPIO_OUTPUT1 |
2872 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2873 TG3_GRC_LCLCTL_PWRSW_DELAY);
2875 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2876 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2877 TG3_GRC_LCLCTL_PWRSW_DELAY);
2879 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2880 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2881 TG3_GRC_LCLCTL_PWRSW_DELAY);
2884 u32 grc_local_ctrl = 0;
2886 /* Workaround to prevent overdrawing Amps. */
2887 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2888 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2889 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2891 TG3_GRC_LCLCTL_PWRSW_DELAY);
2894 /* On 5753 and variants, GPIO2 cannot be used. */
2895 no_gpio2 = tp->nic_sram_data_cfg &
2896 NIC_SRAM_DATA_CFG_NO_GPIO2;
2898 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2899 GRC_LCLCTRL_GPIO_OE1 |
2900 GRC_LCLCTRL_GPIO_OE2 |
2901 GRC_LCLCTRL_GPIO_OUTPUT1 |
2902 GRC_LCLCTRL_GPIO_OUTPUT2;
2904 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2905 GRC_LCLCTRL_GPIO_OUTPUT2);
2907 tw32_wait_f(GRC_LOCAL_CTRL,
2908 tp->grc_local_ctrl | grc_local_ctrl,
2909 TG3_GRC_LCLCTL_PWRSW_DELAY);
2911 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2913 tw32_wait_f(GRC_LOCAL_CTRL,
2914 tp->grc_local_ctrl | grc_local_ctrl,
2915 TG3_GRC_LCLCTL_PWRSW_DELAY);
2918 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2919 tw32_wait_f(GRC_LOCAL_CTRL,
2920 tp->grc_local_ctrl | grc_local_ctrl,
2921 TG3_GRC_LCLCTL_PWRSW_DELAY);
2926 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2930 /* Serialize power state transitions */
2931 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2934 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2935 msg = TG3_GPIO_MSG_NEED_VAUX;
2937 msg = tg3_set_function_status(tp, msg);
2939 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2942 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2943 tg3_pwrsrc_switch_to_vaux(tp);
2945 tg3_pwrsrc_die_with_vmain(tp);
2948 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2951 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2953 bool need_vaux = false;
2955 /* The GPIOs do something completely different on 57765. */
2956 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2959 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2960 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2961 tg3_asic_rev(tp) == ASIC_REV_5720) {
2962 tg3_frob_aux_power_5717(tp, include_wol ?
2963 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2967 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2968 struct net_device *dev_peer;
2970 dev_peer = pci_get_drvdata(tp->pdev_peer);
2972 /* remove_one() may have been run on the peer. */
2974 struct tg3 *tp_peer = netdev_priv(dev_peer);
2976 if (tg3_flag(tp_peer, INIT_COMPLETE))
2979 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2980 tg3_flag(tp_peer, ENABLE_ASF))
2985 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2986 tg3_flag(tp, ENABLE_ASF))
2990 tg3_pwrsrc_switch_to_vaux(tp);
2992 tg3_pwrsrc_die_with_vmain(tp);
2995 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2997 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2999 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3000 if (speed != SPEED_10)
3002 } else if (speed == SPEED_10)
3008 static bool tg3_phy_power_bug(struct tg3 *tp)
3010 switch (tg3_asic_rev(tp)) {
3015 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3024 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3033 static bool tg3_phy_led_bug(struct tg3 *tp)
3035 switch (tg3_asic_rev(tp)) {
3037 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3046 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3050 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3053 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3054 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3055 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3056 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3059 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3060 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3061 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3066 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3068 val = tr32(GRC_MISC_CFG);
3069 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3072 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3074 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3077 tg3_writephy(tp, MII_ADVERTISE, 0);
3078 tg3_writephy(tp, MII_BMCR,
3079 BMCR_ANENABLE | BMCR_ANRESTART);
3081 tg3_writephy(tp, MII_TG3_FET_TEST,
3082 phytest | MII_TG3_FET_SHADOW_EN);
3083 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3084 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3086 MII_TG3_FET_SHDW_AUXMODE4,
3089 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3092 } else if (do_low_power) {
3093 if (!tg3_phy_led_bug(tp))
3094 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3095 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3097 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3098 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3099 MII_TG3_AUXCTL_PCTL_VREG_11V;
3100 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3103 /* The PHY should not be powered down on some chips because of bugs.
3106 if (tg3_phy_power_bug(tp))
3109 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3110 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3111 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3112 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3113 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3114 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3117 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3120 /* tp->lock is held. */
3121 static int tg3_nvram_lock(struct tg3 *tp)
3123 if (tg3_flag(tp, NVRAM)) {
3126 if (tp->nvram_lock_cnt == 0) {
3127 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3128 for (i = 0; i < 8000; i++) {
3129 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3134 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3138 tp->nvram_lock_cnt++;
3143 /* tp->lock is held. */
3144 static void tg3_nvram_unlock(struct tg3 *tp)
3146 if (tg3_flag(tp, NVRAM)) {
3147 if (tp->nvram_lock_cnt > 0)
3148 tp->nvram_lock_cnt--;
3149 if (tp->nvram_lock_cnt == 0)
3150 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3154 /* tp->lock is held. */
3155 static void tg3_enable_nvram_access(struct tg3 *tp)
3157 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3158 u32 nvaccess = tr32(NVRAM_ACCESS);
3160 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3164 /* tp->lock is held. */
3165 static void tg3_disable_nvram_access(struct tg3 *tp)
3167 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3168 u32 nvaccess = tr32(NVRAM_ACCESS);
3170 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3174 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3175 u32 offset, u32 *val)
3180 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3183 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3184 EEPROM_ADDR_DEVID_MASK |
3186 tw32(GRC_EEPROM_ADDR,
3188 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3189 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3190 EEPROM_ADDR_ADDR_MASK) |
3191 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3193 for (i = 0; i < 1000; i++) {
3194 tmp = tr32(GRC_EEPROM_ADDR);
3196 if (tmp & EEPROM_ADDR_COMPLETE)
3200 if (!(tmp & EEPROM_ADDR_COMPLETE))
3203 tmp = tr32(GRC_EEPROM_DATA);
3206 * The data will always be opposite the native endian
3207 * format. Perform a blind byteswap to compensate.
3214 #define NVRAM_CMD_TIMEOUT 10000
3216 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3220 tw32(NVRAM_CMD, nvram_cmd);
3221 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3223 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3229 if (i == NVRAM_CMD_TIMEOUT)
3235 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3237 if (tg3_flag(tp, NVRAM) &&
3238 tg3_flag(tp, NVRAM_BUFFERED) &&
3239 tg3_flag(tp, FLASH) &&
3240 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3241 (tp->nvram_jedecnum == JEDEC_ATMEL))
3243 addr = ((addr / tp->nvram_pagesize) <<
3244 ATMEL_AT45DB0X1B_PAGE_POS) +
3245 (addr % tp->nvram_pagesize);
3250 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3252 if (tg3_flag(tp, NVRAM) &&
3253 tg3_flag(tp, NVRAM_BUFFERED) &&
3254 tg3_flag(tp, FLASH) &&
3255 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3256 (tp->nvram_jedecnum == JEDEC_ATMEL))
3258 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3259 tp->nvram_pagesize) +
3260 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
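/* Worked example (illustrative; assumes the 264-byte page size of the
 * Atmel AT45DB0X1B parts and ATMEL_AT45DB0X1B_PAGE_POS == 9): linear
 * offset 1000 sits in page 1000 / 264 = 3 at byte 1000 % 264 = 208, so
 * tg3_nvram_phys_addr() yields (3 << 9) + 208 = 0x6d0, and
 * tg3_nvram_logical_addr() inverts it: (0x6d0 >> 9) * 264 +
 * (0x6d0 & 511) = 1000.
 */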
3265 /* NOTE: Data read in from NVRAM is byteswapped according to
3266 * the byteswapping settings for all other register accesses.
3267 * tg3 devices are BE devices, so on a BE machine, the data
3268 * returned will be exactly as it is seen in NVRAM. On a LE
3269 * machine, the 32-bit value will be byteswapped.
3271 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3275 if (!tg3_flag(tp, NVRAM))
3276 return tg3_nvram_read_using_eeprom(tp, offset, val);
3278 offset = tg3_nvram_phys_addr(tp, offset);
3280 if (offset > NVRAM_ADDR_MSK)
3283 ret = tg3_nvram_lock(tp);
3287 tg3_enable_nvram_access(tp);
3289 tw32(NVRAM_ADDR, offset);
3290 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3291 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3294 *val = tr32(NVRAM_RDDATA);
3296 tg3_disable_nvram_access(tp);
3298 tg3_nvram_unlock(tp);
3303 /* Ensures NVRAM data is in bytestream format. */
3304 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3307 int res = tg3_nvram_read(tp, offset, &v);
3309 *val = cpu_to_be32(v);
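/* Worked example (illustrative, not from the original source): if the
 * NVRAM image holds the bytes aa bb cc dd, tg3_nvram_read() returns the
 * u32 0xaabbccdd on either host, but storing that u32 on a
 * little-endian host lays it out in memory as dd cc bb aa.  The
 * cpu_to_be32() above makes the in-memory bytes match the NVRAM image
 * on any host, which callers that treat the buffer as a byte stream
 * (MAC addresses, VPD data) rely on.
 */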
3313 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3314 u32 offset, u32 len, u8 *buf)
3319 for (i = 0; i < len; i += 4) {
3325 memcpy(&data, buf + i, 4);
3328 * The SEEPROM interface expects the data to always be opposite
3329 * the native endian format. We accomplish this by reversing
3330 * all the operations that would have been performed on the
3331 * data from a call to tg3_nvram_read_be32().
3333 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
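/* Illustrative note (an addition, not from the original source): for a
 * __be32 whose in-memory bytes are aa bb cc dd, be32_to_cpu() yields
 * the u32 0xaabbccdd and swab32() turns that into 0xddccbbaa, mirroring
 * the blind byteswap done on the read path so the EEPROM ends up
 * holding the original byte stream.
 */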
3335 val = tr32(GRC_EEPROM_ADDR);
3336 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3338 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3340 tw32(GRC_EEPROM_ADDR, val |
3341 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3342 (addr & EEPROM_ADDR_ADDR_MASK) |
3346 for (j = 0; j < 1000; j++) {
3347 val = tr32(GRC_EEPROM_ADDR);
3349 if (val & EEPROM_ADDR_COMPLETE)
3353 if (!(val & EEPROM_ADDR_COMPLETE)) {
3362 /* offset and length are dword aligned */
3363 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3367 u32 pagesize = tp->nvram_pagesize;
3368 u32 pagemask = pagesize - 1;
3372 tmp = kmalloc(pagesize, GFP_KERNEL);
3378 u32 phy_addr, page_off, size;
3380 phy_addr = offset & ~pagemask;
3382 for (j = 0; j < pagesize; j += 4) {
3383 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3384 (__be32 *) (tmp + j));
3391 page_off = offset & pagemask;
3398 memcpy(tmp + page_off, buf, size);
3400 offset = offset + (pagesize - page_off);
3402 tg3_enable_nvram_access(tp);
3405 * Before we can erase the flash page, we need
3406 * to issue a special "write enable" command.
3408 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3410 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3413 /* Erase the target page */
3414 tw32(NVRAM_ADDR, phy_addr);
3416 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3417 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3419 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3422 /* Issue another write enable to start the write. */
3423 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3425 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3428 for (j = 0; j < pagesize; j += 4) {
3431 data = *((__be32 *) (tmp + j));
3433 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3435 tw32(NVRAM_ADDR, phy_addr + j);
3437 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3441 nvram_cmd |= NVRAM_CMD_FIRST;
3442 else if (j == (pagesize - 4))
3443 nvram_cmd |= NVRAM_CMD_LAST;
3445 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3453 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3454 tg3_nvram_exec_cmd(tp, nvram_cmd);
3461 /* offset and length are dword aligned */
3462 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3467 for (i = 0; i < len; i += 4, offset += 4) {
3468 u32 page_off, phy_addr, nvram_cmd;
3471 memcpy(&data, buf + i, 4);
3472 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3474 page_off = offset % tp->nvram_pagesize;
3476 phy_addr = tg3_nvram_phys_addr(tp, offset);
3478 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3480 if (page_off == 0 || i == 0)
3481 nvram_cmd |= NVRAM_CMD_FIRST;
3482 if (page_off == (tp->nvram_pagesize - 4))
3483 nvram_cmd |= NVRAM_CMD_LAST;
3486 nvram_cmd |= NVRAM_CMD_LAST;
3488 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3489 !tg3_flag(tp, FLASH) ||
3490 !tg3_flag(tp, 57765_PLUS))
3491 tw32(NVRAM_ADDR, phy_addr);
3493 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3494 !tg3_flag(tp, 5755_PLUS) &&
3495 (tp->nvram_jedecnum == JEDEC_ST) &&
3496 (nvram_cmd & NVRAM_CMD_FIRST)) {
3499 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3500 ret = tg3_nvram_exec_cmd(tp, cmd);
3504 if (!tg3_flag(tp, FLASH)) {
3505 /* We always do complete word writes to eeprom. */
3506 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3509 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3516 /* offset and length are dword aligned */
3517 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3521 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3522 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3523 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3527 if (!tg3_flag(tp, NVRAM)) {
3528 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3532 ret = tg3_nvram_lock(tp);
3536 tg3_enable_nvram_access(tp);
3537 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3538 tw32(NVRAM_WRITE1, 0x406);
3540 grc_mode = tr32(GRC_MODE);
3541 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3543 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3544 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3547 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3551 grc_mode = tr32(GRC_MODE);
3552 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3554 tg3_disable_nvram_access(tp);
3555 tg3_nvram_unlock(tp);
3558 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3559 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3566 #define RX_CPU_SCRATCH_BASE 0x30000
3567 #define RX_CPU_SCRATCH_SIZE 0x04000
3568 #define TX_CPU_SCRATCH_BASE 0x34000
3569 #define TX_CPU_SCRATCH_SIZE 0x04000
3571 /* tp->lock is held. */
3572 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3575 const int iters = 10000;
3577 for (i = 0; i < iters; i++) {
3578 tw32(cpu_base + CPU_STATE, 0xffffffff);
3579 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3580 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3582 if (pci_channel_offline(tp->pdev))
3586 return (i == iters) ? -EBUSY : 0;
3589 /* tp->lock is held. */
3590 static int tg3_rxcpu_pause(struct tg3 *tp)
3592 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3594 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3595 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3601 /* tp->lock is held. */
3602 static int tg3_txcpu_pause(struct tg3 *tp)
3604 return tg3_pause_cpu(tp, TX_CPU_BASE);
3607 /* tp->lock is held. */
3608 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3610 tw32(cpu_base + CPU_STATE, 0xffffffff);
3611 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3614 /* tp->lock is held. */
3615 static void tg3_rxcpu_resume(struct tg3 *tp)
3617 tg3_resume_cpu(tp, RX_CPU_BASE);
3620 /* tp->lock is held. */
3621 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3625 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3627 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3628 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3630 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3633 if (cpu_base == RX_CPU_BASE) {
3634 rc = tg3_rxcpu_pause(tp);
3637 * There is only an Rx CPU for the 5750 derivative in the BCM4785.
3640 if (tg3_flag(tp, IS_SSB_CORE))
3643 rc = tg3_txcpu_pause(tp);
3647 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3648 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3652 /* Clear firmware's nvram arbitration. */
3653 if (tg3_flag(tp, NVRAM))
3654 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3658 static int tg3_fw_data_len(struct tg3 *tp,
3659 const struct tg3_firmware_hdr *fw_hdr)
3663 /* Non-fragmented firmware has one firmware header followed by a
3664 * contiguous chunk of data to be written. The length field in that
3665 * header is not the length of the data to be written but the complete
3666 * length of the bss. The data length is determined from
3667 * tp->fw->size minus the headers.
3669 * Fragmented firmware has a main header followed by multiple
3670 * fragments. Each fragment is identical to a non-fragmented image:
3671 * a firmware header followed by a contiguous chunk of data. In
3672 * the main header, the length field is unused and set to 0xffffffff.
3673 * In each fragment header the length is the entire size of that
3674 * fragment, i.e. fragment data plus header length. The data length is
3675 * therefore the length field in the header minus TG3_FW_HDR_LEN.
3677 if (tp->fw_len == 0xffffffff)
3678 fw_len = be32_to_cpu(fw_hdr->len);
3680 fw_len = tp->fw->size;
3682 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
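/* Layout sketch (an illustration derived from the comment above):
 *
 *   non-fragmented:  [ hdr | data ...................... ]
 *                      hdr->len = complete bss length,
 *                      data length = tp->fw->size - TG3_FW_HDR_LEN
 *
 *   fragmented:      [ main hdr | hdr1 | data1 | hdr2 | data2 | ... ]
 *                      main hdr->len = 0xffffffff,
 *                      hdrN->len = TG3_FW_HDR_LEN + dataN length
 */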
3685 /* tp->lock is held. */
3686 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3687 u32 cpu_scratch_base, int cpu_scratch_size,
3688 const struct tg3_firmware_hdr *fw_hdr)
3691 void (*write_op)(struct tg3 *, u32, u32);
3692 int total_len = tp->fw->size;
3694 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3696 "%s: Trying to load TX cpu firmware which is 5705\n",
3701 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3702 write_op = tg3_write_mem;
3704 write_op = tg3_write_indirect_reg32;
3706 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3707 /* It is possible that bootcode is still loading at this point.
3708 * Get the nvram lock first before halting the cpu.
3710 int lock_err = tg3_nvram_lock(tp);
3711 err = tg3_halt_cpu(tp, cpu_base);
3713 tg3_nvram_unlock(tp);
3717 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3718 write_op(tp, cpu_scratch_base + i, 0);
3719 tw32(cpu_base + CPU_STATE, 0xffffffff);
3720 tw32(cpu_base + CPU_MODE,
3721 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3723 /* Subtract additional main header for fragmented firmware and
3724 * advance to the first fragment
3726 total_len -= TG3_FW_HDR_LEN;
3731 u32 *fw_data = (u32 *)(fw_hdr + 1);
3732 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3733 write_op(tp, cpu_scratch_base +
3734 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3736 be32_to_cpu(fw_data[i]));
3738 total_len -= be32_to_cpu(fw_hdr->len);
3740 /* Advance to next fragment */
3741 fw_hdr = (struct tg3_firmware_hdr *)
3742 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3743 } while (total_len > 0);
3751 /* tp->lock is held. */
3752 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3755 const int iters = 5;
3757 tw32(cpu_base + CPU_STATE, 0xffffffff);
3758 tw32_f(cpu_base + CPU_PC, pc);
3760 for (i = 0; i < iters; i++) {
3761 if (tr32(cpu_base + CPU_PC) == pc)
3763 tw32(cpu_base + CPU_STATE, 0xffffffff);
3764 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3765 tw32_f(cpu_base + CPU_PC, pc);
3769 return (i == iters) ? -EBUSY : 0;
3772 /* tp->lock is held. */
3773 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3775 const struct tg3_firmware_hdr *fw_hdr;
3778 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3780 /* Firmware blob starts with version numbers, followed by
3781 * start address and length. We are setting complete length.
3782 * length = end_address_of_bss - start_address_of_text.
3783 * Remainder is the blob to be loaded contiguously
3784 * from start address. */
3786 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3787 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3792 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3793 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3798 /* Now startup only the RX cpu. */
3799 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3800 be32_to_cpu(fw_hdr->base_addr));
3802 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3803 "should be %08x\n", __func__,
3804 tr32(RX_CPU_BASE + CPU_PC),
3805 be32_to_cpu(fw_hdr->base_addr));
3809 tg3_rxcpu_resume(tp);
3814 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3816 const int iters = 1000;
3820 /* Wait for boot code to complete initialization and enter service
3821 * loop. It is then safe to download service patches.
3823 for (i = 0; i < iters; i++) {
3824 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3831 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3835 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3837 netdev_warn(tp->dev,
3838 "Other patches exist. Not downloading EEE patch\n");
3845 /* tp->lock is held. */
3846 static void tg3_load_57766_firmware(struct tg3 *tp)
3848 struct tg3_firmware_hdr *fw_hdr;
3850 if (!tg3_flag(tp, NO_NVRAM))
3853 if (tg3_validate_rxcpu_state(tp))
3859 /* This firmware blob has a different format from older firmware
3860 * releases, as described below. The main difference is that the
3861 * data is fragmented and written to non-contiguous locations.
3863 * In the beginning we have a firmware header identical to other
3864 * firmware, consisting of version, base addr and length. The length
3865 * here is unused and set to 0xffffffff.
3867 * This is followed by a series of firmware fragments, each of
3868 * which is individually identical to the older format: a
3869 * firmware header followed by the data for that fragment. The version
3870 * field of the individual fragment headers is unused.
3873 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3874 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3877 if (tg3_rxcpu_pause(tp))
3880 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3881 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3883 tg3_rxcpu_resume(tp);
3886 /* tp->lock is held. */
3887 static int tg3_load_tso_firmware(struct tg3 *tp)
3889 const struct tg3_firmware_hdr *fw_hdr;
3890 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3893 if (!tg3_flag(tp, FW_TSO))
3896 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3898 /* Firmware blob starts with version numbers, followed by
3899 start address and length. We are setting complete length.
3900 length = end_address_of_bss - start_address_of_text.
3901 Remainder is the blob to be loaded contiguously
3902 from start address. */
3904 cpu_scratch_size = tp->fw_len;
3906 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3907 cpu_base = RX_CPU_BASE;
3908 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3910 cpu_base = TX_CPU_BASE;
3911 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3912 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3915 err = tg3_load_firmware_cpu(tp, cpu_base,
3916 cpu_scratch_base, cpu_scratch_size,
3921 /* Now startup the cpu. */
3922 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3923 be32_to_cpu(fw_hdr->base_addr));
3926 "%s fails to set CPU PC, is %08x should be %08x\n",
3927 __func__, tr32(cpu_base + CPU_PC),
3928 be32_to_cpu(fw_hdr->base_addr));
3932 tg3_resume_cpu(tp, cpu_base);
3937 /* tp->lock is held. */
3938 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3940 u32 addr_high, addr_low;
3943 addr_high = ((tp->dev->dev_addr[0] << 8) |
3944 tp->dev->dev_addr[1]);
3945 addr_low = ((tp->dev->dev_addr[2] << 24) |
3946 (tp->dev->dev_addr[3] << 16) |
3947 (tp->dev->dev_addr[4] << 8) |
3948 (tp->dev->dev_addr[5] << 0));
3949 for (i = 0; i < 4; i++) {
3950 if (i == 1 && skip_mac_1)
3952 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3953 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3956 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3957 tg3_asic_rev(tp) == ASIC_REV_5704) {
3958 for (i = 0; i < 12; i++) {
3959 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3960 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3964 addr_high = (tp->dev->dev_addr[0] +
3965 tp->dev->dev_addr[1] +
3966 tp->dev->dev_addr[2] +
3967 tp->dev->dev_addr[3] +
3968 tp->dev->dev_addr[4] +
3969 tp->dev->dev_addr[5]) &
3970 TX_BACKOFF_SEED_MASK;
3971 tw32(MAC_TX_BACKOFF_SEED, addr_high);
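/* Worked example (illustrative, not from the original source): for
 * dev_addr 00:10:18:01:02:03 the code above writes
 * addr_high = 0x0010 and addr_low = 0x18010203, and the transmit
 * backoff seed is the plain byte sum masked by TX_BACKOFF_SEED_MASK:
 * (0x00 + 0x10 + 0x18 + 0x01 + 0x02 + 0x03) & mask = 0x2e.  The seed
 * only needs to decorrelate half-duplex backoff between NICs, so a
 * simple sum suffices.
 */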
3974 static void tg3_enable_register_access(struct tg3 *tp)
3977 * Make sure register accesses (indirect or otherwise) will function correctly.
3980 pci_write_config_dword(tp->pdev,
3981 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3984 static int tg3_power_up(struct tg3 *tp)
3988 tg3_enable_register_access(tp);
3990 err = pci_set_power_state(tp->pdev, PCI_D0);
3992 /* Switch out of Vaux if it is a NIC */
3993 tg3_pwrsrc_switch_to_vmain(tp);
3995 netdev_err(tp->dev, "Transition to D0 failed\n");
4001 static int tg3_setup_phy(struct tg3 *, bool);
4003 static int tg3_power_down_prepare(struct tg3 *tp)
4006 bool device_should_wake, do_low_power;
4008 tg3_enable_register_access(tp);
4010 /* Restore the CLKREQ setting. */
4011 if (tg3_flag(tp, CLKREQ_BUG))
4012 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4013 PCI_EXP_LNKCTL_CLKREQ_EN);
4015 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4016 tw32(TG3PCI_MISC_HOST_CTRL,
4017 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4019 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4020 tg3_flag(tp, WOL_ENABLE);
4022 if (tg3_flag(tp, USE_PHYLIB)) {
4023 do_low_power = false;
4024 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4025 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4026 struct phy_device *phydev;
4027 u32 phyid, advertising;
4029 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
4031 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4033 tp->link_config.speed = phydev->speed;
4034 tp->link_config.duplex = phydev->duplex;
4035 tp->link_config.autoneg = phydev->autoneg;
4036 tp->link_config.advertising = phydev->advertising;
4038 advertising = ADVERTISED_TP |
4040 ADVERTISED_Autoneg |
4041 ADVERTISED_10baseT_Half;
4043 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4044 if (tg3_flag(tp, WOL_SPEED_100MB))
4046 ADVERTISED_100baseT_Half |
4047 ADVERTISED_100baseT_Full |
4048 ADVERTISED_10baseT_Full;
4050 advertising |= ADVERTISED_10baseT_Full;
4053 phydev->advertising = advertising;
4055 phy_start_aneg(phydev);
4057 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4058 if (phyid != PHY_ID_BCMAC131) {
4059 phyid &= PHY_BCM_OUI_MASK;
4060 if (phyid == PHY_BCM_OUI_1 ||
4061 phyid == PHY_BCM_OUI_2 ||
4062 phyid == PHY_BCM_OUI_3)
4063 do_low_power = true;
4067 do_low_power = true;
4069 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4070 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4072 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4073 tg3_setup_phy(tp, false);
4076 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4079 val = tr32(GRC_VCPU_EXT_CTRL);
4080 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4081 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4085 for (i = 0; i < 200; i++) {
4086 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4087 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4092 if (tg3_flag(tp, WOL_CAP))
4093 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4094 WOL_DRV_STATE_SHUTDOWN |
4098 if (device_should_wake) {
4101 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4103 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4104 tg3_phy_auxctl_write(tp,
4105 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4106 MII_TG3_AUXCTL_PCTL_WOL_EN |
4107 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4108 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4112 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4113 mac_mode = MAC_MODE_PORT_MODE_GMII;
4114 else if (tp->phy_flags &
4115 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4116 if (tp->link_config.active_speed == SPEED_1000)
4117 mac_mode = MAC_MODE_PORT_MODE_GMII;
4119 mac_mode = MAC_MODE_PORT_MODE_MII;
4121 mac_mode = MAC_MODE_PORT_MODE_MII;
4123 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4124 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4125 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4126 SPEED_100 : SPEED_10;
4127 if (tg3_5700_link_polarity(tp, speed))
4128 mac_mode |= MAC_MODE_LINK_POLARITY;
4130 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4133 mac_mode = MAC_MODE_PORT_MODE_TBI;
4136 if (!tg3_flag(tp, 5750_PLUS))
4137 tw32(MAC_LED_CTRL, tp->led_ctrl);
4139 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4140 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4141 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4142 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4144 if (tg3_flag(tp, ENABLE_APE))
4145 mac_mode |= MAC_MODE_APE_TX_EN |
4146 MAC_MODE_APE_RX_EN |
4147 MAC_MODE_TDE_ENABLE;
4149 tw32_f(MAC_MODE, mac_mode);
4152 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4156 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4157 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4158 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4161 base_val = tp->pci_clock_ctrl;
4162 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4163 CLOCK_CTRL_TXCLK_DISABLE);
4165 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4166 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4167 } else if (tg3_flag(tp, 5780_CLASS) ||
4168 tg3_flag(tp, CPMU_PRESENT) ||
4169 tg3_asic_rev(tp) == ASIC_REV_5906) {
4171 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4172 u32 newbits1, newbits2;
4174 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4175 tg3_asic_rev(tp) == ASIC_REV_5701) {
4176 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4177 CLOCK_CTRL_TXCLK_DISABLE |
4179 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4180 } else if (tg3_flag(tp, 5705_PLUS)) {
4181 newbits1 = CLOCK_CTRL_625_CORE;
4182 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4184 newbits1 = CLOCK_CTRL_ALTCLK;
4185 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4188 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4191 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4194 if (!tg3_flag(tp, 5705_PLUS)) {
4197 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4198 tg3_asic_rev(tp) == ASIC_REV_5701) {
4199 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4200 CLOCK_CTRL_TXCLK_DISABLE |
4201 CLOCK_CTRL_44MHZ_CORE);
4203 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4206 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4207 tp->pci_clock_ctrl | newbits3, 40);
4211 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4212 tg3_power_down_phy(tp, do_low_power);
4214 tg3_frob_aux_power(tp, true);
4216 /* Workaround for unstable PLL clock */
4217 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4218 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4219 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4220 u32 val = tr32(0x7d00);
4222 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4224 if (!tg3_flag(tp, ENABLE_ASF)) {
4227 err = tg3_nvram_lock(tp);
4228 tg3_halt_cpu(tp, RX_CPU_BASE);
4230 tg3_nvram_unlock(tp);
4234 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4236 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4241 static void tg3_power_down(struct tg3 *tp)
4243 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4244 pci_set_power_state(tp->pdev, PCI_D3hot);
4247 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4249 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4250 case MII_TG3_AUX_STAT_10HALF:
4252 *duplex = DUPLEX_HALF;
4255 case MII_TG3_AUX_STAT_10FULL:
4257 *duplex = DUPLEX_FULL;
4260 case MII_TG3_AUX_STAT_100HALF:
4262 *duplex = DUPLEX_HALF;
4265 case MII_TG3_AUX_STAT_100FULL:
4267 *duplex = DUPLEX_FULL;
4270 case MII_TG3_AUX_STAT_1000HALF:
4271 *speed = SPEED_1000;
4272 *duplex = DUPLEX_HALF;
4275 case MII_TG3_AUX_STAT_1000FULL:
4276 *speed = SPEED_1000;
4277 *duplex = DUPLEX_FULL;
4281 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4282 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4284 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4288 *speed = SPEED_UNKNOWN;
4289 *duplex = DUPLEX_UNKNOWN;
4294 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4299 new_adv = ADVERTISE_CSMA;
4300 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4301 new_adv |= mii_advertise_flowctrl(flowctrl);
4303 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4307 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4308 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4310 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4311 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4312 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4314 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4319 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4322 tw32(TG3_CPMU_EEE_MODE,
4323 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4325 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4330 /* Advertise 100-BaseTX EEE ability */
4331 if (advertise & ADVERTISED_100baseT_Full)
4332 val |= MDIO_AN_EEE_ADV_100TX;
4333 /* Advertise 1000-BaseT EEE ability */
4334 if (advertise & ADVERTISED_1000baseT_Full)
4335 val |= MDIO_AN_EEE_ADV_1000T;
4337 if (!tp->eee.eee_enabled) {
4339 tp->eee.advertised = 0;
4341 tp->eee.advertised = advertise &
4342 (ADVERTISED_100baseT_Full |
4343 ADVERTISED_1000baseT_Full);
4346 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4350 switch (tg3_asic_rev(tp)) {
4352 case ASIC_REV_57765:
4353 case ASIC_REV_57766:
4355 /* If we advertised any EEE abilities above... */
4357 val = MII_TG3_DSP_TAP26_ALNOKO |
4358 MII_TG3_DSP_TAP26_RMRXSTO |
4359 MII_TG3_DSP_TAP26_OPCSINPT;
4360 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4364 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4365 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4366 MII_TG3_DSP_CH34TP2_HIBW01);
4369 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4378 static void tg3_phy_copper_begin(struct tg3 *tp)
4380 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4381 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4384 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4385 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4386 adv = ADVERTISED_10baseT_Half |
4387 ADVERTISED_10baseT_Full;
4388 if (tg3_flag(tp, WOL_SPEED_100MB))
4389 adv |= ADVERTISED_100baseT_Half |
4390 ADVERTISED_100baseT_Full;
4391 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
4392 adv |= ADVERTISED_1000baseT_Half |
4393 ADVERTISED_1000baseT_Full;
4395 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4397 adv = tp->link_config.advertising;
4398 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4399 adv &= ~(ADVERTISED_1000baseT_Half |
4400 ADVERTISED_1000baseT_Full);
4402 fc = tp->link_config.flowctrl;
4405 tg3_phy_autoneg_cfg(tp, adv, fc);
4407 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4408 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4409 /* Normally during power down we want to autonegotiate
4410 * the lowest possible speed for WOL. However, to avoid
4411 * link flap, we leave it untouched.
4416 tg3_writephy(tp, MII_BMCR,
4417 BMCR_ANENABLE | BMCR_ANRESTART);
4420 u32 bmcr, orig_bmcr;
4422 tp->link_config.active_speed = tp->link_config.speed;
4423 tp->link_config.active_duplex = tp->link_config.duplex;
4425 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4426 /* With autoneg disabled, 5715 only links up when the
4427 * advertisement register has the configured speed enabled.
4430 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4434 switch (tp->link_config.speed) {
4440 bmcr |= BMCR_SPEED100;
4444 bmcr |= BMCR_SPEED1000;
4448 if (tp->link_config.duplex == DUPLEX_FULL)
4449 bmcr |= BMCR_FULLDPLX;
4451 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4452 (bmcr != orig_bmcr)) {
4453 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4454 for (i = 0; i < 1500; i++) {
4458 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4459 tg3_readphy(tp, MII_BMSR, &tmp))
4461 if (!(tmp & BMSR_LSTATUS)) {
4466 tg3_writephy(tp, MII_BMCR, bmcr);
4472 static int tg3_phy_pull_config(struct tg3 *tp)
4477 err = tg3_readphy(tp, MII_BMCR, &val);
4481 if (!(val & BMCR_ANENABLE)) {
4482 tp->link_config.autoneg = AUTONEG_DISABLE;
4483 tp->link_config.advertising = 0;
4484 tg3_flag_clear(tp, PAUSE_AUTONEG);
4488 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4490 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4493 tp->link_config.speed = SPEED_10;
4496 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4499 tp->link_config.speed = SPEED_100;
4501 case BMCR_SPEED1000:
4502 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4503 tp->link_config.speed = SPEED_1000;
4511 if (val & BMCR_FULLDPLX)
4512 tp->link_config.duplex = DUPLEX_FULL;
4514 tp->link_config.duplex = DUPLEX_HALF;
4516 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4522 tp->link_config.autoneg = AUTONEG_ENABLE;
4523 tp->link_config.advertising = ADVERTISED_Autoneg;
4524 tg3_flag_set(tp, PAUSE_AUTONEG);
4526 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4529 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4533 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4534 tp->link_config.advertising |= adv | ADVERTISED_TP;
4536 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4538 tp->link_config.advertising |= ADVERTISED_FIBRE;
4541 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4544 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4545 err = tg3_readphy(tp, MII_CTRL1000, &val);
4549 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4551 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4555 adv = tg3_decode_flowctrl_1000X(val);
4556 tp->link_config.flowctrl = adv;
4558 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4559 adv = mii_adv_to_ethtool_adv_x(val);
4562 tp->link_config.advertising |= adv;
4569 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4573 /* Turn off tap power management. */
4574 /* Set Extended packet length bit */
4575 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4577 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4578 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4579 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4580 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4581 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4588 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4590 struct ethtool_eee eee;
4592 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4595 tg3_eee_pull_config(tp, &eee);
4597 if (tp->eee.eee_enabled) {
4598 if (tp->eee.advertised != eee.advertised ||
4599 tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4600 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4603 /* EEE is disabled but we're advertising */
4611 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4613 u32 advmsk, tgtadv, advertising;
4615 advertising = tp->link_config.advertising;
4616 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4618 advmsk = ADVERTISE_ALL;
4619 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4620 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4621 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4624 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4627 if ((*lcladv & advmsk) != tgtadv)
4630 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4633 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4635 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4639 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4640 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4641 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4642 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4643 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4645 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4648 if (tg3_ctrl != tgtadv)
4655 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4659 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4662 if (tg3_readphy(tp, MII_STAT1000, &val))
4665 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4668 if (tg3_readphy(tp, MII_LPA, rmtadv))
4671 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4672 tp->link_config.rmt_adv = lpeth;
4677 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4679 if (curr_link_up != tp->link_up) {
4681 netif_carrier_on(tp->dev);
4683 netif_carrier_off(tp->dev);
4684 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4685 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4688 tg3_link_report(tp);
4695 static void tg3_clear_mac_status(struct tg3 *tp)
4700 MAC_STATUS_SYNC_CHANGED |
4701 MAC_STATUS_CFG_CHANGED |
4702 MAC_STATUS_MI_COMPLETION |
4703 MAC_STATUS_LNKSTATE_CHANGED);
4707 static void tg3_setup_eee(struct tg3 *tp)
4711 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4712 TG3_CPMU_EEE_LNKIDL_UART_IDL;
4713 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4714 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4716 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4718 tw32_f(TG3_CPMU_EEE_CTRL,
4719 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4721 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4722 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4723 TG3_CPMU_EEEMD_LPI_IN_RX |
4724 TG3_CPMU_EEEMD_EEE_ENABLE;
4726 if (tg3_asic_rev(tp) != ASIC_REV_5717)
4727 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4729 if (tg3_flag(tp, ENABLE_APE))
4730 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4732 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4734 tw32_f(TG3_CPMU_EEE_DBTMR1,
4735 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4736 (tp->eee.tx_lpi_timer & 0xffff));
4738 tw32_f(TG3_CPMU_EEE_DBTMR2,
4739 TG3_CPMU_DBTMR2_APE_TX_2047US |
4740 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4743 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4745 bool current_link_up;
4747 u32 lcl_adv, rmt_adv;
4752 tg3_clear_mac_status(tp);
4754 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4756 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4760 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4762 /* Some third-party PHYs need to be reset on link going down.
4765 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4766 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4767 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4769 tg3_readphy(tp, MII_BMSR, &bmsr);
4770 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4771 !(bmsr & BMSR_LSTATUS))
4777 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4778 tg3_readphy(tp, MII_BMSR, &bmsr);
4779 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4780 !tg3_flag(tp, INIT_COMPLETE))
4783 if (!(bmsr & BMSR_LSTATUS)) {
4784 err = tg3_init_5401phy_dsp(tp);
4788 tg3_readphy(tp, MII_BMSR, &bmsr);
4789 for (i = 0; i < 1000; i++) {
4791 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4792 (bmsr & BMSR_LSTATUS)) {
4798 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4799 TG3_PHY_REV_BCM5401_B0 &&
4800 !(bmsr & BMSR_LSTATUS) &&
4801 tp->link_config.active_speed == SPEED_1000) {
4802 err = tg3_phy_reset(tp);
4804 err = tg3_init_5401phy_dsp(tp);
4809 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4810 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4811 /* 5701 {A0,B0} CRC bug workaround */
4812 tg3_writephy(tp, 0x15, 0x0a75);
4813 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4814 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4815 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4818 /* Clear pending interrupts... */
4819 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4820 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4822 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4823 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4824 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4825 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4827 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4828 tg3_asic_rev(tp) == ASIC_REV_5701) {
4829 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4830 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4831 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4833 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4836 current_link_up = false;
4837 current_speed = SPEED_UNKNOWN;
4838 current_duplex = DUPLEX_UNKNOWN;
4839 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4840 tp->link_config.rmt_adv = 0;
4842 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4843 err = tg3_phy_auxctl_read(tp,
4844 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4846 if (!err && !(val & (1 << 10))) {
4847 tg3_phy_auxctl_write(tp,
4848 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4855 for (i = 0; i < 100; i++) {
4856 tg3_readphy(tp, MII_BMSR, &bmsr);
4857 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4858 (bmsr & BMSR_LSTATUS))
4863 if (bmsr & BMSR_LSTATUS) {
4866 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4867 for (i = 0; i < 2000; i++) {
4869 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4874 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4879 for (i = 0; i < 200; i++) {
4880 tg3_readphy(tp, MII_BMCR, &bmcr);
4881 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4883 if (bmcr && bmcr != 0x7fff)
4891 tp->link_config.active_speed = current_speed;
4892 tp->link_config.active_duplex = current_duplex;
4894 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4895 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4897 if ((bmcr & BMCR_ANENABLE) &&
4899 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4900 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4901 current_link_up = true;
4903 /* Changes to EEE settings take effect only after a PHY
4904 * reset. If we have skipped a reset due to Link Flap
4905 * Avoidance being enabled, do it now.
4907 if (!eee_config_ok &&
4908 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4914 if (!(bmcr & BMCR_ANENABLE) &&
4915 tp->link_config.speed == current_speed &&
4916 tp->link_config.duplex == current_duplex) {
4917 current_link_up = true;
4921 if (current_link_up &&
4922 tp->link_config.active_duplex == DUPLEX_FULL) {
4925 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4926 reg = MII_TG3_FET_GEN_STAT;
4927 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4929 reg = MII_TG3_EXT_STAT;
4930 bit = MII_TG3_EXT_STAT_MDIX;
4933 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4934 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4936 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4941 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4942 tg3_phy_copper_begin(tp);
4944 if (tg3_flag(tp, ROBOSWITCH)) {
4945 current_link_up = true;
4946 /* FIXME: when a BCM5325 switch is used, use 100 Mbit/s */
4947 current_speed = SPEED_1000;
4948 current_duplex = DUPLEX_FULL;
4949 tp->link_config.active_speed = current_speed;
4950 tp->link_config.active_duplex = current_duplex;
4953 tg3_readphy(tp, MII_BMSR, &bmsr);
4954 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4955 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4956 current_link_up = true;
4959 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4960 if (current_link_up) {
4961 if (tp->link_config.active_speed == SPEED_100 ||
4962 tp->link_config.active_speed == SPEED_10)
4963 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4965 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4966 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4967 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4969 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4971 /* In order for the 5750 core in the BCM4785 chip to work properly
4972 * in RGMII mode, the LED Control Register must be set up.
4974 if (tg3_flag(tp, RGMII_MODE)) {
4975 u32 led_ctrl = tr32(MAC_LED_CTRL);
4976 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4978 if (tp->link_config.active_speed == SPEED_10)
4979 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4980 else if (tp->link_config.active_speed == SPEED_100)
4981 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4982 LED_CTRL_100MBPS_ON);
4983 else if (tp->link_config.active_speed == SPEED_1000)
4984 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4985 LED_CTRL_1000MBPS_ON);
4987 tw32(MAC_LED_CTRL, led_ctrl);
4991 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4992 if (tp->link_config.active_duplex == DUPLEX_HALF)
4993 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4995 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4996 if (current_link_up &&
4997 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4998 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5000 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5003 /* ??? Without this setting Netgear GA302T PHY does not
5004 * ??? send/receive packets...
5006 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5007 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5008 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5009 tw32_f(MAC_MI_MODE, tp->mi_mode);
5013 tw32_f(MAC_MODE, tp->mac_mode);
5016 tg3_phy_eee_adjust(tp, current_link_up);
5018 if (tg3_flag(tp, USE_LINKCHG_REG)) {
5019 /* Polled via timer. */
5020 tw32_f(MAC_EVENT, 0);
5022 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5026 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5028 tp->link_config.active_speed == SPEED_1000 &&
5029 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5032 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5033 MAC_STATUS_CFG_CHANGED));
5036 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5037 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5040 /* Prevent send BD corruption. */
5041 if (tg3_flag(tp, CLKREQ_BUG)) {
5042 if (tp->link_config.active_speed == SPEED_100 ||
5043 tp->link_config.active_speed == SPEED_10)
5044 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5045 PCI_EXP_LNKCTL_CLKREQ_EN);
5047 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5048 PCI_EXP_LNKCTL_CLKREQ_EN);
5051 tg3_test_and_report_link_chg(tp, current_link_up);
5056 struct tg3_fiber_aneginfo {
5058 #define ANEG_STATE_UNKNOWN 0
5059 #define ANEG_STATE_AN_ENABLE 1
5060 #define ANEG_STATE_RESTART_INIT 2
5061 #define ANEG_STATE_RESTART 3
5062 #define ANEG_STATE_DISABLE_LINK_OK 4
5063 #define ANEG_STATE_ABILITY_DETECT_INIT 5
5064 #define ANEG_STATE_ABILITY_DETECT 6
5065 #define ANEG_STATE_ACK_DETECT_INIT 7
5066 #define ANEG_STATE_ACK_DETECT 8
5067 #define ANEG_STATE_COMPLETE_ACK_INIT 9
5068 #define ANEG_STATE_COMPLETE_ACK 10
5069 #define ANEG_STATE_IDLE_DETECT_INIT 11
5070 #define ANEG_STATE_IDLE_DETECT 12
5071 #define ANEG_STATE_LINK_OK 13
5072 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5073 #define ANEG_STATE_NEXT_PAGE_WAIT 15
5076 #define MR_AN_ENABLE 0x00000001
5077 #define MR_RESTART_AN 0x00000002
5078 #define MR_AN_COMPLETE 0x00000004
5079 #define MR_PAGE_RX 0x00000008
5080 #define MR_NP_LOADED 0x00000010
5081 #define MR_TOGGLE_TX 0x00000020
5082 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5083 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5084 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5085 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5086 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5087 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5088 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5089 #define MR_TOGGLE_RX 0x00002000
5090 #define MR_NP_RX 0x00004000
5092 #define MR_LINK_OK 0x80000000
5094 unsigned long link_time, cur_time;
5096 u32 ability_match_cfg;
5097 int ability_match_count;
5099 char ability_match, idle_match, ack_match;
5101 u32 txconfig, rxconfig;
5102 #define ANEG_CFG_NP 0x00000080
5103 #define ANEG_CFG_ACK 0x00000040
5104 #define ANEG_CFG_RF2 0x00000020
5105 #define ANEG_CFG_RF1 0x00000010
5106 #define ANEG_CFG_PS2 0x00000001
5107 #define ANEG_CFG_PS1 0x00008000
5108 #define ANEG_CFG_HD 0x00004000
5109 #define ANEG_CFG_FD 0x00002000
5110 #define ANEG_CFG_INVAL 0x00001f06
5115 #define ANEG_TIMER_ENAB 2
5116 #define ANEG_FAILED -1
5118 #define ANEG_STATE_SETTLE_TIME 10000
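/* Editor's note (not in the original source): this state machine
 * follows the IEEE 802.3 Clause 37 1000BASE-X autoneg arbitration
 * flow, with the MR_* flags playing the role of the clause's mr_*
 * management variables. As an example of the ANEG_CFG_* decoding, a
 * received config word of (ANEG_CFG_FD | ANEG_CFG_PS1 | ANEG_CFG_ACK)
 * means the link partner advertises full duplex with symmetric pause
 * and has acknowledged our ability word.
 */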
5120 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5121 struct tg3_fiber_aneginfo *ap)
5124 unsigned long delta;
5128 if (ap->state == ANEG_STATE_UNKNOWN) {
5132 ap->ability_match_cfg = 0;
5133 ap->ability_match_count = 0;
5134 ap->ability_match = 0;
5140 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5141 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5143 if (rx_cfg_reg != ap->ability_match_cfg) {
5144 ap->ability_match_cfg = rx_cfg_reg;
5145 ap->ability_match = 0;
5146 ap->ability_match_count = 0;
5148 if (++ap->ability_match_count > 1) {
5149 ap->ability_match = 1;
5150 ap->ability_match_cfg = rx_cfg_reg;
5153 if (rx_cfg_reg & ANEG_CFG_ACK)
5161 ap->ability_match_cfg = 0;
5162 ap->ability_match_count = 0;
5163 ap->ability_match = 0;
5169 ap->rxconfig = rx_cfg_reg;
5172 switch (ap->state) {
5173 case ANEG_STATE_UNKNOWN:
5174 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5175 ap->state = ANEG_STATE_AN_ENABLE;
5178 case ANEG_STATE_AN_ENABLE:
5179 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5180 if (ap->flags & MR_AN_ENABLE) {
5183 ap->ability_match_cfg = 0;
5184 ap->ability_match_count = 0;
5185 ap->ability_match = 0;
5189 ap->state = ANEG_STATE_RESTART_INIT;
5191 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5195 case ANEG_STATE_RESTART_INIT:
5196 ap->link_time = ap->cur_time;
5197 ap->flags &= ~(MR_NP_LOADED);
5199 tw32(MAC_TX_AUTO_NEG, 0);
5200 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5201 tw32_f(MAC_MODE, tp->mac_mode);
5204 ret = ANEG_TIMER_ENAB;
5205 ap->state = ANEG_STATE_RESTART;
5208 case ANEG_STATE_RESTART:
5209 delta = ap->cur_time - ap->link_time;
5210 if (delta > ANEG_STATE_SETTLE_TIME)
5211 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5213 ret = ANEG_TIMER_ENAB;
5216 case ANEG_STATE_DISABLE_LINK_OK:
5220 case ANEG_STATE_ABILITY_DETECT_INIT:
5221 ap->flags &= ~(MR_TOGGLE_TX);
5222 ap->txconfig = ANEG_CFG_FD;
5223 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5224 if (flowctrl & ADVERTISE_1000XPAUSE)
5225 ap->txconfig |= ANEG_CFG_PS1;
5226 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5227 ap->txconfig |= ANEG_CFG_PS2;
5228 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5229 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5230 tw32_f(MAC_MODE, tp->mac_mode);
5233 ap->state = ANEG_STATE_ABILITY_DETECT;
5236 case ANEG_STATE_ABILITY_DETECT:
5237 if (ap->ability_match != 0 && ap->rxconfig != 0)
5238 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5241 case ANEG_STATE_ACK_DETECT_INIT:
5242 ap->txconfig |= ANEG_CFG_ACK;
5243 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5244 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5245 tw32_f(MAC_MODE, tp->mac_mode);
5248 ap->state = ANEG_STATE_ACK_DETECT;
5251 case ANEG_STATE_ACK_DETECT:
5252 if (ap->ack_match != 0) {
5253 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5254 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5255 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5257 ap->state = ANEG_STATE_AN_ENABLE;
5259 } else if (ap->ability_match != 0 &&
5260 ap->rxconfig == 0) {
5261 ap->state = ANEG_STATE_AN_ENABLE;
5265 case ANEG_STATE_COMPLETE_ACK_INIT:
5266 if (ap->rxconfig & ANEG_CFG_INVAL) {
5270 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5271 MR_LP_ADV_HALF_DUPLEX |
5272 MR_LP_ADV_SYM_PAUSE |
5273 MR_LP_ADV_ASYM_PAUSE |
5274 MR_LP_ADV_REMOTE_FAULT1 |
5275 MR_LP_ADV_REMOTE_FAULT2 |
5276 MR_LP_ADV_NEXT_PAGE |
5279 if (ap->rxconfig & ANEG_CFG_FD)
5280 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5281 if (ap->rxconfig & ANEG_CFG_HD)
5282 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5283 if (ap->rxconfig & ANEG_CFG_PS1)
5284 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5285 if (ap->rxconfig & ANEG_CFG_PS2)
5286 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5287 if (ap->rxconfig & ANEG_CFG_RF1)
5288 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5289 if (ap->rxconfig & ANEG_CFG_RF2)
5290 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5291 if (ap->rxconfig & ANEG_CFG_NP)
5292 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5294 ap->link_time = ap->cur_time;
5296 ap->flags ^= (MR_TOGGLE_TX);
5297 if (ap->rxconfig & 0x0008)
5298 ap->flags |= MR_TOGGLE_RX;
5299 if (ap->rxconfig & ANEG_CFG_NP)
5300 ap->flags |= MR_NP_RX;
5301 ap->flags |= MR_PAGE_RX;
5303 ap->state = ANEG_STATE_COMPLETE_ACK;
5304 ret = ANEG_TIMER_ENAB;
5307 case ANEG_STATE_COMPLETE_ACK:
5308 if (ap->ability_match != 0 &&
5309 ap->rxconfig == 0) {
5310 ap->state = ANEG_STATE_AN_ENABLE;
5313 delta = ap->cur_time - ap->link_time;
5314 if (delta > ANEG_STATE_SETTLE_TIME) {
5315 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5316 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5318 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5319 !(ap->flags & MR_NP_RX)) {
5320 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5328 case ANEG_STATE_IDLE_DETECT_INIT:
5329 ap->link_time = ap->cur_time;
5330 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5331 tw32_f(MAC_MODE, tp->mac_mode);
5334 ap->state = ANEG_STATE_IDLE_DETECT;
5335 ret = ANEG_TIMER_ENAB;
5338 case ANEG_STATE_IDLE_DETECT:
5339 if (ap->ability_match != 0 &&
5340 ap->rxconfig == 0) {
5341 ap->state = ANEG_STATE_AN_ENABLE;
5344 delta = ap->cur_time - ap->link_time;
5345 if (delta > ANEG_STATE_SETTLE_TIME) {
5346 /* XXX another gem from the Broadcom driver :( */
5347 ap->state = ANEG_STATE_LINK_OK;
5351 case ANEG_STATE_LINK_OK:
5352 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5356 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5357 /* ??? unimplemented */
5360 case ANEG_STATE_NEXT_PAGE_WAIT:
5361 /* ??? unimplemented */
5372 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5375 struct tg3_fiber_aneginfo aninfo;
5376 int status = ANEG_FAILED;
5380 tw32_f(MAC_TX_AUTO_NEG, 0);
5382 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5383 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5386 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5389 memset(&aninfo, 0, sizeof(aninfo));
5390 aninfo.flags |= MR_AN_ENABLE;
5391 aninfo.state = ANEG_STATE_UNKNOWN;
5392 aninfo.cur_time = 0;
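/* Editor's note: each pass through the loop below is followed by a
 * udelay(1), so hand-cranked autoneg is bounded to roughly 195 ms.
 */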
5394 while (++tick < 195000) {
5395 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5396 if (status == ANEG_DONE || status == ANEG_FAILED)
5402 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5403 tw32_f(MAC_MODE, tp->mac_mode);
5406 *txflags = aninfo.txconfig;
5407 *rxflags = aninfo.flags;
5409 if (status == ANEG_DONE &&
5410 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5411 MR_LP_ADV_FULL_DUPLEX)))
5417 static void tg3_init_bcm8002(struct tg3 *tp)
5419 u32 mac_status = tr32(MAC_STATUS);
5422 /* Reset when initializing for the first time, or when we have a link. */
5423 if (tg3_flag(tp, INIT_COMPLETE) &&
5424 !(mac_status & MAC_STATUS_PCS_SYNCED))
5427 /* Set PLL lock range. */
5428 tg3_writephy(tp, 0x16, 0x8007);
5431 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5433 /* Wait for reset to complete. */
5434 /* XXX schedule_timeout() ... */
5435 for (i = 0; i < 500; i++)
5438 /* Config mode; select PMA/Ch 1 regs. */
5439 tg3_writephy(tp, 0x10, 0x8411);
5441 /* Enable auto-lock and comdet, select txclk for tx. */
5442 tg3_writephy(tp, 0x11, 0x0a10);
5444 tg3_writephy(tp, 0x18, 0x00a0);
5445 tg3_writephy(tp, 0x16, 0x41ff);
5447 /* Assert and deassert POR. */
5448 tg3_writephy(tp, 0x13, 0x0400);
5450 tg3_writephy(tp, 0x13, 0x0000);
5452 tg3_writephy(tp, 0x11, 0x0a50);
5454 tg3_writephy(tp, 0x11, 0x0a10);
5456 /* Wait for signal to stabilize */
5457 /* XXX schedule_timeout() ... */
5458 for (i = 0; i < 15000; i++)
5461 /* Deselect the channel register so we can read the PHYID later. */
5464 tg3_writephy(tp, 0x10, 0x8011);
5467 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5470 bool current_link_up;
5471 u32 sg_dig_ctrl, sg_dig_status;
5472 u32 serdes_cfg, expected_sg_dig_ctrl;
5473 int workaround, port_a;
5476 expected_sg_dig_ctrl = 0;
5479 current_link_up = false;
5481 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5482 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5484 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5487 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5488 /* preserve bits 20-23 for voltage regulator */
5489 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5492 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5494 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5495 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5497 u32 val = serdes_cfg;
5503 tw32_f(MAC_SERDES_CFG, val);
5506 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5508 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5509 tg3_setup_flow_control(tp, 0, 0);
5510 current_link_up = true;
5515 /* Want auto-negotiation. */
5516 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5518 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5519 if (flowctrl & ADVERTISE_1000XPAUSE)
5520 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5521 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5522 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5524 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5525 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5526 tp->serdes_counter &&
5527 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5528 MAC_STATUS_RCVD_CFG)) ==
5529 MAC_STATUS_PCS_SYNCED)) {
5530 tp->serdes_counter--;
5531 current_link_up = true;
5536 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5537 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5539 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5541 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5542 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5543 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5544 MAC_STATUS_SIGNAL_DET)) {
5545 sg_dig_status = tr32(SG_DIG_STATUS);
5546 mac_status = tr32(MAC_STATUS);
5548 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5549 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5550 u32 local_adv = 0, remote_adv = 0;
5552 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5553 local_adv |= ADVERTISE_1000XPAUSE;
5554 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5555 local_adv |= ADVERTISE_1000XPSE_ASYM;
5557 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5558 remote_adv |= LPA_1000XPAUSE;
5559 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5560 remote_adv |= LPA_1000XPAUSE_ASYM;
5562 tp->link_config.rmt_adv =
5563 mii_adv_to_ethtool_adv_x(remote_adv);
5565 tg3_setup_flow_control(tp, local_adv, remote_adv);
5566 current_link_up = true;
5567 tp->serdes_counter = 0;
5568 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5569 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5570 if (tp->serdes_counter)
5571 tp->serdes_counter--;
5574 u32 val = serdes_cfg;
5581 tw32_f(MAC_SERDES_CFG, val);
5584 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5587 /* Link parallel detection: link is up only if
5588 * we have PCS_SYNC and are not receiving
5589 * config code words. */
5590 mac_status = tr32(MAC_STATUS);
5591 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5592 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5593 tg3_setup_flow_control(tp, 0, 0);
5594 current_link_up = true;
5596 TG3_PHYFLG_PARALLEL_DETECT;
5597 tp->serdes_counter =
5598 SERDES_PARALLEL_DET_TIMEOUT;
5600 goto restart_autoneg;
5604 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5605 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5609 return current_link_up;
5612 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5614 bool current_link_up = false;
5616 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5619 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5620 u32 txflags, rxflags;
5623 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5624 u32 local_adv = 0, remote_adv = 0;
5626 if (txflags & ANEG_CFG_PS1)
5627 local_adv |= ADVERTISE_1000XPAUSE;
5628 if (txflags & ANEG_CFG_PS2)
5629 local_adv |= ADVERTISE_1000XPSE_ASYM;
5631 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5632 remote_adv |= LPA_1000XPAUSE;
5633 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5634 remote_adv |= LPA_1000XPAUSE_ASYM;
5636 tp->link_config.rmt_adv =
5637 mii_adv_to_ethtool_adv_x(remote_adv);
5639 tg3_setup_flow_control(tp, local_adv, remote_adv);
5641 current_link_up = true;
5643 for (i = 0; i < 30; i++) {
5646 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5647 MAC_STATUS_CFG_CHANGED));
5649 if ((tr32(MAC_STATUS) &
5650 (MAC_STATUS_SYNC_CHANGED |
5651 MAC_STATUS_CFG_CHANGED)) == 0)
5655 mac_status = tr32(MAC_STATUS);
5656 if (!current_link_up &&
5657 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5658 !(mac_status & MAC_STATUS_RCVD_CFG))
5659 current_link_up = true;
5661 tg3_setup_flow_control(tp, 0, 0);
5663 /* Forcing 1000FD link up. */
5664 current_link_up = true;
5666 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5669 tw32_f(MAC_MODE, tp->mac_mode);
5674 return current_link_up;
5677 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5680 u16 orig_active_speed;
5681 u8 orig_active_duplex;
5683 bool current_link_up;
5686 orig_pause_cfg = tp->link_config.active_flowctrl;
5687 orig_active_speed = tp->link_config.active_speed;
5688 orig_active_duplex = tp->link_config.active_duplex;
5690 if (!tg3_flag(tp, HW_AUTONEG) &&
5692 tg3_flag(tp, INIT_COMPLETE)) {
5693 mac_status = tr32(MAC_STATUS);
5694 mac_status &= (MAC_STATUS_PCS_SYNCED |
5695 MAC_STATUS_SIGNAL_DET |
5696 MAC_STATUS_CFG_CHANGED |
5697 MAC_STATUS_RCVD_CFG);
5698 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5699 MAC_STATUS_SIGNAL_DET)) {
5700 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5701 MAC_STATUS_CFG_CHANGED));
5706 tw32_f(MAC_TX_AUTO_NEG, 0);
5708 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5709 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5710 tw32_f(MAC_MODE, tp->mac_mode);
5713 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5714 tg3_init_bcm8002(tp);
5716 /* Enable link change event even when serdes polling. */
5717 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5720 current_link_up = false;
5721 tp->link_config.rmt_adv = 0;
5722 mac_status = tr32(MAC_STATUS);
5724 if (tg3_flag(tp, HW_AUTONEG))
5725 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5727 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5729 tp->napi[0].hw_status->status =
5730 (SD_STATUS_UPDATED |
5731 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5733 for (i = 0; i < 100; i++) {
5734 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5735 MAC_STATUS_CFG_CHANGED));
5737 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5738 MAC_STATUS_CFG_CHANGED |
5739 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5743 mac_status = tr32(MAC_STATUS);
5744 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5745 current_link_up = false;
5746 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5747 tp->serdes_counter == 0) {
5748 tw32_f(MAC_MODE, (tp->mac_mode |
5749 MAC_MODE_SEND_CONFIGS));
5751 tw32_f(MAC_MODE, tp->mac_mode);
5755 if (current_link_up) {
5756 tp->link_config.active_speed = SPEED_1000;
5757 tp->link_config.active_duplex = DUPLEX_FULL;
5758 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5759 LED_CTRL_LNKLED_OVERRIDE |
5760 LED_CTRL_1000MBPS_ON));
5762 tp->link_config.active_speed = SPEED_UNKNOWN;
5763 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5764 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5765 LED_CTRL_LNKLED_OVERRIDE |
5766 LED_CTRL_TRAFFIC_OVERRIDE));
5769 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5770 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5771 if (orig_pause_cfg != now_pause_cfg ||
5772 orig_active_speed != tp->link_config.active_speed ||
5773 orig_active_duplex != tp->link_config.active_duplex)
5774 tg3_link_report(tp);
5780 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5784 u16 current_speed = SPEED_UNKNOWN;
5785 u8 current_duplex = DUPLEX_UNKNOWN;
5786 bool current_link_up = false;
5787 u32 local_adv, remote_adv, sgsr;
5789 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5790 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5791 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5792 (sgsr & SERDES_TG3_SGMII_MODE)) {
5797 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5799 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5800 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5802 current_link_up = true;
5803 if (sgsr & SERDES_TG3_SPEED_1000) {
5804 current_speed = SPEED_1000;
5805 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5806 } else if (sgsr & SERDES_TG3_SPEED_100) {
5807 current_speed = SPEED_100;
5808 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5810 current_speed = SPEED_10;
5811 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5814 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5815 current_duplex = DUPLEX_FULL;
5817 current_duplex = DUPLEX_HALF;
5820 tw32_f(MAC_MODE, tp->mac_mode);
5823 tg3_clear_mac_status(tp);
5825 goto fiber_setup_done;
5828 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5829 tw32_f(MAC_MODE, tp->mac_mode);
5832 tg3_clear_mac_status(tp);
5837 tp->link_config.rmt_adv = 0;
5839 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5840 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5841 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5842 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5843 bmsr |= BMSR_LSTATUS;
5845 bmsr &= ~BMSR_LSTATUS;
5848 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5850 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5851 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5852 /* do nothing, just check for link up at the end */
5853 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5856 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5857 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5858 ADVERTISE_1000XPAUSE |
5859 ADVERTISE_1000XPSE_ASYM |
5862 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5863 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5865 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5866 tg3_writephy(tp, MII_ADVERTISE, newadv);
5867 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5868 tg3_writephy(tp, MII_BMCR, bmcr);
5870 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5871 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5872 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5879 bmcr &= ~BMCR_SPEED1000;
5880 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5882 if (tp->link_config.duplex == DUPLEX_FULL)
5883 new_bmcr |= BMCR_FULLDPLX;
5885 if (new_bmcr != bmcr) {
5886 /* BMCR_SPEED1000 is a reserved bit that needs
5887 * to be set on write.
5889 new_bmcr |= BMCR_SPEED1000;
5891 /* Force a linkdown */
5895 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5896 adv &= ~(ADVERTISE_1000XFULL |
5897 ADVERTISE_1000XHALF |
5899 tg3_writephy(tp, MII_ADVERTISE, adv);
5900 tg3_writephy(tp, MII_BMCR, bmcr |
5904 tg3_carrier_off(tp);
5906 tg3_writephy(tp, MII_BMCR, new_bmcr);
5908 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5909 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5910 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5911 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5912 bmsr |= BMSR_LSTATUS;
5914 bmsr &= ~BMSR_LSTATUS;
5916 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5920 if (bmsr & BMSR_LSTATUS) {
5921 current_speed = SPEED_1000;
5922 current_link_up = true;
5923 if (bmcr & BMCR_FULLDPLX)
5924 current_duplex = DUPLEX_FULL;
5926 current_duplex = DUPLEX_HALF;
5931 if (bmcr & BMCR_ANENABLE) {
5934 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5935 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5936 common = local_adv & remote_adv;
5937 if (common & (ADVERTISE_1000XHALF |
5938 ADVERTISE_1000XFULL)) {
5939 if (common & ADVERTISE_1000XFULL)
5940 current_duplex = DUPLEX_FULL;
5942 current_duplex = DUPLEX_HALF;
5944 tp->link_config.rmt_adv =
5945 mii_adv_to_ethtool_adv_x(remote_adv);
5946 } else if (!tg3_flag(tp, 5780_CLASS)) {
5947 /* Link is up via parallel detect */
5949 current_link_up = false;
5955 if (current_link_up && current_duplex == DUPLEX_FULL)
5956 tg3_setup_flow_control(tp, local_adv, remote_adv);
5958 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5959 if (tp->link_config.active_duplex == DUPLEX_HALF)
5960 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5962 tw32_f(MAC_MODE, tp->mac_mode);
5965 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5967 tp->link_config.active_speed = current_speed;
5968 tp->link_config.active_duplex = current_duplex;
5970 tg3_test_and_report_link_chg(tp, current_link_up);
5974 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5976 if (tp->serdes_counter) {
5977 /* Give autoneg time to complete. */
5978 tp->serdes_counter--;
5983 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5986 tg3_readphy(tp, MII_BMCR, &bmcr);
5987 if (bmcr & BMCR_ANENABLE) {
5990 /* Select shadow register 0x1f */
5991 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5992 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5994 /* Select expansion interrupt status register */
5995 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5996 MII_TG3_DSP_EXP1_INT_STAT);
5997 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5998 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6000 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6001 /* We have signal detect and are not receiving
6002 * config code words; link is up by parallel detection. */
6006 bmcr &= ~BMCR_ANENABLE;
6007 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6008 tg3_writephy(tp, MII_BMCR, bmcr);
6009 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6012 } else if (tp->link_up &&
6013 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6014 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6017 /* Select expansion interrupt status register */
6018 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6019 MII_TG3_DSP_EXP1_INT_STAT);
6020 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6024 /* Config code words received, turn on autoneg. */
6025 tg3_readphy(tp, MII_BMCR, &bmcr);
6026 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6028 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6034 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6039 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6040 err = tg3_setup_fiber_phy(tp, force_reset);
6041 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6042 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6044 err = tg3_setup_copper_phy(tp, force_reset);
6046 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6049 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6050 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6052 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6057 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6058 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6059 tw32(GRC_MISC_CFG, val);
6062 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6063 (6 << TX_LENGTHS_IPG_SHIFT);
6064 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6065 tg3_asic_rev(tp) == ASIC_REV_5762)
6066 val |= tr32(MAC_TX_LENGTHS) &
6067 (TX_LENGTHS_JMB_FRM_LEN_MSK |
6068 TX_LENGTHS_CNT_DWN_VAL_MSK);
6070 if (tp->link_config.active_speed == SPEED_1000 &&
6071 tp->link_config.active_duplex == DUPLEX_HALF)
6072 tw32(MAC_TX_LENGTHS, val |
6073 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6075 tw32(MAC_TX_LENGTHS, val |
6076 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6078 if (!tg3_flag(tp, 5705_PLUS)) {
6080 tw32(HOSTCC_STAT_COAL_TICKS,
6081 tp->coal.stats_block_coalesce_usecs);
6083 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6087 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6088 val = tr32(PCIE_PWR_MGMT_THRESH);
6090 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6093 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6094 tw32(PCIE_PWR_MGMT_THRESH, val);
6100 /* tp->lock must be held */
6101 static u64 tg3_refclk_read(struct tg3 *tp)
6103 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6104 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
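/* Editor's sketch (not part of the original driver): composing the two
 * 32-bit halves exactly as tg3_refclk_read() does above. For example,
 * msb = 0x1 and lsb = 0x80000000 compose to 0x180000000 ns, i.e. about
 * 6.44 seconds of reference-clock time.
 */
static inline u64 tg3_example_compose_refclk(u32 msb, u32 lsb)
{
	return (u64)msb << 32 | lsb;
}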
6107 /* tp->lock must be held */
6108 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6110 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6112 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6113 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6114 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6115 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6118 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6119 static inline void tg3_full_unlock(struct tg3 *tp);
6120 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6122 struct tg3 *tp = netdev_priv(dev);
6124 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6125 SOF_TIMESTAMPING_RX_SOFTWARE |
6126 SOF_TIMESTAMPING_SOFTWARE;
6128 if (tg3_flag(tp, PTP_CAPABLE)) {
6129 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6130 SOF_TIMESTAMPING_RX_HARDWARE |
6131 SOF_TIMESTAMPING_RAW_HARDWARE;
6135 info->phc_index = ptp_clock_index(tp->ptp_clock);
6137 info->phc_index = -1;
6139 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6141 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6142 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6143 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6144 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
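/* Editor's note: these capabilities are what userspace queries through
 * the ETHTOOL_GET_TS_INFO ioctl, e.g. "ethtool -T <dev>".
 */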
6148 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6150 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6151 bool neg_adj = false;
6159 /* Frequency adjustment is performed using hardware with a 24 bit
6160 * accumulator and a programmable correction value. On each clk, the
6161 * correction value gets added to the accumulator and when it
6162 * overflows, the time counter is incremented/decremented.
6164 * So conversion from ppb to correction value is
6165 * ppb * (1 << 24) / 1000000000
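*
* Editor's example: a requested adjustment of ppb = 1000 (1 ppm)
* yields correction = 1000 * 16777216 / 1000000000 = 16777 (0x4189).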
6167 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6168 TG3_EAV_REF_CLK_CORRECT_MASK;
6170 tg3_full_lock(tp, 0);
6173 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6174 TG3_EAV_REF_CLK_CORRECT_EN |
6175 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6177 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6179 tg3_full_unlock(tp);
6184 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6186 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6188 tg3_full_lock(tp, 0);
6189 tp->ptp_adjust += delta;
6190 tg3_full_unlock(tp);
6195 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
6199 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6201 tg3_full_lock(tp, 0);
6202 ns = tg3_refclk_read(tp);
6203 ns += tp->ptp_adjust;
6204 tg3_full_unlock(tp);
6206 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
6207 ts->tv_nsec = remainder;
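/* Editor's example: ns = 1234567890 splits into tv_sec = 1 and
 * tv_nsec = 234567890.
 */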
6212 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6213 const struct timespec *ts)
6216 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6218 ns = timespec_to_ns(ts);
6220 tg3_full_lock(tp, 0);
6221 tg3_refclk_write(tp, ns);
6223 tg3_full_unlock(tp);
6228 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6229 struct ptp_clock_request *rq, int on)
6231 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6236 case PTP_CLK_REQ_PEROUT:
6237 if (rq->perout.index != 0)
6240 tg3_full_lock(tp, 0);
6241 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6242 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6247 nsec = rq->perout.start.sec * 1000000000ULL +
6248 rq->perout.start.nsec;
6250 if (rq->perout.period.sec || rq->perout.period.nsec) {
6251 netdev_warn(tp->dev,
6252 "Device supports only a one-shot timesync output, period must be 0\n");
6257 if (nsec & (1ULL << 63)) {
6258 netdev_warn(tp->dev,
6259 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6264 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6265 tw32(TG3_EAV_WATCHDOG0_MSB,
6266 TG3_EAV_WATCHDOG0_EN |
6267 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6269 tw32(TG3_EAV_REF_CLCK_CTL,
6270 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6272 tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6273 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6277 tg3_full_unlock(tp);
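/* Editor's sketch (illustrative, not in the original driver): building
 * a request that tg3_ptp_enable() above will accept -- output index 0,
 * zero period (one-shot), and a start time below 2^63 ns.
 */
static inline void tg3_example_fill_perout(struct ptp_clock_request *rq,
					   s64 start_sec, u32 start_nsec)
{
	rq->type = PTP_CLK_REQ_PEROUT;
	rq->perout.index = 0;		/* only output 0 is supported */
	rq->perout.start.sec = start_sec;
	rq->perout.start.nsec = start_nsec;
	rq->perout.period.sec = 0;	/* one-shot: period must be 0 */
	rq->perout.period.nsec = 0;
}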
6287 static const struct ptp_clock_info tg3_ptp_caps = {
6288 .owner = THIS_MODULE,
6289 .name = "tg3 clock",
6290 .max_adj = 250000000,
6295 .adjfreq = tg3_ptp_adjfreq,
6296 .adjtime = tg3_ptp_adjtime,
6297 .gettime = tg3_ptp_gettime,
6298 .settime = tg3_ptp_settime,
6299 .enable = tg3_ptp_enable,
6302 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6303 struct skb_shared_hwtstamps *timestamp)
6305 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6306 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) + tp->ptp_adjust);
6310 /* tp->lock must be held */
6311 static void tg3_ptp_init(struct tg3 *tp)
6313 if (!tg3_flag(tp, PTP_CAPABLE))
6316 /* Initialize the hardware clock to the system time. */
6317 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6319 tp->ptp_info = tg3_ptp_caps;
6322 /* tp->lock must be held */
6323 static void tg3_ptp_resume(struct tg3 *tp)
6325 if (!tg3_flag(tp, PTP_CAPABLE))
6328 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6332 static void tg3_ptp_fini(struct tg3 *tp)
6334 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6337 ptp_clock_unregister(tp->ptp_clock);
6338 tp->ptp_clock = NULL;
6342 static inline int tg3_irq_sync(struct tg3 *tp)
6344 return tp->irq_sync;
6347 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6351 dst = (u32 *)((u8 *)dst + off);
6352 for (i = 0; i < len; i += sizeof(u32))
6353 *dst++ = tr32(off + i);
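/* Editor's example: the call tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0)
 * below reads the 0x4f0 bytes of registers starting at MAC_MODE into
 * the dump buffer at the same offsets, i.e. regs[MAC_MODE / 4] onward.
 */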
6356 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6358 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6359 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6360 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6361 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6362 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6363 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6364 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6365 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6366 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6367 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6368 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6369 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6370 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6371 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6372 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6373 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6374 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6375 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6376 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6378 if (tg3_flag(tp, SUPPORT_MSIX))
6379 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6381 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6382 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6383 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6384 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6385 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6386 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6387 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6388 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6390 if (!tg3_flag(tp, 5705_PLUS)) {
6391 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6392 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6393 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6396 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6397 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6398 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6399 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6400 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6402 if (tg3_flag(tp, NVRAM))
6403 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6406 static void tg3_dump_state(struct tg3 *tp)
6411 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6415 if (tg3_flag(tp, PCI_EXPRESS)) {
6416 /* Read up to but not including private PCI registers */
6417 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6418 regs[i / sizeof(u32)] = tr32(i);
6420 tg3_dump_legacy_regs(tp, regs);
6422 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6423 if (!regs[i + 0] && !regs[i + 1] &&
6424 !regs[i + 2] && !regs[i + 3])
6427 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6429 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6434 for (i = 0; i < tp->irq_cnt; i++) {
6435 struct tg3_napi *tnapi = &tp->napi[i];
6437 /* SW status block */
6439 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6441 tnapi->hw_status->status,
6442 tnapi->hw_status->status_tag,
6443 tnapi->hw_status->rx_jumbo_consumer,
6444 tnapi->hw_status->rx_consumer,
6445 tnapi->hw_status->rx_mini_consumer,
6446 tnapi->hw_status->idx[0].rx_producer,
6447 tnapi->hw_status->idx[0].tx_consumer);
6450 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6452 tnapi->last_tag, tnapi->last_irq_tag,
6453 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6455 tnapi->prodring.rx_std_prod_idx,
6456 tnapi->prodring.rx_std_cons_idx,
6457 tnapi->prodring.rx_jmb_prod_idx,
6458 tnapi->prodring.rx_jmb_cons_idx);
6462 /* This is called whenever we suspect that the system chipset is re-
6463 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6464 * is bogus tx completions. We try to recover by setting the
6465 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later. */
6468 static void tg3_tx_recover(struct tg3 *tp)
6470 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6471 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6473 netdev_warn(tp->dev,
6474 "The system may be re-ordering memory-mapped I/O "
6475 "cycles to the network device, attempting to recover. "
6476 "Please report the problem to the driver maintainer "
6477 "and include system chipset information.\n");
6479 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6482 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6484 /* Tell compiler to fetch tx indices from memory. */
6486 return tnapi->tx_pending -
6487 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
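/* Editor's example, assuming the usual 512-entry TX ring: with
 * tx_pending == 511, tx_prod == 8 and tx_cons == 504, the ring holds
 * (8 - 504) & 511 == 16 in-flight descriptors, leaving 495 available.
 */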
6490 /* Tigon3 never reports partial packet sends. So we do not
6491 * need special logic to handle SKBs that have not had all
6492 * of their frags sent yet, like SunGEM does.
6494 static void tg3_tx(struct tg3_napi *tnapi)
6496 struct tg3 *tp = tnapi->tp;
6497 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6498 u32 sw_idx = tnapi->tx_cons;
6499 struct netdev_queue *txq;
6500 int index = tnapi - tp->napi;
6501 unsigned int pkts_compl = 0, bytes_compl = 0;
6503 if (tg3_flag(tp, ENABLE_TSS))
6506 txq = netdev_get_tx_queue(tp->dev, index);
6508 while (sw_idx != hw_idx) {
6509 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6510 struct sk_buff *skb = ri->skb;
6513 if (unlikely(skb == NULL)) {
6518 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6519 struct skb_shared_hwtstamps timestamp;
6520 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6521 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6523 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6525 skb_tstamp_tx(skb, &timestamp);
6528 pci_unmap_single(tp->pdev,
6529 dma_unmap_addr(ri, mapping),
6535 while (ri->fragmented) {
6536 ri->fragmented = false;
6537 sw_idx = NEXT_TX(sw_idx);
6538 ri = &tnapi->tx_buffers[sw_idx];
6541 sw_idx = NEXT_TX(sw_idx);
6543 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6544 ri = &tnapi->tx_buffers[sw_idx];
6545 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6548 pci_unmap_page(tp->pdev,
6549 dma_unmap_addr(ri, mapping),
6550 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6553 while (ri->fragmented) {
6554 ri->fragmented = false;
6555 sw_idx = NEXT_TX(sw_idx);
6556 ri = &tnapi->tx_buffers[sw_idx];
6559 sw_idx = NEXT_TX(sw_idx);
6563 bytes_compl += skb->len;
6567 if (unlikely(tx_bug)) {
6573 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6575 tnapi->tx_cons = sw_idx;
6577 /* Need to make the tx_cons update visible to tg3_start_xmit()
6578 * before checking for netif_queue_stopped(). Without the
6579 * memory barrier, there is a small possibility that tg3_start_xmit()
6580 * will miss it and cause the queue to be stopped forever.
6584 if (unlikely(netif_tx_queue_stopped(txq) &&
6585 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6586 __netif_tx_lock(txq, smp_processor_id());
6587 if (netif_tx_queue_stopped(txq) &&
6588 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6589 netif_tx_wake_queue(txq);
6590 __netif_tx_unlock(txq);
6594 static void tg3_frag_free(bool is_frag, void *data)
6597 put_page(virt_to_head_page(data));
6602 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6604 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6605 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6610 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6611 map_sz, PCI_DMA_FROMDEVICE);
6612 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6617 /* Returns size of skb allocated or < 0 on error.
6619 * We only need to fill in the address because the other members
6620 * of the RX descriptor are invariant, see tg3_init_rings.
6622 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6623 * posting buffers we only dirty the first cache line of the RX
6624 * descriptor (containing the address). Whereas for the RX status
6625 * buffers the cpu only reads the last cacheline of the RX descriptor
6626 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6628 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6629 u32 opaque_key, u32 dest_idx_unmasked,
6630 unsigned int *frag_size)
6632 struct tg3_rx_buffer_desc *desc;
6633 struct ring_info *map;
6636 int skb_size, data_size, dest_idx;
6638 switch (opaque_key) {
6639 case RXD_OPAQUE_RING_STD:
6640 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6641 desc = &tpr->rx_std[dest_idx];
6642 map = &tpr->rx_std_buffers[dest_idx];
6643 data_size = tp->rx_pkt_map_sz;
6646 case RXD_OPAQUE_RING_JUMBO:
6647 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6648 desc = &tpr->rx_jmb[dest_idx].std;
6649 map = &tpr->rx_jmb_buffers[dest_idx];
6650 data_size = TG3_RX_JMB_MAP_SZ;
6657 /* Do not overwrite any of the map or rp information
6658 * until we are sure we can commit to a new buffer.
6660 * Callers depend upon this behavior and assume that
6661 * we leave everything unchanged if we fail.
6663 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6664 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6665 if (skb_size <= PAGE_SIZE) {
6666 data = netdev_alloc_frag(skb_size);
6667 *frag_size = skb_size;
6669 data = kmalloc(skb_size, GFP_ATOMIC);
6675 mapping = pci_map_single(tp->pdev,
6676 data + TG3_RX_OFFSET(tp),
6678 PCI_DMA_FROMDEVICE);
6679 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6680 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6685 dma_unmap_addr_set(map, mapping, mapping);
6687 desc->addr_hi = ((u64)mapping >> 32);
6688 desc->addr_lo = ((u64)mapping & 0xffffffff);
6693 /* We only need to move the address over because the other
6694 * members of the RX descriptor are invariant. See notes above
6695 * tg3_alloc_rx_data for full details.
6697 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6698 struct tg3_rx_prodring_set *dpr,
6699 u32 opaque_key, int src_idx,
6700 u32 dest_idx_unmasked)
6702 struct tg3 *tp = tnapi->tp;
6703 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6704 struct ring_info *src_map, *dest_map;
6705 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6708 switch (opaque_key) {
6709 case RXD_OPAQUE_RING_STD:
6710 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6711 dest_desc = &dpr->rx_std[dest_idx];
6712 dest_map = &dpr->rx_std_buffers[dest_idx];
6713 src_desc = &spr->rx_std[src_idx];
6714 src_map = &spr->rx_std_buffers[src_idx];
6717 case RXD_OPAQUE_RING_JUMBO:
6718 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6719 dest_desc = &dpr->rx_jmb[dest_idx].std;
6720 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6721 src_desc = &spr->rx_jmb[src_idx].std;
6722 src_map = &spr->rx_jmb_buffers[src_idx];
6729 dest_map->data = src_map->data;
6730 dma_unmap_addr_set(dest_map, mapping,
6731 dma_unmap_addr(src_map, mapping));
6732 dest_desc->addr_hi = src_desc->addr_hi;
6733 dest_desc->addr_lo = src_desc->addr_lo;
6735 /* Ensure that the update to the skb happens after the physical
6736 * addresses have been transferred to the new BD location.
6740 src_map->data = NULL;
6743 /* The RX ring scheme is composed of multiple rings which post fresh
6744 * buffers to the chip, and one special ring the chip uses to report
6745 * status back to the host.
6747 * The special ring reports the status of received packets to the
6748 * host. The chip does not write into the original descriptor the
6749 * RX buffer was obtained from. The chip simply takes the original
6750 * descriptor as provided by the host, updates the status and length
6751 * fields, then writes this into the next status ring entry.
6753 * Each ring the host uses to post buffers to the chip is described
6754 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
6755 * it is first placed into the on-chip ram. When the packet's length
6756 * is known, it walks down the TG3_BDINFO entries to select the ring.
6757 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6758 * which is within the range of the new packet's length is chosen.
6760 * The "separate ring for rx status" scheme may sound queer, but it makes
6761 * sense from a cache coherency perspective. If only the host writes
6762 * to the buffer post rings, and only the chip writes to the rx status
6763 * rings, then cache lines never move beyond shared-modified state.
6764 * If both the host and chip were to write into the same ring, cache line
6765 * eviction could occur since both entities want it in an exclusive state.
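/* Editor's sketch of the scheme described above (illustrative only):
 *
 *   host --(fresh buffers)--> std/jumbo producer rings --> chip
 *   host <--(status+length)--  rx return ring  <---------- chip
 *
 * Each ring has exactly one writer, which is what keeps cache lines
 * from bouncing between exclusive owners.
 */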
6767 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6769 struct tg3 *tp = tnapi->tp;
6770 u32 work_mask, rx_std_posted = 0;
6771 u32 std_prod_idx, jmb_prod_idx;
6772 u32 sw_idx = tnapi->rx_rcb_ptr;
6775 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6777 hw_idx = *(tnapi->rx_rcb_prod_idx);
6779 * We need to order the read of hw_idx and the read of
6780 * the opaque cookie.
6785 std_prod_idx = tpr->rx_std_prod_idx;
6786 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6787 while (sw_idx != hw_idx && budget > 0) {
6788 struct ring_info *ri;
6789 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6791 struct sk_buff *skb;
6792 dma_addr_t dma_addr;
6793 u32 opaque_key, desc_idx, *post_ptr;
6797 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6798 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6799 if (opaque_key == RXD_OPAQUE_RING_STD) {
6800 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6801 dma_addr = dma_unmap_addr(ri, mapping);
6803 post_ptr = &std_prod_idx;
6805 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6806 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6807 dma_addr = dma_unmap_addr(ri, mapping);
6809 post_ptr = &jmb_prod_idx;
6811 goto next_pkt_nopost;
6813 work_mask |= opaque_key;
6815 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6816 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6818 tg3_recycle_rx(tnapi, tpr, opaque_key,
6819 desc_idx, *post_ptr);
6821 /* Other statistics are kept track of by the card. */
6826 prefetch(data + TG3_RX_OFFSET(tp));
6827 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6830 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6831 RXD_FLAG_PTPSTAT_PTPV1 ||
6832 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6833 RXD_FLAG_PTPSTAT_PTPV2) {
6834 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6835 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6838 if (len > TG3_RX_COPY_THRESH(tp)) {
6840 unsigned int frag_size;
6842 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6843 *post_ptr, &frag_size);
6847 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6848 PCI_DMA_FROMDEVICE);
6850 skb = build_skb(data, frag_size);
6852 tg3_frag_free(frag_size != 0, data);
6853 goto drop_it_no_recycle;
6855 skb_reserve(skb, TG3_RX_OFFSET(tp));
6856 /* Ensure that the update to the data happens
6857 * after the usage of the old DMA mapping.
6864 tg3_recycle_rx(tnapi, tpr, opaque_key,
6865 desc_idx, *post_ptr);
6867 skb = netdev_alloc_skb(tp->dev,
6868 len + TG3_RAW_IP_ALIGN);
6870 goto drop_it_no_recycle;
6872 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6873 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6875 data + TG3_RX_OFFSET(tp),
6877 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6882 tg3_hwclock_to_timestamp(tp, tstamp,
6883 skb_hwtstamps(skb));
6885 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6886 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6887 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6888 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6889 skb->ip_summed = CHECKSUM_UNNECESSARY;
6891 skb_checksum_none_assert(skb);
6893 skb->protocol = eth_type_trans(skb, tp->dev);
6895 if (len > (tp->dev->mtu + ETH_HLEN) &&
6896 skb->protocol != htons(ETH_P_8021Q)) {
6898 goto drop_it_no_recycle;
6901 if (desc->type_flags & RXD_FLAG_VLAN &&
6902 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6903 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6904 desc->err_vlan & RXD_VLAN_MASK);
6906 napi_gro_receive(&tnapi->napi, skb);
6914 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6915 tpr->rx_std_prod_idx = std_prod_idx &
6916 tp->rx_std_ring_mask;
6917 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6918 tpr->rx_std_prod_idx);
6919 work_mask &= ~RXD_OPAQUE_RING_STD;
6924 sw_idx &= tp->rx_ret_ring_mask;
6926 /* Refresh hw_idx to see if there is new work */
6927 if (sw_idx == hw_idx) {
6928 hw_idx = *(tnapi->rx_rcb_prod_idx);
6933 /* ACK the status ring. */
6934 tnapi->rx_rcb_ptr = sw_idx;
6935 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6937 /* Refill RX ring(s). */
6938 if (!tg3_flag(tp, ENABLE_RSS)) {
6939 /* Sync BD data before updating mailbox */
6942 if (work_mask & RXD_OPAQUE_RING_STD) {
6943 tpr->rx_std_prod_idx = std_prod_idx &
6944 tp->rx_std_ring_mask;
6945 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6946 tpr->rx_std_prod_idx);
6948 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6949 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6950 tp->rx_jmb_ring_mask;
6951 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6952 tpr->rx_jmb_prod_idx);
6955 } else if (work_mask) {
6956 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6957 * updated before the producer indices can be updated.
6961 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6962 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6964 if (tnapi != &tp->napi[1]) {
6965 tp->rx_refill = true;
6966 napi_schedule(&tp->napi[1].napi);
6973 static void tg3_poll_link(struct tg3 *tp)
6975 /* handle link change and other phy events */
6976 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6977 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6979 if (sblk->status & SD_STATUS_LINK_CHG) {
6980 sblk->status = SD_STATUS_UPDATED |
6981 (sblk->status & ~SD_STATUS_LINK_CHG);
6982 spin_lock(&tp->lock);
6983 if (tg3_flag(tp, USE_PHYLIB)) {
6985 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
6986 MAC_STATUS_CFG_CHANGED |
6987 MAC_STATUS_MI_COMPLETION |
6988 MAC_STATUS_LNKSTATE_CHANGED));
6991 tg3_setup_phy(tp, false);
6992 spin_unlock(&tp->lock);
6997 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6998 struct tg3_rx_prodring_set *dpr,
6999 struct tg3_rx_prodring_set *spr)
7001 u32 si, di, cpycnt, src_prod_idx;
7005 src_prod_idx = spr->rx_std_prod_idx;
7007 /* Make sure updates to the rx_std_buffers[] entries and the
7008 * standard producer index are seen in the correct order.
7012 if (spr->rx_std_cons_idx == src_prod_idx)
7015 if (spr->rx_std_cons_idx < src_prod_idx)
7016 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7018 cpycnt = tp->rx_std_ring_mask + 1 -
7019 spr->rx_std_cons_idx;
7021 cpycnt = min(cpycnt,
7022 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
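/* Editor's example: with mask 511, cons_idx 500 and prod_idx 10, this
 * pass copies the 12 entries 500..511 (destination space permitting)
 * and the loop comes around again for the remaining 10.
 */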
7024 si = spr->rx_std_cons_idx;
7025 di = dpr->rx_std_prod_idx;
7027 for (i = di; i < di + cpycnt; i++) {
7028 if (dpr->rx_std_buffers[i].data) {
7038 /* Ensure that updates to the rx_std_buffers ring and the
7039 * shadowed hardware producer ring from tg3_recycle_rx() are
7040 * ordered correctly WRT the skb check above.
7044 memcpy(&dpr->rx_std_buffers[di],
7045 &spr->rx_std_buffers[si],
7046 cpycnt * sizeof(struct ring_info));
7048 for (i = 0; i < cpycnt; i++, di++, si++) {
7049 struct tg3_rx_buffer_desc *sbd, *dbd;
7050 sbd = &spr->rx_std[si];
7051 dbd = &dpr->rx_std[di];
7052 dbd->addr_hi = sbd->addr_hi;
7053 dbd->addr_lo = sbd->addr_lo;
7056 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7057 tp->rx_std_ring_mask;
7058 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7059 tp->rx_std_ring_mask;
7063 src_prod_idx = spr->rx_jmb_prod_idx;
7065 /* Make sure updates to the rx_jmb_buffers[] entries and
7066 * the jumbo producer index are seen in the correct order.
7070 if (spr->rx_jmb_cons_idx == src_prod_idx)
7073 if (spr->rx_jmb_cons_idx < src_prod_idx)
7074 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7076 cpycnt = tp->rx_jmb_ring_mask + 1 -
7077 spr->rx_jmb_cons_idx;
7079 cpycnt = min(cpycnt,
7080 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7082 si = spr->rx_jmb_cons_idx;
7083 di = dpr->rx_jmb_prod_idx;
7085 for (i = di; i < di + cpycnt; i++) {
7086 if (dpr->rx_jmb_buffers[i].data) {
7096 /* Ensure that updates to the rx_jmb_buffers ring and the
7097 * shadowed hardware producer ring from tg3_recycle_rx() are
7098 * ordered correctly WRT the skb check above.
7102 memcpy(&dpr->rx_jmb_buffers[di],
7103 &spr->rx_jmb_buffers[si],
7104 cpycnt * sizeof(struct ring_info));
7106 for (i = 0; i < cpycnt; i++, di++, si++) {
7107 struct tg3_rx_buffer_desc *sbd, *dbd;
7108 sbd = &spr->rx_jmb[si].std;
7109 dbd = &dpr->rx_jmb[di].std;
7110 dbd->addr_hi = sbd->addr_hi;
7111 dbd->addr_lo = sbd->addr_lo;
7114 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7115 tp->rx_jmb_ring_mask;
7116 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7117 tp->rx_jmb_ring_mask;
7123 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7125 struct tg3 *tp = tnapi->tp;
7127 /* run TX completion thread */
7128 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7130 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7134 if (!tnapi->rx_rcb_prod_idx)
7137 /* run RX thread, within the bounds set by NAPI.
7138 * All RX "locking" is done by ensuring outside
7139 * code synchronizes with tg3->napi.poll()
7141 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7142 work_done += tg3_rx(tnapi, budget - work_done);
7144 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7145 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7147 u32 std_prod_idx = dpr->rx_std_prod_idx;
7148 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7150 tp->rx_refill = false;
7151 for (i = 1; i <= tp->rxq_cnt; i++)
7152 err |= tg3_rx_prodring_xfer(tp, dpr,
7153 &tp->napi[i].prodring);
7157 if (std_prod_idx != dpr->rx_std_prod_idx)
7158 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7159 dpr->rx_std_prod_idx);
7161 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7162 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7163 dpr->rx_jmb_prod_idx);
7168 tw32_f(HOSTCC_MODE, tp->coal_now);
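/* In tg3_reset_task_schedule() below, RESET_TASK_PENDING doubles as a
 * scheduling guard: test_and_set_bit() is atomic, so concurrent callers
 * cannot queue the reset task twice.
 */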
7174 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7176 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7177 schedule_work(&tp->reset_task);
7180 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7182 cancel_work_sync(&tp->reset_task);
7183 tg3_flag_clear(tp, RESET_TASK_PENDING);
7184 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7187 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7189 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7190 struct tg3 *tp = tnapi->tp;
7192 struct tg3_hw_status *sblk = tnapi->hw_status;
7195 work_done = tg3_poll_work(tnapi, work_done, budget);
7197 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7200 if (unlikely(work_done >= budget))
7203 /* tp->last_tag is used in tg3_int_reenable() below
7204 * to tell the hw how much work has been processed,
7205 * so we must read it before checking for more work.
7207 tnapi->last_tag = sblk->status_tag;
7208 tnapi->last_irq_tag = tnapi->last_tag;
7209 rmb();
7211 /* check for RX/TX work to do */
7212 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7213 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7215 /* This test here is not race free, but will reduce
7216 * the number of interrupts by looping again.
7218 if (tnapi == &tp->napi[1] && tp->rx_refill)
7221 napi_complete(napi);
7222 /* Reenable interrupts. */
7223 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7225 /* This test here is synchronized by napi_schedule()
7226 * and napi_complete() to close the race condition.
7228 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7229 tw32(HOSTCC_MODE, tp->coalesce_mode |
7230 HOSTCC_MODE_ENABLE |
7241 /* work_done is guaranteed to be less than budget. */
7242 napi_complete(napi);
7243 tg3_reset_task_schedule(tp);
7247 static void tg3_process_error(struct tg3 *tp)
7250 bool real_error = false;
7252 if (tg3_flag(tp, ERROR_PROCESSED))
7255 /* Check Flow Attention register */
7256 val = tr32(HOSTCC_FLOW_ATTN);
7257 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7258 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7262 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7263 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7267 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7268 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7277 tg3_flag_set(tp, ERROR_PROCESSED);
7278 tg3_reset_task_schedule(tp);
7281 static int tg3_poll(struct napi_struct *napi, int budget)
7283 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7284 struct tg3 *tp = tnapi->tp;
7286 struct tg3_hw_status *sblk = tnapi->hw_status;
7289 if (sblk->status & SD_STATUS_ERROR)
7290 tg3_process_error(tp);
7294 work_done = tg3_poll_work(tnapi, work_done, budget);
7296 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7299 if (unlikely(work_done >= budget))
7302 if (tg3_flag(tp, TAGGED_STATUS)) {
7303 /* tp->last_tag is used in tg3_int_reenable() below
7304 * to tell the hw how much work has been processed,
7305 * so we must read it before checking for more work.
7307 tnapi->last_tag = sblk->status_tag;
7308 tnapi->last_irq_tag = tnapi->last_tag;
7309 rmb();
7311 sblk->status &= ~SD_STATUS_UPDATED;
7313 if (likely(!tg3_has_work(tnapi))) {
7314 napi_complete(napi);
7315 tg3_int_reenable(tnapi);
7323 /* work_done is guaranteed to be less than budget. */
7324 napi_complete(napi);
7325 tg3_reset_task_schedule(tp);
7329 static void tg3_napi_disable(struct tg3 *tp)
7333 for (i = tp->irq_cnt - 1; i >= 0; i--)
7334 napi_disable(&tp->napi[i].napi);
7337 static void tg3_napi_enable(struct tg3 *tp)
7341 for (i = 0; i < tp->irq_cnt; i++)
7342 napi_enable(&tp->napi[i].napi);
7345 static void tg3_napi_init(struct tg3 *tp)
7349 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7350 for (i = 1; i < tp->irq_cnt; i++)
7351 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7354 static void tg3_napi_fini(struct tg3 *tp)
7358 for (i = 0; i < tp->irq_cnt; i++)
7359 netif_napi_del(&tp->napi[i].napi);
7362 static inline void tg3_netif_stop(struct tg3 *tp)
7364 tp->dev->trans_start = jiffies; /* prevent tx timeout */
7365 tg3_napi_disable(tp);
7366 netif_carrier_off(tp->dev);
7367 netif_tx_disable(tp->dev);
7370 /* tp->lock must be held */
7371 static inline void tg3_netif_start(struct tg3 *tp)
7375 /* NOTE: unconditional netif_tx_wake_all_queues is only
7376 * appropriate so long as all callers are assured to
7377 * have free tx slots (such as after tg3_init_hw)
7379 netif_tx_wake_all_queues(tp->dev);
7381 if (tp->link_up)
7382 netif_carrier_on(tp->dev);
7384 tg3_napi_enable(tp);
7385 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7386 tg3_enable_ints(tp);
7389 static void tg3_irq_quiesce(struct tg3 *tp)
7393 BUG_ON(tp->irq_sync);
7395 tp->irq_sync = 1;
7396 smp_mb();
7398 for (i = 0; i < tp->irq_cnt; i++)
7399 synchronize_irq(tp->napi[i].irq_vec);
7402 /* Fully shut down all tg3 driver activity elsewhere in the system.
7403 * If irq_sync is non-zero, the IRQ handler must be synchronized with
7404 * this shutdown as well. Most of the time this is not necessary, except
7405 * when shutting down the device.
7407 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7409 spin_lock_bh(&tp->lock);
7410 if (irq_sync)
7411 tg3_irq_quiesce(tp);
7414 static inline void tg3_full_unlock(struct tg3 *tp)
7416 spin_unlock_bh(&tp->lock);
7419 /* One-shot MSI handler - Chip automatically disables interrupt
7420 * after sending MSI so driver doesn't have to do it.
7422 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7424 struct tg3_napi *tnapi = dev_id;
7425 struct tg3 *tp = tnapi->tp;
7427 prefetch(tnapi->hw_status);
7428 if (tnapi->rx_rcb)
7429 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7431 if (likely(!tg3_irq_sync(tp)))
7432 napi_schedule(&tnapi->napi);
7437 /* MSI ISR - No need to check for interrupt sharing and no need to
7438 * flush status block and interrupt mailbox. PCI ordering rules
7439 * guarantee that MSI will arrive after the status block.
7441 static irqreturn_t tg3_msi(int irq, void *dev_id)
7443 struct tg3_napi *tnapi = dev_id;
7444 struct tg3 *tp = tnapi->tp;
7446 prefetch(tnapi->hw_status);
7447 if (tnapi->rx_rcb)
7448 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7450 * Writing any value to intr-mbox-0 clears PCI INTA# and
7451 * chip-internal interrupt pending events.
7452 * Writing non-zero to intr-mbox-0 additionally tells the
7453 * NIC to stop sending us irqs, engaging "in-intr-handler"
7456 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7457 if (likely(!tg3_irq_sync(tp)))
7458 napi_schedule(&tnapi->napi);
7460 return IRQ_RETVAL(1);
7463 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7465 struct tg3_napi *tnapi = dev_id;
7466 struct tg3 *tp = tnapi->tp;
7467 struct tg3_hw_status *sblk = tnapi->hw_status;
7468 unsigned int handled = 1;
7470 /* In INTx mode, it is possible for the interrupt to arrive at
7471 * the CPU before the status block that was posted prior to the interrupt.
7472 * Reading the PCI State register will confirm whether the
7473 * interrupt is ours and will flush the status block.
7475 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7476 if (tg3_flag(tp, CHIP_RESETTING) ||
7477 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7484 * Writing any value to intr-mbox-0 clears PCI INTA# and
7485 * chip-internal interrupt pending events.
7486 * Writing non-zero to intr-mbox-0 additionally tells the
7487 * NIC to stop sending us irqs, engaging "in-intr-handler"
7490 * Flush the mailbox to de-assert the IRQ immediately to prevent
7491 * spurious interrupts. The flush impacts performance but
7492 * excessive spurious interrupts can be worse in some cases.
7494 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7495 if (tg3_irq_sync(tp))
7497 sblk->status &= ~SD_STATUS_UPDATED;
7498 if (likely(tg3_has_work(tnapi))) {
7499 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7500 napi_schedule(&tnapi->napi);
7502 /* No work, shared interrupt perhaps? re-enable
7503 * interrupts, and flush that PCI write
7505 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7509 return IRQ_RETVAL(handled);
7512 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7514 struct tg3_napi *tnapi = dev_id;
7515 struct tg3 *tp = tnapi->tp;
7516 struct tg3_hw_status *sblk = tnapi->hw_status;
7517 unsigned int handled = 1;
7519 /* In INTx mode, it is possible for the interrupt to arrive at
7520 * the CPU before the status block that was posted prior to the interrupt.
7521 * Reading the PCI State register will confirm whether the
7522 * interrupt is ours and will flush the status block.
7524 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7525 if (tg3_flag(tp, CHIP_RESETTING) ||
7526 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7533 * Writing any value to intr-mbox-0 clears PCI INTA# and
7534 * chip-internal interrupt pending events.
7535 * Writing non-zero to intr-mbox-0 additionally tells the
7536 * NIC to stop sending us irqs, engaging "in-intr-handler"
7539 * Flush the mailbox to de-assert the IRQ immediately to prevent
7540 * spurious interrupts. The flush impacts performance but
7541 * excessive spurious interrupts can be worse in some cases.
7543 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7546 * In a shared interrupt configuration, sometimes other devices'
7547 * interrupts will scream. We record the current status tag here
7548 * so that the above check can report that the screaming interrupts
7549 * are unhandled. Eventually they will be silenced.
7551 tnapi->last_irq_tag = sblk->status_tag;
7553 if (tg3_irq_sync(tp))
7556 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7558 napi_schedule(&tnapi->napi);
7561 return IRQ_RETVAL(handled);
7564 /* ISR for interrupt test */
7565 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7567 struct tg3_napi *tnapi = dev_id;
7568 struct tg3 *tp = tnapi->tp;
7569 struct tg3_hw_status *sblk = tnapi->hw_status;
7571 if ((sblk->status & SD_STATUS_UPDATED) ||
7572 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7573 tg3_disable_ints(tp);
7574 return IRQ_RETVAL(1);
7576 return IRQ_RETVAL(0);
7579 #ifdef CONFIG_NET_POLL_CONTROLLER
7580 static void tg3_poll_controller(struct net_device *dev)
7583 struct tg3 *tp = netdev_priv(dev);
7585 if (tg3_irq_sync(tp))
7588 for (i = 0; i < tp->irq_cnt; i++)
7589 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7593 static void tg3_tx_timeout(struct net_device *dev)
7595 struct tg3 *tp = netdev_priv(dev);
7597 if (netif_msg_tx_err(tp)) {
7598 netdev_err(dev, "transmit timed out, resetting\n");
7602 tg3_reset_task_schedule(tp);
7605 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7606 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7608 u32 base = (u32) mapping & 0xffffffff;
7610 return (base > 0xffffdcc0) && (base + len + 8 < base);
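/* Illustrative sketch (not driver code): the 32-bit wraparound check in
 * tg3_4g_overflow_test() is roughly equivalent to asking whether the
 * buffer, plus 8 guard bytes, crosses a 4GB boundary.  The same test in
 * plain 64-bit arithmetic:
 */
static inline int tg3_4g_overflow_sketch(u64 mapping, int len)
{
	/* The range spans a 4GB boundary when the upper 32 bits of its
	 * start and end addresses differ.
	 */
	return (mapping >> 32) != ((mapping + len + 8) >> 32);
}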
7613 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7614 * of any 4GB boundaries: 4G, 8G, etc
7616 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7619 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7620 u32 base = (u32) mapping & 0xffffffff;
7622 return ((base + len + (mss & 0x3fff)) < base);
7627 /* Test for DMA addresses > 40-bit */
7628 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7631 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7632 if (tg3_flag(tp, 40BIT_DMA_BUG))
7633 return ((u64) mapping + len) > DMA_BIT_MASK(40);
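/* Note: DMA_BIT_MASK(40) is (1ULL << 40) - 1, so when the workaround is
 * active the test above fires whenever any byte of the buffer lies at
 * or above the 2^40 (1 TB) address boundary.
 */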
7640 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7641 dma_addr_t mapping, u32 len, u32 flags,
7644 txbd->addr_hi = ((u64) mapping >> 32);
7645 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7646 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7647 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
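/* Example (illustrative): for mapping 0x123456789ULL, addr_hi becomes
 * 0x00000001 and addr_lo 0x23456789; the hardware reassembles the
 * 64-bit DMA address from the two 32-bit halves.
 */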
7650 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7651 dma_addr_t map, u32 len, u32 flags,
7654 struct tg3 *tp = tnapi->tp;
7657 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7658 hwbug = true;
7660 if (tg3_4g_overflow_test(map, len))
7661 hwbug = true;
7663 if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7664 hwbug = true;
7666 if (tg3_40bit_overflow_test(tp, map, len))
7667 hwbug = true;
7669 if (tp->dma_limit) {
7670 u32 prvidx = *entry;
7671 u32 tmp_flag = flags & ~TXD_FLAG_END;
7672 while (len > tp->dma_limit && *budget) {
7673 u32 frag_len = tp->dma_limit;
7674 len -= tp->dma_limit;
7676 /* Avoid the 8-byte DMA problem */
7677 if (len <= 8) {
7678 len += tp->dma_limit / 2;
7679 frag_len = tp->dma_limit / 2;
7682 tnapi->tx_buffers[*entry].fragmented = true;
7684 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7685 frag_len, tmp_flag, mss, vlan);
7688 *entry = NEXT_TX(*entry);
7695 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7696 len, flags, mss, vlan);
7698 *entry = NEXT_TX(*entry);
7701 tnapi->tx_buffers[prvidx].fragmented = false;
7705 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7706 len, flags, mss, vlan);
7707 *entry = NEXT_TX(*entry);
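/* Worked example (illustrative): with tp->dma_limit == 4096, a
 * 4100-byte fragment would leave a 4-byte tail after the first chunk.
 * Since a remainder of 8 bytes or less trips the SHORT_DMA_BUG rule,
 * the loop above halves the last full-sized chunk instead, emitting
 * 2048 + 2052 bytes.
 */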
7713 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7716 struct sk_buff *skb;
7717 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7722 pci_unmap_single(tnapi->tp->pdev,
7723 dma_unmap_addr(txb, mapping),
7727 while (txb->fragmented) {
7728 txb->fragmented = false;
7729 entry = NEXT_TX(entry);
7730 txb = &tnapi->tx_buffers[entry];
7733 for (i = 0; i <= last; i++) {
7734 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7736 entry = NEXT_TX(entry);
7737 txb = &tnapi->tx_buffers[entry];
7739 pci_unmap_page(tnapi->tp->pdev,
7740 dma_unmap_addr(txb, mapping),
7741 skb_frag_size(frag), PCI_DMA_TODEVICE);
7743 while (txb->fragmented) {
7744 txb->fragmented = false;
7745 entry = NEXT_TX(entry);
7746 txb = &tnapi->tx_buffers[entry];
7751 /* Work around the 4GB and 40-bit hardware DMA bugs. */
7752 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7753 struct sk_buff **pskb,
7754 u32 *entry, u32 *budget,
7755 u32 base_flags, u32 mss, u32 vlan)
7757 struct tg3 *tp = tnapi->tp;
7758 struct sk_buff *new_skb, *skb = *pskb;
7759 dma_addr_t new_addr = 0;
7762 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7763 new_skb = skb_copy(skb, GFP_ATOMIC);
7765 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7767 new_skb = skb_copy_expand(skb,
7768 skb_headroom(skb) + more_headroom,
7769 skb_tailroom(skb), GFP_ATOMIC);
7775 /* New SKB is guaranteed to be linear. */
7776 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7778 /* Make sure the mapping succeeded */
7779 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7780 dev_kfree_skb(new_skb);
7783 u32 save_entry = *entry;
7785 base_flags |= TXD_FLAG_END;
7787 tnapi->tx_buffers[*entry].skb = new_skb;
7788 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7791 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7792 new_skb->len, base_flags,
7794 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7795 dev_kfree_skb(new_skb);
7806 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7808 /* Use GSO to work around a rare TSO bug that may be triggered when the
7809 * TSO header is greater than 80 bytes.
7811 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7813 struct sk_buff *segs, *nskb;
7814 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7816 /* Estimate the number of fragments in the worst case */
7817 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7818 netif_stop_queue(tp->dev);
7820 /* netif_tx_stop_queue() must be done before checking
7821 * tx index in tg3_tx_avail() below, because in
7822 * tg3_tx(), we update tx index before checking for
7823 * netif_tx_queue_stopped().
7825 smp_mb();
7826 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7827 return NETDEV_TX_BUSY;
7829 netif_wake_queue(tp->dev);
7832 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7834 goto tg3_tso_bug_end;
7840 tg3_start_xmit(nskb, tp->dev);
7846 return NETDEV_TX_OK;
7849 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7850 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7852 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7854 struct tg3 *tp = netdev_priv(dev);
7855 u32 len, entry, base_flags, mss, vlan = 0;
7857 int i = -1, would_hit_hwbug;
7859 struct tg3_napi *tnapi;
7860 struct netdev_queue *txq;
7863 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7864 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7865 if (tg3_flag(tp, ENABLE_TSS))
7866 tnapi++;
7868 budget = tg3_tx_avail(tnapi);
7870 /* We are running in BH disabled context with netif_tx_lock
7871 * and TX reclaim runs via tp->napi.poll inside of a software
7872 * interrupt. Furthermore, IRQ processing runs lockless so we have
7873 * no IRQ context deadlocks to worry about either. Rejoice!
7875 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7876 if (!netif_tx_queue_stopped(txq)) {
7877 netif_tx_stop_queue(txq);
7879 /* This is a hard error, log it. */
7881 "BUG! Tx Ring full when queue awake!\n");
7883 return NETDEV_TX_BUSY;
7886 entry = tnapi->tx_prod;
7888 if (skb->ip_summed == CHECKSUM_PARTIAL)
7889 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7891 mss = skb_shinfo(skb)->gso_size;
7894 u32 tcp_opt_len, hdr_len;
7896 if (skb_header_cloned(skb) &&
7897 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7901 tcp_opt_len = tcp_optlen(skb);
7903 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7905 if (!skb_is_gso_v6(skb)) {
7906 iph->check = 0;
7907 iph->tot_len = htons(mss + hdr_len);
7910 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7911 tg3_flag(tp, TSO_BUG))
7912 return tg3_tso_bug(tp, skb);
7914 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7915 TXD_FLAG_CPU_POST_DMA);
7917 if (tg3_flag(tp, HW_TSO_1) ||
7918 tg3_flag(tp, HW_TSO_2) ||
7919 tg3_flag(tp, HW_TSO_3)) {
7920 tcp_hdr(skb)->check = 0;
7921 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7923 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7928 if (tg3_flag(tp, HW_TSO_3)) {
7929 mss |= (hdr_len & 0xc) << 12;
7931 base_flags |= 0x00000010;
7932 base_flags |= (hdr_len & 0x3e0) << 5;
7933 } else if (tg3_flag(tp, HW_TSO_2))
7934 mss |= hdr_len << 9;
7935 else if (tg3_flag(tp, HW_TSO_1) ||
7936 tg3_asic_rev(tp) == ASIC_REV_5705) {
7937 if (tcp_opt_len || iph->ihl > 5) {
7940 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7941 mss |= (tsflags << 11);
7944 if (tcp_opt_len || iph->ihl > 5) {
7947 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7948 base_flags |= tsflags << 12;
7953 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7954 !mss && skb->len > VLAN_ETH_FRAME_LEN)
7955 base_flags |= TXD_FLAG_JMB_PKT;
7957 if (vlan_tx_tag_present(skb)) {
7958 base_flags |= TXD_FLAG_VLAN;
7959 vlan = vlan_tx_tag_get(skb);
7962 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7963 tg3_flag(tp, TX_TSTAMP_EN)) {
7964 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7965 base_flags |= TXD_FLAG_HWTSTAMP;
7968 len = skb_headlen(skb);
7970 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7971 if (pci_dma_mapping_error(tp->pdev, mapping))
7975 tnapi->tx_buffers[entry].skb = skb;
7976 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7978 would_hit_hwbug = 0;
7980 if (tg3_flag(tp, 5701_DMA_BUG))
7981 would_hit_hwbug = 1;
7983 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7984 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7986 would_hit_hwbug = 1;
7987 } else if (skb_shinfo(skb)->nr_frags > 0) {
7990 if (!tg3_flag(tp, HW_TSO_1) &&
7991 !tg3_flag(tp, HW_TSO_2) &&
7992 !tg3_flag(tp, HW_TSO_3))
7995 /* Now loop through additional data
7996 * fragments, and queue them.
7998 last = skb_shinfo(skb)->nr_frags - 1;
7999 for (i = 0; i <= last; i++) {
8000 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8002 len = skb_frag_size(frag);
8003 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8004 len, DMA_TO_DEVICE);
8006 tnapi->tx_buffers[entry].skb = NULL;
8007 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8009 if (dma_mapping_error(&tp->pdev->dev, mapping))
8013 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8015 ((i == last) ? TXD_FLAG_END : 0),
8017 would_hit_hwbug = 1;
8023 if (would_hit_hwbug) {
8024 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8026 /* If the workaround fails due to memory/mapping
8027 * failure, silently drop this packet.
8029 entry = tnapi->tx_prod;
8030 budget = tg3_tx_avail(tnapi);
8031 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8032 base_flags, mss, vlan))
8036 skb_tx_timestamp(skb);
8037 netdev_tx_sent_queue(txq, skb->len);
8039 /* Sync BD data before updating mailbox */
8040 wmb();
8042 /* Packets are ready, update the Tx producer index locally and on the card. */
8043 tw32_tx_mbox(tnapi->prodmbox, entry);
8045 tnapi->tx_prod = entry;
8046 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8047 netif_tx_stop_queue(txq);
8049 /* netif_tx_stop_queue() must be done before checking
8050 * tx index in tg3_tx_avail() below, because in
8051 * tg3_tx(), we update tx index before checking for
8052 * netif_tx_queue_stopped().
8054 smp_mb();
8055 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8056 netif_tx_wake_queue(txq);
8060 return NETDEV_TX_OK;
8063 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8064 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8069 return NETDEV_TX_OK;
8072 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8075 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8076 MAC_MODE_PORT_MODE_MASK);
8078 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8080 if (!tg3_flag(tp, 5705_PLUS))
8081 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8083 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8084 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8086 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8088 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8090 if (tg3_flag(tp, 5705_PLUS) ||
8091 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8092 tg3_asic_rev(tp) == ASIC_REV_5700)
8093 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8096 tw32(MAC_MODE, tp->mac_mode);
8100 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8102 u32 val, bmcr, mac_mode, ptest = 0;
8104 tg3_phy_toggle_apd(tp, false);
8105 tg3_phy_toggle_automdix(tp, false);
8107 if (extlpbk && tg3_phy_set_extloopbk(tp))
8110 bmcr = BMCR_FULLDPLX;
8115 bmcr |= BMCR_SPEED100;
8119 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8121 bmcr |= BMCR_SPEED100;
8124 bmcr |= BMCR_SPEED1000;
8129 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8130 tg3_readphy(tp, MII_CTRL1000, &val);
8131 val |= CTL1000_AS_MASTER |
8132 CTL1000_ENABLE_MASTER;
8133 tg3_writephy(tp, MII_CTRL1000, val);
8135 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8136 MII_TG3_FET_PTEST_TRIM_2;
8137 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8140 bmcr |= BMCR_LOOPBACK;
8142 tg3_writephy(tp, MII_BMCR, bmcr);
8144 /* The write needs to be flushed for the FETs */
8145 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8146 tg3_readphy(tp, MII_BMCR, &bmcr);
8150 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8151 tg3_asic_rev(tp) == ASIC_REV_5785) {
8152 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8153 MII_TG3_FET_PTEST_FRC_TX_LINK |
8154 MII_TG3_FET_PTEST_FRC_TX_LOCK);
8156 /* The write needs to be flushed for the AC131 */
8157 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8160 /* Reset to prevent losing 1st rx packet intermittently */
8161 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8162 tg3_flag(tp, 5780_CLASS)) {
8163 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8165 tw32_f(MAC_RX_MODE, tp->rx_mode);
8168 mac_mode = tp->mac_mode &
8169 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8170 if (speed == SPEED_1000)
8171 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8173 mac_mode |= MAC_MODE_PORT_MODE_MII;
8175 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8176 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8178 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8179 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8180 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8181 mac_mode |= MAC_MODE_LINK_POLARITY;
8183 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8184 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8187 tw32(MAC_MODE, mac_mode);
8193 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8195 struct tg3 *tp = netdev_priv(dev);
8197 if (features & NETIF_F_LOOPBACK) {
8198 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8201 spin_lock_bh(&tp->lock);
8202 tg3_mac_loopback(tp, true);
8203 netif_carrier_on(tp->dev);
8204 spin_unlock_bh(&tp->lock);
8205 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8207 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8210 spin_lock_bh(&tp->lock);
8211 tg3_mac_loopback(tp, false);
8212 /* Force link status check */
8213 tg3_setup_phy(tp, true);
8214 spin_unlock_bh(&tp->lock);
8215 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8219 static netdev_features_t tg3_fix_features(struct net_device *dev,
8220 netdev_features_t features)
8222 struct tg3 *tp = netdev_priv(dev);
8224 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8225 features &= ~NETIF_F_ALL_TSO;
8230 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8232 netdev_features_t changed = dev->features ^ features;
8234 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8235 tg3_set_loopback(dev, features);
8240 static void tg3_rx_prodring_free(struct tg3 *tp,
8241 struct tg3_rx_prodring_set *tpr)
8245 if (tpr != &tp->napi[0].prodring) {
8246 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8247 i = (i + 1) & tp->rx_std_ring_mask)
8248 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8251 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8252 for (i = tpr->rx_jmb_cons_idx;
8253 i != tpr->rx_jmb_prod_idx;
8254 i = (i + 1) & tp->rx_jmb_ring_mask) {
8255 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8263 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8264 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8267 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8268 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8269 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8274 /* Initialize rx rings for packet processing.
8276 * The chip has been shut down and the driver detached from
8277 * the networking, so no interrupts or new tx packets will
8278 * end up in the driver. tp->{tx,}lock are held and thus
8279 * we may not sleep.
8281 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8282 struct tg3_rx_prodring_set *tpr)
8284 u32 i, rx_pkt_dma_sz;
8286 tpr->rx_std_cons_idx = 0;
8287 tpr->rx_std_prod_idx = 0;
8288 tpr->rx_jmb_cons_idx = 0;
8289 tpr->rx_jmb_prod_idx = 0;
8291 if (tpr != &tp->napi[0].prodring) {
8292 memset(&tpr->rx_std_buffers[0], 0,
8293 TG3_RX_STD_BUFF_RING_SIZE(tp));
8294 if (tpr->rx_jmb_buffers)
8295 memset(&tpr->rx_jmb_buffers[0], 0,
8296 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8300 /* Zero out all descriptors. */
8301 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8303 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8304 if (tg3_flag(tp, 5780_CLASS) &&
8305 tp->dev->mtu > ETH_DATA_LEN)
8306 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8307 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8309 /* Initialize the invariants of the rings; we only set this
8310 * stuff once. This works because the card does not
8311 * write into the rx buffer posting rings.
8313 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8314 struct tg3_rx_buffer_desc *rxd;
8316 rxd = &tpr->rx_std[i];
8317 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8318 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8319 rxd->opaque = (RXD_OPAQUE_RING_STD |
8320 (i << RXD_OPAQUE_INDEX_SHIFT));
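/* The opaque cookie encodes both the ring type and the slot index, so
 * the completion path can tell which producer ring (and which entry) a
 * returned descriptor refers to.
 */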
8323 /* Now allocate fresh SKBs for each rx ring. */
8324 for (i = 0; i < tp->rx_pending; i++) {
8325 unsigned int frag_size;
8327 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8329 netdev_warn(tp->dev,
8330 "Using a smaller RX standard ring. Only "
8331 "%d out of %d buffers were allocated "
8332 "successfully\n", i, tp->rx_pending);
8340 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8343 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8345 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8348 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8349 struct tg3_rx_buffer_desc *rxd;
8351 rxd = &tpr->rx_jmb[i].std;
8352 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8353 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8355 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8356 (i << RXD_OPAQUE_INDEX_SHIFT));
8359 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8360 unsigned int frag_size;
8362 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8364 netdev_warn(tp->dev,
8365 "Using a smaller RX jumbo ring. Only %d "
8366 "out of %d buffers were allocated "
8367 "successfully\n", i, tp->rx_jumbo_pending);
8370 tp->rx_jumbo_pending = i;
8379 tg3_rx_prodring_free(tp, tpr);
8383 static void tg3_rx_prodring_fini(struct tg3 *tp,
8384 struct tg3_rx_prodring_set *tpr)
8386 kfree(tpr->rx_std_buffers);
8387 tpr->rx_std_buffers = NULL;
8388 kfree(tpr->rx_jmb_buffers);
8389 tpr->rx_jmb_buffers = NULL;
8391 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8392 tpr->rx_std, tpr->rx_std_mapping);
8396 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8397 tpr->rx_jmb, tpr->rx_jmb_mapping);
8402 static int tg3_rx_prodring_init(struct tg3 *tp,
8403 struct tg3_rx_prodring_set *tpr)
8405 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8407 if (!tpr->rx_std_buffers)
8410 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8411 TG3_RX_STD_RING_BYTES(tp),
8412 &tpr->rx_std_mapping,
8417 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8418 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8420 if (!tpr->rx_jmb_buffers)
8423 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8424 TG3_RX_JMB_RING_BYTES(tp),
8425 &tpr->rx_jmb_mapping,
8434 tg3_rx_prodring_fini(tp, tpr);
8438 /* Free up pending packets in all rx/tx rings.
8440 * The chip has been shut down and the driver detached from
8441 * the networking, so no interrupts or new tx packets will
8442 * end up in the driver. tp->{tx,}lock is not held and we are not
8443 * in an interrupt context and thus may sleep.
8445 static void tg3_free_rings(struct tg3 *tp)
8449 for (j = 0; j < tp->irq_cnt; j++) {
8450 struct tg3_napi *tnapi = &tp->napi[j];
8452 tg3_rx_prodring_free(tp, &tnapi->prodring);
8454 if (!tnapi->tx_buffers)
8457 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8458 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8463 tg3_tx_skb_unmap(tnapi, i,
8464 skb_shinfo(skb)->nr_frags - 1);
8466 dev_kfree_skb_any(skb);
8468 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8472 /* Initialize tx/rx rings for packet processing.
8474 * The chip has been shut down and the driver detached from
8475 * the networking, so no interrupts or new tx packets will
8476 * end up in the driver. tp->{tx,}lock are held and thus
8477 * we may not sleep.
8479 static int tg3_init_rings(struct tg3 *tp)
8483 /* Free up all the SKBs. */
8486 for (i = 0; i < tp->irq_cnt; i++) {
8487 struct tg3_napi *tnapi = &tp->napi[i];
8489 tnapi->last_tag = 0;
8490 tnapi->last_irq_tag = 0;
8491 tnapi->hw_status->status = 0;
8492 tnapi->hw_status->status_tag = 0;
8493 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8498 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8500 tnapi->rx_rcb_ptr = 0;
8502 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8504 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8513 static void tg3_mem_tx_release(struct tg3 *tp)
8517 for (i = 0; i < tp->irq_max; i++) {
8518 struct tg3_napi *tnapi = &tp->napi[i];
8520 if (tnapi->tx_ring) {
8521 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8522 tnapi->tx_ring, tnapi->tx_desc_mapping);
8523 tnapi->tx_ring = NULL;
8526 kfree(tnapi->tx_buffers);
8527 tnapi->tx_buffers = NULL;
8531 static int tg3_mem_tx_acquire(struct tg3 *tp)
8534 struct tg3_napi *tnapi = &tp->napi[0];
8536 /* If multivector TSS is enabled, vector 0 does not handle
8537 * tx interrupts. Don't allocate any resources for it.
8539 if (tg3_flag(tp, ENABLE_TSS))
8542 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8543 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8544 TG3_TX_RING_SIZE, GFP_KERNEL);
8545 if (!tnapi->tx_buffers)
8548 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8550 &tnapi->tx_desc_mapping,
8552 if (!tnapi->tx_ring)
8559 tg3_mem_tx_release(tp);
8563 static void tg3_mem_rx_release(struct tg3 *tp)
8567 for (i = 0; i < tp->irq_max; i++) {
8568 struct tg3_napi *tnapi = &tp->napi[i];
8570 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8575 dma_free_coherent(&tp->pdev->dev,
8576 TG3_RX_RCB_RING_BYTES(tp),
8578 tnapi->rx_rcb_mapping);
8579 tnapi->rx_rcb = NULL;
8583 static int tg3_mem_rx_acquire(struct tg3 *tp)
8585 unsigned int i, limit;
8587 limit = tp->rxq_cnt;
8589 /* If RSS is enabled, we need a (dummy) producer ring
8590 * set on vector zero. This is the true hw prodring.
8592 if (tg3_flag(tp, ENABLE_RSS))
8595 for (i = 0; i < limit; i++) {
8596 struct tg3_napi *tnapi = &tp->napi[i];
8598 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8601 /* If multivector RSS is enabled, vector 0
8602 * does not handle rx or tx interrupts.
8603 * Don't allocate any resources for it.
8605 if (!i && tg3_flag(tp, ENABLE_RSS))
8608 tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8609 TG3_RX_RCB_RING_BYTES(tp),
8610 &tnapi->rx_rcb_mapping,
8619 tg3_mem_rx_release(tp);
8624 * Must not be invoked with interrupt sources disabled and
8625 * the hardware shut down.
8627 static void tg3_free_consistent(struct tg3 *tp)
8631 for (i = 0; i < tp->irq_cnt; i++) {
8632 struct tg3_napi *tnapi = &tp->napi[i];
8634 if (tnapi->hw_status) {
8635 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8637 tnapi->status_mapping);
8638 tnapi->hw_status = NULL;
8642 tg3_mem_rx_release(tp);
8643 tg3_mem_tx_release(tp);
8646 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8647 tp->hw_stats, tp->stats_mapping);
8648 tp->hw_stats = NULL;
8653 * Must not be invoked with interrupt sources disabled and
8654 * the hardware shut down. Can sleep.
8656 static int tg3_alloc_consistent(struct tg3 *tp)
8660 tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
8661 sizeof(struct tg3_hw_stats),
8662 &tp->stats_mapping, GFP_KERNEL);
8666 for (i = 0; i < tp->irq_cnt; i++) {
8667 struct tg3_napi *tnapi = &tp->napi[i];
8668 struct tg3_hw_status *sblk;
8670 tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
8672 &tnapi->status_mapping,
8674 if (!tnapi->hw_status)
8677 sblk = tnapi->hw_status;
8679 if (tg3_flag(tp, ENABLE_RSS)) {
8680 u16 *prodptr = NULL;
8683 * When RSS is enabled, the status block format changes
8684 * slightly. The "rx_jumbo_consumer", "reserved",
8685 * and "rx_mini_consumer" members get mapped to the
8686 * other three rx return ring producer indexes.
8690 prodptr = &sblk->idx[0].rx_producer;
8693 prodptr = &sblk->rx_jumbo_consumer;
8696 prodptr = &sblk->reserved;
8699 prodptr = &sblk->rx_mini_consumer;
8702 tnapi->rx_rcb_prod_idx = prodptr;
8704 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8708 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8714 tg3_free_consistent(tp);
8718 #define MAX_WAIT_CNT 1000
8720 /* To stop a block, clear the enable bit and poll till it
8721 * clears. tp->lock is held.
8723 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8728 if (tg3_flag(tp, 5705_PLUS)) {
8735 /* We can't enable/disable these bits of the
8736 * 5705/5750, just say success.
8749 for (i = 0; i < MAX_WAIT_CNT; i++) {
8750 if (pci_channel_offline(tp->pdev)) {
8751 dev_err(&tp->pdev->dev,
8752 "tg3_stop_block device offline, "
8753 "ofs=%lx enable_bit=%x\n",
8760 if ((val & enable_bit) == 0)
8764 if (i == MAX_WAIT_CNT && !silent) {
8765 dev_err(&tp->pdev->dev,
8766 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8774 /* tp->lock is held. */
8775 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8779 tg3_disable_ints(tp);
8781 if (pci_channel_offline(tp->pdev)) {
8782 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8783 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8788 tp->rx_mode &= ~RX_MODE_ENABLE;
8789 tw32_f(MAC_RX_MODE, tp->rx_mode);
8792 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8793 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8794 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8795 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8796 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8797 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8799 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8800 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8801 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8802 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8803 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8804 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8805 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8807 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8808 tw32_f(MAC_MODE, tp->mac_mode);
8811 tp->tx_mode &= ~TX_MODE_ENABLE;
8812 tw32_f(MAC_TX_MODE, tp->tx_mode);
8814 for (i = 0; i < MAX_WAIT_CNT; i++) {
8816 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8819 if (i >= MAX_WAIT_CNT) {
8820 dev_err(&tp->pdev->dev,
8821 "%s timed out, TX_MODE_ENABLE will not clear "
8822 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8826 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8827 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8828 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8830 tw32(FTQ_RESET, 0xffffffff);
8831 tw32(FTQ_RESET, 0x00000000);
8833 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8834 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8837 for (i = 0; i < tp->irq_cnt; i++) {
8838 struct tg3_napi *tnapi = &tp->napi[i];
8839 if (tnapi->hw_status)
8840 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8846 /* Save PCI command register before chip reset */
8847 static void tg3_save_pci_state(struct tg3 *tp)
8849 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8852 /* Restore PCI state after chip reset */
8853 static void tg3_restore_pci_state(struct tg3 *tp)
8857 /* Re-enable indirect register accesses. */
8858 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8859 tp->misc_host_ctrl);
8861 /* Set MAX PCI retry to zero. */
8862 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8863 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8864 tg3_flag(tp, PCIX_MODE))
8865 val |= PCISTATE_RETRY_SAME_DMA;
8866 /* Allow reads and writes to the APE register and memory space. */
8867 if (tg3_flag(tp, ENABLE_APE))
8868 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8869 PCISTATE_ALLOW_APE_SHMEM_WR |
8870 PCISTATE_ALLOW_APE_PSPACE_WR;
8871 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8873 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8875 if (!tg3_flag(tp, PCI_EXPRESS)) {
8876 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8877 tp->pci_cacheline_sz);
8878 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8882 /* Make sure PCI-X relaxed ordering bit is clear. */
8883 if (tg3_flag(tp, PCIX_MODE)) {
8886 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8888 pcix_cmd &= ~PCI_X_CMD_ERO;
8889 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8893 if (tg3_flag(tp, 5780_CLASS)) {
8895 /* Chip reset on 5780 will reset the MSI enable bit,
8896 * so we need to restore it.
8898 if (tg3_flag(tp, USING_MSI)) {
8901 pci_read_config_word(tp->pdev,
8902 tp->msi_cap + PCI_MSI_FLAGS,
8904 pci_write_config_word(tp->pdev,
8905 tp->msi_cap + PCI_MSI_FLAGS,
8906 ctrl | PCI_MSI_FLAGS_ENABLE);
8907 val = tr32(MSGINT_MODE);
8908 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8913 /* tp->lock is held. */
8914 static int tg3_chip_reset(struct tg3 *tp)
8917 void (*write_op)(struct tg3 *, u32, u32);
8922 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8924 /* No matching tg3_nvram_unlock() after this because
8925 * chip reset below will undo the nvram lock.
8927 tp->nvram_lock_cnt = 0;
8929 /* GRC_MISC_CFG core clock reset will clear the memory
8930 * enable bit in PCI register 4 and the MSI enable bit
8931 * on some chips, so we save relevant registers here.
8933 tg3_save_pci_state(tp);
8935 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8936 tg3_flag(tp, 5755_PLUS))
8937 tw32(GRC_FASTBOOT_PC, 0);
8940 * We must avoid the readl() that normally takes place.
8941 * It locks machines, causes machine checks, and other
8942 * fun things. So, temporarily disable the 5701
8943 * hardware workaround, while we do the reset.
8945 write_op = tp->write32;
8946 if (write_op == tg3_write_flush_reg32)
8947 tp->write32 = tg3_write32;
8949 /* Prevent the irq handler from reading or writing PCI registers
8950 * during chip reset when the memory enable bit in the PCI command
8951 * register may be cleared. The chip does not generate interrupts
8952 * at this time, but the irq handler may still be called due to irq
8953 * sharing or irqpoll.
8955 tg3_flag_set(tp, CHIP_RESETTING);
8956 for (i = 0; i < tp->irq_cnt; i++) {
8957 struct tg3_napi *tnapi = &tp->napi[i];
8958 if (tnapi->hw_status) {
8959 tnapi->hw_status->status = 0;
8960 tnapi->hw_status->status_tag = 0;
8962 tnapi->last_tag = 0;
8963 tnapi->last_irq_tag = 0;
8967 for (i = 0; i < tp->irq_cnt; i++)
8968 synchronize_irq(tp->napi[i].irq_vec);
8970 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8971 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8972 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8976 val = GRC_MISC_CFG_CORECLK_RESET;
8978 if (tg3_flag(tp, PCI_EXPRESS)) {
8979 /* Force PCIe 1.0a mode */
8980 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8981 !tg3_flag(tp, 57765_PLUS) &&
8982 tr32(TG3_PCIE_PHY_TSTCTL) ==
8983 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8984 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8986 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
8987 tw32(GRC_MISC_CFG, (1 << 29));
8992 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
8993 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8994 tw32(GRC_VCPU_EXT_CTRL,
8995 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8998 /* Manage gphy power for all CPMU absent PCIe devices. */
8999 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9000 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9002 tw32(GRC_MISC_CFG, val);
9004 /* restore 5701 hardware bug workaround write method */
9005 tp->write32 = write_op;
9007 /* Unfortunately, we have to delay before the PCI read back.
9008 * Some 575X chips will not even respond to a PCI cfg access
9009 * when the reset command is given to the chip.
9011 * How do these hardware designers expect things to work
9012 * properly if the PCI write is posted for a long period
9013 * of time? It is always necessary to have some method by
9014 * which a register read back can occur to push the write
9015 * out which does the reset.
9017 * For most tg3 variants the trick below was working.
9022 /* Flush PCI posted writes. The normal MMIO registers
9023 * are inaccessible at this time so this is the only
9024 * way to do this reliably (actually, this is no longer
9025 * the case, see above). I tried to use indirect
9026 * register read/write but this upset some 5701 variants.
9028 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9032 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9035 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9039 /* Wait for link training to complete. */
9040 for (j = 0; j < 5000; j++)
9043 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9044 pci_write_config_dword(tp->pdev, 0xc4,
9045 cfg_val | (1 << 15));
9048 /* Clear the "no snoop" and "relaxed ordering" bits. */
9049 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9051 * Older PCIe devices only support the 128 byte
9052 * MPS setting. Enforce the restriction.
9054 if (!tg3_flag(tp, CPMU_PRESENT))
9055 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9056 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9058 /* Clear error status */
9059 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9060 PCI_EXP_DEVSTA_CED |
9061 PCI_EXP_DEVSTA_NFED |
9062 PCI_EXP_DEVSTA_FED |
9063 PCI_EXP_DEVSTA_URD);
9066 tg3_restore_pci_state(tp);
9068 tg3_flag_clear(tp, CHIP_RESETTING);
9069 tg3_flag_clear(tp, ERROR_PROCESSED);
9072 if (tg3_flag(tp, 5780_CLASS))
9073 val = tr32(MEMARB_MODE);
9074 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9076 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9078 tw32(0x5000, 0x400);
9081 if (tg3_flag(tp, IS_SSB_CORE)) {
9083 * BCM4785: In order to avoid repercussions from using
9084 * the potentially defective internal ROM, stop the Rx RISC CPU,
9085 * which is not required for normal operation.
9088 tg3_halt_cpu(tp, RX_CPU_BASE);
9091 err = tg3_poll_fw(tp);
9095 tw32(GRC_MODE, tp->grc_mode);
9097 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9100 tw32(0xc4, val | (1 << 15));
9103 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9104 tg3_asic_rev(tp) == ASIC_REV_5705) {
9105 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9106 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9107 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9108 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9111 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9112 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9114 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9115 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9120 tw32_f(MAC_MODE, val);
9123 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9127 if (tg3_flag(tp, PCI_EXPRESS) &&
9128 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9129 tg3_asic_rev(tp) != ASIC_REV_5785 &&
9130 !tg3_flag(tp, 57765_PLUS)) {
9133 tw32(0x7c00, val | (1 << 25));
9136 if (tg3_asic_rev(tp) == ASIC_REV_5720) {
9137 val = tr32(TG3_CPMU_CLCK_ORIDE);
9138 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9141 /* Reprobe ASF enable state. */
9142 tg3_flag_clear(tp, ENABLE_ASF);
9143 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9144 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9146 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9147 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9148 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9151 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9152 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9153 tg3_flag_set(tp, ENABLE_ASF);
9154 tp->last_event_jiffies = jiffies;
9155 if (tg3_flag(tp, 5750_PLUS))
9156 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9158 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9159 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9160 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9161 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9162 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9169 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9170 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9172 /* tp->lock is held. */
9173 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9179 tg3_write_sig_pre_reset(tp, kind);
9181 tg3_abort_hw(tp, silent);
9182 err = tg3_chip_reset(tp);
9184 __tg3_set_mac_addr(tp, false);
9186 tg3_write_sig_legacy(tp, kind);
9187 tg3_write_sig_post_reset(tp, kind);
9190 /* Save the stats across chip resets... */
9191 tg3_get_nstats(tp, &tp->net_stats_prev);
9192 tg3_get_estats(tp, &tp->estats_prev);
9194 /* And make sure the next sample is new data */
9195 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9204 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9206 struct tg3 *tp = netdev_priv(dev);
9207 struct sockaddr *addr = p;
9209 bool skip_mac_1 = false;
9211 if (!is_valid_ether_addr(addr->sa_data))
9212 return -EADDRNOTAVAIL;
9214 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9216 if (!netif_running(dev))
9219 if (tg3_flag(tp, ENABLE_ASF)) {
9220 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9222 addr0_high = tr32(MAC_ADDR_0_HIGH);
9223 addr0_low = tr32(MAC_ADDR_0_LOW);
9224 addr1_high = tr32(MAC_ADDR_1_HIGH);
9225 addr1_low = tr32(MAC_ADDR_1_LOW);
9227 /* Skip MAC addr 1 if ASF is using it. */
9228 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9229 !(addr1_high == 0 && addr1_low == 0))
9232 spin_lock_bh(&tp->lock);
9233 __tg3_set_mac_addr(tp, skip_mac_1);
9234 spin_unlock_bh(&tp->lock);
9239 /* tp->lock is held. */
9240 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9241 dma_addr_t mapping, u32 maxlen_flags,
9245 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9246 ((u64) mapping >> 32));
9248 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9249 ((u64) mapping & 0xffffffff));
9251 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9254 if (!tg3_flag(tp, 5705_PLUS))
9256 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9261 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9265 if (!tg3_flag(tp, ENABLE_TSS)) {
9266 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9267 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9268 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9270 tw32(HOSTCC_TXCOL_TICKS, 0);
9271 tw32(HOSTCC_TXMAX_FRAMES, 0);
9272 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9274 for (; i < tp->txq_cnt; i++) {
9277 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9278 tw32(reg, ec->tx_coalesce_usecs);
9279 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9280 tw32(reg, ec->tx_max_coalesced_frames);
9281 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9282 tw32(reg, ec->tx_max_coalesced_frames_irq);
9286 for (; i < tp->irq_max - 1; i++) {
9287 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9288 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9289 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
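/* Each additional host-coalescing vector owns a 0x18-byte block of
 * registers, hence the "+ i * 0x18" stride in tg3_coal_tx_init() above
 * and tg3_coal_rx_init() below.
 */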
9293 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9296 u32 limit = tp->rxq_cnt;
9298 if (!tg3_flag(tp, ENABLE_RSS)) {
9299 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9300 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9301 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9304 tw32(HOSTCC_RXCOL_TICKS, 0);
9305 tw32(HOSTCC_RXMAX_FRAMES, 0);
9306 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9309 for (; i < limit; i++) {
9312 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9313 tw32(reg, ec->rx_coalesce_usecs);
9314 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9315 tw32(reg, ec->rx_max_coalesced_frames);
9316 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9317 tw32(reg, ec->rx_max_coalesced_frames_irq);
9320 for (; i < tp->irq_max - 1; i++) {
9321 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9322 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9323 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9327 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9329 tg3_coal_tx_init(tp, ec);
9330 tg3_coal_rx_init(tp, ec);
9332 if (!tg3_flag(tp, 5705_PLUS)) {
9333 u32 val = ec->stats_block_coalesce_usecs;
9335 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9336 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9341 tw32(HOSTCC_STAT_COAL_TICKS, val);
9345 /* tp->lock is held. */
9346 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9350 /* Disable all transmit rings but the first. */
9351 if (!tg3_flag(tp, 5705_PLUS))
9352 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9353 else if (tg3_flag(tp, 5717_PLUS))
9354 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9355 else if (tg3_flag(tp, 57765_CLASS) ||
9356 tg3_asic_rev(tp) == ASIC_REV_5762)
9357 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9359 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9361 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9362 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9363 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9364 BDINFO_FLAGS_DISABLED);
9367 /* tp->lock is held. */
9368 static void tg3_tx_rcbs_init(struct tg3 *tp)
9371 u32 txrcb = NIC_SRAM_SEND_RCB;
9373 if (tg3_flag(tp, ENABLE_TSS))
9376 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9377 struct tg3_napi *tnapi = &tp->napi[i];
9379 if (!tnapi->tx_ring)
9382 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9383 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9384 NIC_SRAM_TX_BUFFER_DESC);
9388 /* tp->lock is held. */
9389 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9393 /* Disable all receive return rings but the first. */
9394 if (tg3_flag(tp, 5717_PLUS))
9395 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9396 else if (!tg3_flag(tp, 5705_PLUS))
9397 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9398 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9399 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9400 tg3_flag(tp, 57765_CLASS))
9401 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9403 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9405 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9406 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9407 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9408 BDINFO_FLAGS_DISABLED);
9411 /* tp->lock is held. */
9412 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9415 u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9417 if (tg3_flag(tp, ENABLE_RSS))
9420 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9421 struct tg3_napi *tnapi = &tp->napi[i];
9426 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9427 (tp->rx_ret_ring_mask + 1) <<
9428 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9432 /* tp->lock is held. */
9433 static void tg3_rings_reset(struct tg3 *tp)
9437 struct tg3_napi *tnapi = &tp->napi[0];
9439 tg3_tx_rcbs_disable(tp);
9441 tg3_rx_ret_rcbs_disable(tp);
9443 /* Disable interrupts */
9444 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9445 tp->napi[0].chk_msi_cnt = 0;
9446 tp->napi[0].last_rx_cons = 0;
9447 tp->napi[0].last_tx_cons = 0;
9449 /* Zero mailbox registers. */
9450 if (tg3_flag(tp, SUPPORT_MSIX)) {
9451 for (i = 1; i < tp->irq_max; i++) {
9452 tp->napi[i].tx_prod = 0;
9453 tp->napi[i].tx_cons = 0;
9454 if (tg3_flag(tp, ENABLE_TSS))
9455 tw32_mailbox(tp->napi[i].prodmbox, 0);
9456 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9457 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9458 tp->napi[i].chk_msi_cnt = 0;
9459 tp->napi[i].last_rx_cons = 0;
9460 tp->napi[i].last_tx_cons = 0;
9462 if (!tg3_flag(tp, ENABLE_TSS))
9463 tw32_mailbox(tp->napi[0].prodmbox, 0);
9465 tp->napi[0].tx_prod = 0;
9466 tp->napi[0].tx_cons = 0;
9467 tw32_mailbox(tp->napi[0].prodmbox, 0);
9468 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9471 /* Make sure the NIC-based send BD rings are disabled. */
9472 if (!tg3_flag(tp, 5705_PLUS)) {
9473 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9474 for (i = 0; i < 16; i++)
9475 tw32_tx_mbox(mbox + i * 8, 0);
9478 /* Clear status block in ram. */
9479 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9481 /* Set status block DMA address */
9482 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9483 ((u64) tnapi->status_mapping >> 32));
9484 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9485 ((u64) tnapi->status_mapping & 0xffffffff));
9487 stblk = HOSTCC_STATBLCK_RING1;
9489 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9490 u64 mapping = (u64)tnapi->status_mapping;
9491 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9492 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9495 /* Clear status block in ram. */
9496 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9499 tg3_tx_rcbs_init(tp);
9500 tg3_rx_ret_rcbs_init(tp);
9503 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9505 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9507 if (!tg3_flag(tp, 5750_PLUS) ||
9508 tg3_flag(tp, 5780_CLASS) ||
9509 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9510 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9511 tg3_flag(tp, 57765_PLUS))
9512 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9513 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9514 tg3_asic_rev(tp) == ASIC_REV_5787)
9515 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9517 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9519 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9520 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9522 val = min(nic_rep_thresh, host_rep_thresh);
9523 tw32(RCVBDI_STD_THRESH, val);
9525 if (tg3_flag(tp, 57765_PLUS))
9526 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9528 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9531 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9533 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9535 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9536 tw32(RCVBDI_JUMBO_THRESH, val);
9538 if (tg3_flag(tp, 57765_PLUS))
9539 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
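/* Example (illustrative): with the default tp->rx_pending of 200
 * (TG3_DEF_RX_RING_PENDING), the host replenish threshold above works
 * out to max(200 / 8, 1) = 25 buffers.
 */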
9542 static inline u32 calc_crc(unsigned char *buf, int len)
9544 u32 reg;
9545 u32 tmp;
9546 int j, k;
9548 reg = 0xffffffff;
9550 for (j = 0; j < len; j++) {
9551 reg ^= buf[j];
9553 for (k = 0; k < 8; k++) {
9554 tmp = reg & 0x01;
9556 reg >>= 1;
9558 if (tmp)
9559 reg ^= 0xedb88320;
9563 return ~reg;
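/* Illustrative self-test (not driver code): calc_crc() is the standard
 * reflected CRC-32 (polynomial 0xedb88320), the same algorithm as the
 * Ethernet FCS, so it should reproduce the well-known check value:
 */
static inline int calc_crc_selftest_sketch(void)
{
	/* CRC-32 of the ASCII string "123456789" is 0xcbf43926. */
	return calc_crc((unsigned char *)"123456789", 9) == 0xcbf43926;
}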
9566 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9568 /* accept or reject all multicast frames */
9569 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9570 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9571 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9572 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9575 static void __tg3_set_rx_mode(struct net_device *dev)
9577 struct tg3 *tp = netdev_priv(dev);
9580 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9581 RX_MODE_KEEP_VLAN_TAG);
9583 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9584 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9585 * flag clear.
9586 */
9587 if (!tg3_flag(tp, ENABLE_ASF))
9588 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9589 #endif
9591 if (dev->flags & IFF_PROMISC) {
9592 /* Promiscuous mode. */
9593 rx_mode |= RX_MODE_PROMISC;
9594 } else if (dev->flags & IFF_ALLMULTI) {
9595 /* Accept all multicast. */
9596 tg3_set_multi(tp, 1);
9597 } else if (netdev_mc_empty(dev)) {
9598 /* Reject all multicast. */
9599 tg3_set_multi(tp, 0);
9600 } else {
9601 /* Accept one or more multicast(s). */
9602 struct netdev_hw_addr *ha;
9603 u32 mc_filter[4] = { 0, };
9604 u32 regidx;
9605 u32 bit;
9606 u32 crc;
9608 netdev_for_each_mc_addr(ha, dev) {
9609 crc = calc_crc(ha->addr, ETH_ALEN);
9610 bit = ~crc & 0x7f;
9611 regidx = (bit & 0x60) >> 5;
9612 bit &= 0x1f;
9613 mc_filter[regidx] |= (1 << bit);
9614 }
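/* Worked example (illustration only): the CRC computed above is reduced
 * to a 7-bit bucket, bit = ~crc & 0x7f. The top two bits of the bucket
 * select one of the four 32-bit hash registers and the low five bits
 * select the bit within it. If bit == 0x43:
 *
 *	regidx = (0x43 & 0x60) >> 5;        // register 2
 *	mc_filter[2] |= 1 << (0x43 & 0x1f); // bit 3 of that register
 */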
9616 tw32(MAC_HASH_REG_0, mc_filter[0]);
9617 tw32(MAC_HASH_REG_1, mc_filter[1]);
9618 tw32(MAC_HASH_REG_2, mc_filter[2]);
9619 tw32(MAC_HASH_REG_3, mc_filter[3]);
9620 }
9622 if (rx_mode != tp->rx_mode) {
9623 tp->rx_mode = rx_mode;
9624 tw32_f(MAC_RX_MODE, rx_mode);
9625 udelay(10);
9626 }
9627 }
9629 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9630 {
9631 int i;
9633 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9634 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9635 }
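/* Example (illustration only): ethtool_rxfh_indir_default(i, qcnt)
 * returns i % qcnt, so with qcnt == 4 the table is filled with the
 * round-robin pattern 0, 1, 2, 3, 0, 1, 2, 3, ... which spreads RSS
 * hash values evenly across the four rx rings.
 */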
9637 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9638 {
9639 int i;
9641 if (!tg3_flag(tp, SUPPORT_MSIX))
9642 return;
9644 if (tp->rxq_cnt == 1) {
9645 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9646 return;
9647 }
9649 /* Validate table against current IRQ count */
9650 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9651 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9652 break;
9653 }
9655 if (i != TG3_RSS_INDIR_TBL_SIZE)
9656 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9657 }
9659 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9660 {
9661 int i = 0;
9662 u32 reg = MAC_RSS_INDIR_TBL_0;
9664 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9665 u32 val = tp->rss_ind_tbl[i];
9666 i++;
9667 for (; i % 8; i++) {
9668 val <<= 4;
9669 val |= tp->rss_ind_tbl[i];
9670 }
9671 tw32(reg, val);
9672 reg += 4;
9673 }
9674 }
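/* Example (illustration only): each table entry is a ring index that
 * fits in four bits, so the loop above packs eight entries into each
 * 32-bit register write, first entry in the most significant nibble.
 * Assuming the first eight entries are 1,2,3,0,1,2,3,0:
 *
 *	val == 0x12301230;  // one tw32() covers eight table slots
 */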
9676 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9677 {
9678 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9679 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9680 else
9681 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9682 }
9684 /* tp->lock is held. */
9685 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9686 {
9687 u32 val, rdmac_mode;
9688 int i, err, limit;
9689 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9691 tg3_disable_ints(tp);
9693 tg3_stop_fw(tp);
9695 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9697 if (tg3_flag(tp, INIT_COMPLETE))
9698 tg3_abort_hw(tp, 1);
9700 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9701 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9702 tg3_phy_pull_config(tp);
9703 tg3_eee_pull_config(tp, NULL);
9704 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9705 }
9707 /* Enable MAC control of LPI */
9708 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9709 tg3_setup_eee(tp);
9711 if (reset_phy)
9712 tg3_phy_reset(tp);
9714 err = tg3_chip_reset(tp);
9715 if (err)
9716 return err;
9718 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9720 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9721 val = tr32(TG3_CPMU_CTRL);
9722 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9723 tw32(TG3_CPMU_CTRL, val);
9725 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9726 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9727 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9728 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9730 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9731 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9732 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9733 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9735 val = tr32(TG3_CPMU_HST_ACC);
9736 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9737 val |= CPMU_HST_ACC_MACCLK_6_25;
9738 tw32(TG3_CPMU_HST_ACC, val);
9739 }
9741 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9742 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9743 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9744 PCIE_PWR_MGMT_L1_THRESH_4MS;
9745 tw32(PCIE_PWR_MGMT_THRESH, val);
9747 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9748 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9750 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9752 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9753 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9754 }
9756 if (tg3_flag(tp, L1PLLPD_EN)) {
9757 u32 grc_mode = tr32(GRC_MODE);
9759 /* Access the lower 1K of PL PCIE block registers. */
9760 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9761 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9763 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9764 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9765 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9767 tw32(GRC_MODE, grc_mode);
9768 }
9770 if (tg3_flag(tp, 57765_CLASS)) {
9771 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9772 u32 grc_mode = tr32(GRC_MODE);
9774 /* Access the lower 1K of PL PCIE block registers. */
9775 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9776 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9778 val = tr32(TG3_PCIE_TLDLPL_PORT +
9779 TG3_PCIE_PL_LO_PHYCTL5);
9780 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9781 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9783 tw32(GRC_MODE, grc_mode);
9784 }
9786 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9787 u32 grc_mode;
9789 /* Fix transmit hangs */
9790 val = tr32(TG3_CPMU_PADRNG_CTL);
9791 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9792 tw32(TG3_CPMU_PADRNG_CTL, val);
9794 grc_mode = tr32(GRC_MODE);
9796 /* Access the lower 1K of DL PCIE block registers. */
9797 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9798 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9800 val = tr32(TG3_PCIE_TLDLPL_PORT +
9801 TG3_PCIE_DL_LO_FTSMAX);
9802 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9803 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9804 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9806 tw32(GRC_MODE, grc_mode);
9807 }
9809 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9810 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9811 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9812 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9813 }
9815 /* This works around an issue with Athlon chipsets on
9816 * B3 tigon3 silicon. This bit has no effect on any
9817 * other revision. But do not set this on PCI Express
9818 * chips and don't even touch the clocks if the CPMU is present.
9819 */
9820 if (!tg3_flag(tp, CPMU_PRESENT)) {
9821 if (!tg3_flag(tp, PCI_EXPRESS))
9822 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9823 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9824 udelay(40);
9825 }
9826 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9827 tg3_flag(tp, PCIX_MODE)) {
9828 val = tr32(TG3PCI_PCISTATE);
9829 val |= PCISTATE_RETRY_SAME_DMA;
9830 tw32(TG3PCI_PCISTATE, val);
9831 }
9833 if (tg3_flag(tp, ENABLE_APE)) {
9834 /* Allow reads and writes to the
9835 * APE register and memory space.
9836 */
9837 val = tr32(TG3PCI_PCISTATE);
9838 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9839 PCISTATE_ALLOW_APE_SHMEM_WR |
9840 PCISTATE_ALLOW_APE_PSPACE_WR;
9841 tw32(TG3PCI_PCISTATE, val);
9842 }
9844 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9845 /* Enable some hw fixes. */
9846 val = tr32(TG3PCI_MSI_DATA);
9847 val |= (1 << 26) | (1 << 28) | (1 << 29);
9848 tw32(TG3PCI_MSI_DATA, val);
9849 }
9851 /* Descriptor ring init may make accesses to the
9852 * NIC SRAM area to setup the TX descriptors, so we
9853 * can only do this after the hardware has been
9854 * successfully reset.
9855 */
9856 err = tg3_init_rings(tp);
9857 if (err)
9858 return err;
9860 if (tg3_flag(tp, 57765_PLUS)) {
9861 val = tr32(TG3PCI_DMA_RW_CTRL) &
9862 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9863 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9864 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9865 if (!tg3_flag(tp, 57765_CLASS) &&
9866 tg3_asic_rev(tp) != ASIC_REV_5717 &&
9867 tg3_asic_rev(tp) != ASIC_REV_5762)
9868 val |= DMA_RWCTRL_TAGGED_STAT_WA;
9869 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9870 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9871 tg3_asic_rev(tp) != ASIC_REV_5761) {
9872 /* This value is determined during the probe time DMA
9873 * engine test, tg3_test_dma.
9874 */
9875 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9876 }
9878 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9879 GRC_MODE_4X_NIC_SEND_RINGS |
9880 GRC_MODE_NO_TX_PHDR_CSUM |
9881 GRC_MODE_NO_RX_PHDR_CSUM);
9882 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9884 /* Pseudo-header checksum is done by hardware logic and not
9885 * the offload processors, so make the chip do the pseudo-
9886 * header checksums on receive. For transmit it is more
9887 * convenient to do the pseudo-header checksum in software
9888 * as Linux does that on transmit for us in all cases.
9889 */
9890 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
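/* Sketch (illustration only) of the transmit-side convention referred
 * to above: for TCP the stack seeds the checksum field with the
 * pseudo-header sum before the packet reaches the driver, roughly
 *
 *	th->check = ~csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, 0);
 *
 * so the hardware only folds in the TCP header and payload. On receive
 * GRC_MODE_NO_RX_PHDR_CSUM is left clear, making the chip include the
 * pseudo-header itself.
 */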
9892 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9893 if (tp->rxptpctl)
9894 tw32(TG3_RX_PTP_CTL,
9895 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9897 if (tg3_flag(tp, PTP_CAPABLE))
9898 val |= GRC_MODE_TIME_SYNC_ENABLE;
9900 tw32(GRC_MODE, tp->grc_mode | val);
9902 /* Setup the timer prescaler register. Clock is always 66 MHz. */
9903 val = tr32(GRC_MISC_CFG);
9904 val &= ~0xff;
9905 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9906 tw32(GRC_MISC_CFG, val);
9908 /* Initialize MBUF/DESC pool. */
9909 if (tg3_flag(tp, 5750_PLUS)) {
9910 /* Do nothing. */
9911 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9912 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9913 if (tg3_asic_rev(tp) == ASIC_REV_5704)
9914 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9915 else
9916 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9917 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9918 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9919 } else if (tg3_flag(tp, TSO_CAPABLE)) {
9920 int fw_len;
9922 fw_len = tp->fw_len;
9923 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9924 tw32(BUFMGR_MB_POOL_ADDR,
9925 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9926 tw32(BUFMGR_MB_POOL_SIZE,
9927 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9928 }
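/* Worked example (illustration only): the expression above is the usual
 * power-of-two align-up idiom. For fw_len == 0x1234:
 *
 *	(0x1234 + 0x7f) & ~0x7f == 0x1280
 *
 * so the mbuf pool starts at the next 128-byte boundary past the TSO
 * firmware image and shrinks by the same amount.
 */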
9930 if (tp->dev->mtu <= ETH_DATA_LEN) {
9931 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9932 tp->bufmgr_config.mbuf_read_dma_low_water);
9933 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9934 tp->bufmgr_config.mbuf_mac_rx_low_water);
9935 tw32(BUFMGR_MB_HIGH_WATER,
9936 tp->bufmgr_config.mbuf_high_water);
9937 } else {
9938 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9939 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9940 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9941 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9942 tw32(BUFMGR_MB_HIGH_WATER,
9943 tp->bufmgr_config.mbuf_high_water_jumbo);
9944 }
9945 tw32(BUFMGR_DMA_LOW_WATER,
9946 tp->bufmgr_config.dma_low_water);
9947 tw32(BUFMGR_DMA_HIGH_WATER,
9948 tp->bufmgr_config.dma_high_water);
9950 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9951 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9952 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9953 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9954 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9955 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9956 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9957 tw32(BUFMGR_MODE, val);
9958 for (i = 0; i < 2000; i++) {
9959 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9960 break;
9961 udelay(10);
9962 }
9963 if (i >= 2000) {
9964 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9965 return -ENODEV;
9966 }
9968 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9969 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9971 tg3_setup_rxbd_thresholds(tp);
9973 /* Initialize TG3_BDINFO's at:
9974 * RCVDBDI_STD_BD: standard eth size rx ring
9975 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
9976 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
9977 *
9978 * like so:
9979 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
9980 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
9981 * ring attribute flags
9982 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
9984 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9985 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9987 * The size of each ring is fixed in the firmware, but the location is
9988 * configurable.
9989 */
9990 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9991 ((u64) tpr->rx_std_mapping >> 32));
9992 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9993 ((u64) tpr->rx_std_mapping & 0xffffffff));
9994 if (!tg3_flag(tp, 5717_PLUS))
9995 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9996 NIC_SRAM_RX_BUFFER_DESC);
9998 /* Disable the mini ring */
9999 if (!tg3_flag(tp, 5705_PLUS))
10000 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10001 BDINFO_FLAGS_DISABLED);
10003 /* Program the jumbo buffer descriptor ring control
10004 * blocks on those devices that have them.
10005 */
10006 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10007 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10009 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10010 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10011 ((u64) tpr->rx_jmb_mapping >> 32));
10012 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10013 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10014 val = TG3_RX_JMB_RING_SIZE(tp) <<
10015 BDINFO_FLAGS_MAXLEN_SHIFT;
10016 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10017 val | BDINFO_FLAGS_USE_EXT_RECV);
10018 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10019 tg3_flag(tp, 57765_CLASS) ||
10020 tg3_asic_rev(tp) == ASIC_REV_5762)
10021 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10022 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10023 } else {
10024 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10025 BDINFO_FLAGS_DISABLED);
10026 }
10027 }
10028 if (tg3_flag(tp, 57765_PLUS)) {
10029 val = TG3_RX_STD_RING_SIZE(tp);
10030 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10031 val |= (TG3_RX_STD_DMA_SZ << 2);
10032 } else if (tg3_flag(tp, 5705_PLUS))
10033 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10034 else
10035 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10037 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10039 tpr->rx_std_prod_idx = tp->rx_pending;
10040 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10042 tpr->rx_jmb_prod_idx =
10043 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10044 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10046 tg3_rings_reset(tp);
10048 /* Initialize MAC address and backoff seed. */
10049 __tg3_set_mac_addr(tp, false);
10051 /* MTU + ethernet header + FCS + optional VLAN tag */
10052 tw32(MAC_RX_MTU_SIZE,
10053 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
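/* Worked example (illustration only): with the default MTU of 1500 the
 * register is programmed with 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) +
 * 4 (VLAN_HLEN) == 1522 bytes, the maximum size of a VLAN-tagged
 * ethernet frame on the wire.
 */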
10055 /* The slot time is changed by tg3_setup_phy if we
10056 * run at gigabit with half duplex.
10057 */
10058 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10059 (6 << TX_LENGTHS_IPG_SHIFT) |
10060 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10062 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10063 tg3_asic_rev(tp) == ASIC_REV_5762)
10064 val |= tr32(MAC_TX_LENGTHS) &
10065 (TX_LENGTHS_JMB_FRM_LEN_MSK |
10066 TX_LENGTHS_CNT_DWN_VAL_MSK);
10068 tw32(MAC_TX_LENGTHS, val);
10070 /* Receive rules. */
10071 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10072 tw32(RCVLPC_CONFIG, 0x0181);
10074 /* Calculate RDMAC_MODE setting early, we need it to determine
10075 * the RCVLPC_STATE_ENABLE mask.
10077 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10078 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10079 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10080 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10081 RDMAC_MODE_LNGREAD_ENAB);
10083 if (tg3_asic_rev(tp) == ASIC_REV_5717)
10084 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10086 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10087 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10088 tg3_asic_rev(tp) == ASIC_REV_57780)
10089 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10090 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10091 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10093 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10094 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10095 if (tg3_flag(tp, TSO_CAPABLE) &&
10096 tg3_asic_rev(tp) == ASIC_REV_5705) {
10097 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10098 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10099 !tg3_flag(tp, IS_5788)) {
10100 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10101 }
10102 }
10104 if (tg3_flag(tp, PCI_EXPRESS))
10105 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10107 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10108 tp->dma_limit = 0;
10109 if (tp->dev->mtu <= ETH_DATA_LEN) {
10110 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10111 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10112 }
10113 }
10115 if (tg3_flag(tp, HW_TSO_1) ||
10116 tg3_flag(tp, HW_TSO_2) ||
10117 tg3_flag(tp, HW_TSO_3))
10118 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10120 if (tg3_flag(tp, 57765_PLUS) ||
10121 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10122 tg3_asic_rev(tp) == ASIC_REV_57780)
10123 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10125 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10126 tg3_asic_rev(tp) == ASIC_REV_5762)
10127 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10129 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10130 tg3_asic_rev(tp) == ASIC_REV_5784 ||
10131 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10132 tg3_asic_rev(tp) == ASIC_REV_57780 ||
10133 tg3_flag(tp, 57765_PLUS)) {
10134 u32 tgtreg;
10136 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10137 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10138 else
10139 tgtreg = TG3_RDMA_RSRVCTRL_REG;
10141 val = tr32(tgtreg);
10142 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10143 tg3_asic_rev(tp) == ASIC_REV_5762) {
10144 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10145 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10146 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10147 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10148 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10149 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10150 }
10151 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10152 }
10154 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10155 tg3_asic_rev(tp) == ASIC_REV_5720 ||
10156 tg3_asic_rev(tp) == ASIC_REV_5762) {
10157 u32 tgtreg;
10159 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10160 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10161 else
10162 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10164 val = tr32(tgtreg);
10165 tw32(tgtreg, val |
10166 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10167 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10168 }
10170 /* Receive/send statistics. */
10171 if (tg3_flag(tp, 5750_PLUS)) {
10172 val = tr32(RCVLPC_STATS_ENABLE);
10173 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10174 tw32(RCVLPC_STATS_ENABLE, val);
10175 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10176 tg3_flag(tp, TSO_CAPABLE)) {
10177 val = tr32(RCVLPC_STATS_ENABLE);
10178 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10179 tw32(RCVLPC_STATS_ENABLE, val);
10180 } else {
10181 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10182 }
10183 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10184 tw32(SNDDATAI_STATSENAB, 0xffffff);
10185 tw32(SNDDATAI_STATSCTRL,
10186 (SNDDATAI_SCTRL_ENABLE |
10187 SNDDATAI_SCTRL_FASTUPD));
10189 /* Setup host coalescing engine. */
10190 tw32(HOSTCC_MODE, 0);
10191 for (i = 0; i < 2000; i++) {
10192 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10193 break;
10194 udelay(10);
10195 }
10197 __tg3_set_coalesce(tp, &tp->coal);
10199 if (!tg3_flag(tp, 5705_PLUS)) {
10200 /* Status/statistics block address. See tg3_timer,
10201 * the tg3_periodic_fetch_stats call there, and
10202 * tg3_get_stats to see how this works for 5705/5750 chips.
10204 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10205 ((u64) tp->stats_mapping >> 32));
10206 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10207 ((u64) tp->stats_mapping & 0xffffffff));
10208 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10210 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10212 /* Clear statistics and status block memory areas */
10213 for (i = NIC_SRAM_STATS_BLK;
10214 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10215 i += sizeof(u32)) {
10216 tg3_write_mem(tp, i, 0);
10217 udelay(40);
10218 }
10219 }
10221 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10223 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10224 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10225 if (!tg3_flag(tp, 5705_PLUS))
10226 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10228 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10229 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10230 /* reset to prevent losing 1st rx packet intermittently */
10231 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10232 udelay(10);
10233 tw32_f(MAC_RX_MODE, tp->rx_mode);
10234 }
10235 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10236 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10237 MAC_MODE_FHDE_ENABLE;
10238 if (tg3_flag(tp, ENABLE_APE))
10239 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10240 if (!tg3_flag(tp, 5705_PLUS) &&
10241 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10242 tg3_asic_rev(tp) != ASIC_REV_5700)
10243 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10244 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10245 udelay(40);
10247 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10248 * If TG3_FLAG_IS_NIC is zero, we should read the
10249 * register to preserve the GPIO settings for LOMs. The GPIOs,
10250 * whether used as inputs or outputs, are set by boot code after
10251 * reset.
10252 */
10253 if (!tg3_flag(tp, IS_NIC)) {
10254 u32 gpio_mask;
10256 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10257 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10258 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10260 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10261 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10262 GRC_LCLCTRL_GPIO_OUTPUT3;
10264 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10265 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10267 tp->grc_local_ctrl &= ~gpio_mask;
10268 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10270 /* GPIO1 must be driven high for eeprom write protect */
10271 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10272 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10273 GRC_LCLCTRL_GPIO_OUTPUT1);
10274 }
10275 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10276 udelay(100);
10278 if (tg3_flag(tp, USING_MSIX)) {
10279 val = tr32(MSGINT_MODE);
10280 val |= MSGINT_MODE_ENABLE;
10281 if (tp->irq_cnt > 1)
10282 val |= MSGINT_MODE_MULTIVEC_EN;
10283 if (!tg3_flag(tp, 1SHOT_MSI))
10284 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10285 tw32(MSGINT_MODE, val);
10286 }
10288 if (!tg3_flag(tp, 5705_PLUS)) {
10289 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10290 udelay(40);
10291 }
10293 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10294 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10295 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10296 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10297 WDMAC_MODE_LNGREAD_ENAB);
10299 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10300 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10301 if (tg3_flag(tp, TSO_CAPABLE) &&
10302 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10303 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10304 /* nothing required */
10305 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10306 !tg3_flag(tp, IS_5788)) {
10307 val |= WDMAC_MODE_RX_ACCEL;
10308 }
10309 }
10311 /* Enable host coalescing bug fix */
10312 if (tg3_flag(tp, 5755_PLUS))
10313 val |= WDMAC_MODE_STATUS_TAG_FIX;
10315 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10316 val |= WDMAC_MODE_BURST_ALL_DATA;
10318 tw32_f(WDMAC_MODE, val);
10319 udelay(40);
10321 if (tg3_flag(tp, PCIX_MODE)) {
10322 u16 pcix_cmd;
10324 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10325 &pcix_cmd);
10326 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10327 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10328 pcix_cmd |= PCI_X_CMD_READ_2K;
10329 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10330 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10331 pcix_cmd |= PCI_X_CMD_READ_2K;
10332 }
10333 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10334 pcix_cmd);
10335 }
10337 tw32_f(RDMAC_MODE, rdmac_mode);
10338 udelay(40);
10340 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10341 tg3_asic_rev(tp) == ASIC_REV_5720) {
10342 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10343 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10344 break;
10345 }
10346 if (i < TG3_NUM_RDMA_CHANNELS) {
10347 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10348 val |= tg3_lso_rd_dma_workaround_bit(tp);
10349 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10350 tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10351 }
10352 }
10354 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10355 if (!tg3_flag(tp, 5705_PLUS))
10356 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10358 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10359 tw32(SNDDATAC_MODE,
10360 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10361 else
10362 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10364 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10365 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10366 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10367 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10368 val |= RCVDBDI_MODE_LRG_RING_SZ;
10369 tw32(RCVDBDI_MODE, val);
10370 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10371 if (tg3_flag(tp, HW_TSO_1) ||
10372 tg3_flag(tp, HW_TSO_2) ||
10373 tg3_flag(tp, HW_TSO_3))
10374 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10375 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10376 if (tg3_flag(tp, ENABLE_TSS))
10377 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10378 tw32(SNDBDI_MODE, val);
10379 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10381 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10382 err = tg3_load_5701_a0_firmware_fix(tp);
10383 if (err)
10384 return err;
10385 }
10387 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10388 /* Ignore any errors for the firmware download. If download
10389 * fails, the device will operate with EEE disabled
10390 */
10391 tg3_load_57766_firmware(tp);
10392 }
10394 if (tg3_flag(tp, TSO_CAPABLE)) {
10395 err = tg3_load_tso_firmware(tp);
10396 if (err)
10397 return err;
10398 }
10400 tp->tx_mode = TX_MODE_ENABLE;
10402 if (tg3_flag(tp, 5755_PLUS) ||
10403 tg3_asic_rev(tp) == ASIC_REV_5906)
10404 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10406 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10407 tg3_asic_rev(tp) == ASIC_REV_5762) {
10408 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10409 tp->tx_mode &= ~val;
10410 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10413 tw32_f(MAC_TX_MODE, tp->tx_mode);
10414 udelay(100);
10416 if (tg3_flag(tp, ENABLE_RSS)) {
10417 tg3_rss_write_indir_tbl(tp);
10419 /* Setup the "secret" hash key. */
10420 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
10421 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
10422 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
10423 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
10424 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
10425 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
10426 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
10427 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
10428 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
10429 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
10430 }
10432 tp->rx_mode = RX_MODE_ENABLE;
10433 if (tg3_flag(tp, 5755_PLUS))
10434 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10436 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10437 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10439 if (tg3_flag(tp, ENABLE_RSS))
10440 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10441 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10442 RX_MODE_RSS_IPV6_HASH_EN |
10443 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10444 RX_MODE_RSS_IPV4_HASH_EN |
10445 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10447 tw32_f(MAC_RX_MODE, tp->rx_mode);
10448 udelay(10);
10450 tw32(MAC_LED_CTRL, tp->led_ctrl);
10452 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10453 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10454 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10455 udelay(10);
10456 }
10457 tw32_f(MAC_RX_MODE, tp->rx_mode);
10458 udelay(10);
10460 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10461 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10462 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10463 /* Set drive transmission level to 1.2V */
10464 /* only if the signal pre-emphasis bit is not set */
10465 val = tr32(MAC_SERDES_CFG);
10466 val &= 0xfffff000;
10467 val |= 0x880;
10468 tw32(MAC_SERDES_CFG, val);
10469 }
10470 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10471 tw32(MAC_SERDES_CFG, 0x616000);
10472 }
10474 /* Prevent chip from dropping frames when flow control
10475 * is enabled.
10476 */
10477 if (tg3_flag(tp, 57765_CLASS))
10478 val = 1;
10479 else
10480 val = 2;
10481 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10483 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10484 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10485 /* Use hardware link auto-negotiation */
10486 tg3_flag_set(tp, HW_AUTONEG);
10487 }
10489 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10490 tg3_asic_rev(tp) == ASIC_REV_5714) {
10491 u32 tmp;
10493 tmp = tr32(SERDES_RX_CTRL);
10494 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10495 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10496 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10497 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10498 }
10500 if (!tg3_flag(tp, USE_PHYLIB)) {
10501 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10502 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10504 err = tg3_setup_phy(tp, false);
10505 if (err)
10506 return err;
10508 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10509 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10510 u32 tmp;
10512 /* Clear CRC stats. */
10513 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10514 tg3_writephy(tp, MII_TG3_TEST1,
10515 tmp | MII_TG3_TEST1_CRC_EN);
10516 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10517 }
10518 }
10519 }
10521 __tg3_set_rx_mode(tp->dev);
10523 /* Initialize receive rules. */
10524 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10525 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10526 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10527 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10529 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10530 limit = 8;
10531 else
10532 limit = 16;
10533 if (tg3_flag(tp, ENABLE_ASF))
10534 limit -= 4;
10535 switch (limit) {
10536 case 16:
10537 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10538 case 15:
10539 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10540 case 14:
10541 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10542 case 13:
10543 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10544 case 12:
10545 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10546 case 11:
10547 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10548 case 10:
10549 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10550 case 9:
10551 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10552 case 8:
10553 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10554 case 7:
10555 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10556 case 6:
10557 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10558 case 5:
10559 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10560 case 4:
10561 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10562 case 3:
10563 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10564 case 2:
10565 case 1:
10567 default:
10568 break;
10569 }
10571 if (tg3_flag(tp, ENABLE_APE))
10572 /* Write our heartbeat update interval to APE. */
10573 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10574 APE_HOST_HEARTBEAT_INT_DISABLE);
10576 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10578 return 0;
10579 }
10581 /* Called at device open time to get the chip ready for
10582 * packet processing. Invoked with tp->lock held.
10583 */
10584 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10585 {
10586 /* Chip may have been just powered on. If so, the boot code may still
10587 * be running initialization. Wait for it to finish to avoid races in
10588 * accessing the hardware.
10589 */
10590 tg3_enable_register_access(tp);
10591 tg3_poll_fw(tp);
10593 tg3_switch_clocks(tp);
10595 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10597 return tg3_reset_hw(tp, reset_phy);
10598 }
10600 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10601 {
10602 int i;
10604 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10605 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10607 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10608 off += len;
10610 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10611 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10612 memset(ocir, 0, TG3_OCIR_LEN);
10613 }
10614 }
10616 /* sysfs attributes for hwmon */
10617 static ssize_t tg3_show_temp(struct device *dev,
10618 struct device_attribute *devattr, char *buf)
10619 {
10620 struct pci_dev *pdev = to_pci_dev(dev);
10621 struct net_device *netdev = pci_get_drvdata(pdev);
10622 struct tg3 *tp = netdev_priv(netdev);
10623 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10624 u32 temperature;
10626 spin_lock_bh(&tp->lock);
10627 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10628 sizeof(temperature));
10629 spin_unlock_bh(&tp->lock);
10630 return sprintf(buf, "%u\n", temperature);
10631 }
10634 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10635 TG3_TEMP_SENSOR_OFFSET);
10636 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10637 TG3_TEMP_CAUTION_OFFSET);
10638 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10639 TG3_TEMP_MAX_OFFSET);
10641 static struct attribute *tg3_attributes[] = {
10642 &sensor_dev_attr_temp1_input.dev_attr.attr,
10643 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10644 &sensor_dev_attr_temp1_max.dev_attr.attr,
10645 NULL
10646 };
10648 static const struct attribute_group tg3_group = {
10649 .attrs = tg3_attributes,
10650 };
10652 static void tg3_hwmon_close(struct tg3 *tp)
10653 {
10654 if (tp->hwmon_dev) {
10655 hwmon_device_unregister(tp->hwmon_dev);
10656 tp->hwmon_dev = NULL;
10657 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10658 }
10659 }
10661 static void tg3_hwmon_open(struct tg3 *tp)
10662 {
10663 int i, err;
10664 u32 size = 0;
10665 struct pci_dev *pdev = tp->pdev;
10666 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10668 tg3_sd_scan_scratchpad(tp, ocirs);
10670 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10671 if (!ocirs[i].src_data_length)
10672 continue;
10674 size += ocirs[i].src_hdr_length;
10675 size += ocirs[i].src_data_length;
10676 }
10678 if (!size)
10679 return;
10681 /* Register hwmon sysfs hooks */
10682 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10683 if (err) {
10684 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10685 return;
10686 }
10688 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10689 if (IS_ERR(tp->hwmon_dev)) {
10690 tp->hwmon_dev = NULL;
10691 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10692 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10693 }
10694 }
10697 #define TG3_STAT_ADD32(PSTAT, REG) \
10698 do { u32 __val = tr32(REG); \
10699 (PSTAT)->low += __val; \
10700 if ((PSTAT)->low < __val) \
10701 (PSTAT)->high += 1; \
10702 } while (0)
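/* Worked example (illustration only): the hardware counters are only
 * 32 bits wide, so the macro detects wraparound by checking whether
 * the running low word became smaller than the value just added.
 * With (PSTAT)->low == 0xfffffff0 and __val == 0x20:
 *
 *	low += 0x20;            // wraps to 0x10
 *	if (low < 0x20) high++; // 0x10 < 0x20, carry into high word
 */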
10704 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10705 {
10706 struct tg3_hw_stats *sp = tp->hw_stats;
10708 if (!tp->link_up || !tg3_flag(tp, INIT_COMPLETE))
10709 return;
10711 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10712 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10713 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10714 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10715 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10716 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10717 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10718 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10719 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10720 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10721 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10722 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10723 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10724 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10725 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10726 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10727 u32 val;
10729 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10730 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10731 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10732 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10733 }
10735 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10736 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10737 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10738 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10739 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10740 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10741 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10742 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10743 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10744 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10745 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10746 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10747 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10748 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10750 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10751 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10752 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10753 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10754 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10755 } else {
10756 u32 val = tr32(HOSTCC_FLOW_ATTN);
10757 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10758 if (val) {
10759 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10760 sp->rx_discards.low += val;
10761 if (sp->rx_discards.low < val)
10762 sp->rx_discards.high += 1;
10763 }
10764 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10765 }
10766 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10767 }
10769 static void tg3_chk_missed_msi(struct tg3 *tp)
10770 {
10771 u32 i;
10773 for (i = 0; i < tp->irq_cnt; i++) {
10774 struct tg3_napi *tnapi = &tp->napi[i];
10776 if (tg3_has_work(tnapi)) {
10777 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10778 tnapi->last_tx_cons == tnapi->tx_cons) {
10779 if (tnapi->chk_msi_cnt < 1) {
10780 tnapi->chk_msi_cnt++;
10781 return;
10782 }
10783 tg3_msi(0, tnapi);
10784 }
10785 }
10786 tnapi->chk_msi_cnt = 0;
10787 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10788 tnapi->last_tx_cons = tnapi->tx_cons;
10789 }
10790 }
10792 static void tg3_timer(unsigned long __opaque)
10793 {
10794 struct tg3 *tp = (struct tg3 *) __opaque;
10796 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10797 goto restart_timer;
10799 spin_lock(&tp->lock);
10801 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10802 tg3_flag(tp, 57765_CLASS))
10803 tg3_chk_missed_msi(tp);
10805 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10806 /* BCM4785: Flush posted writes from GbE to host memory. */
10807 tr32(HOSTCC_MODE);
10808 }
10810 if (!tg3_flag(tp, TAGGED_STATUS)) {
10811 /* All of this garbage is because when using non-tagged
10812 * IRQ status the mailbox/status_block protocol the chip
10813 * uses with the cpu is race prone.
10814 */
10815 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10816 tw32(GRC_LOCAL_CTRL,
10817 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10818 } else {
10819 tw32(HOSTCC_MODE, tp->coalesce_mode |
10820 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10821 }
10823 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10824 spin_unlock(&tp->lock);
10825 tg3_reset_task_schedule(tp);
10826 goto restart_timer;
10827 }
10828 }
10830 /* This part only runs once per second. */
10831 if (!--tp->timer_counter) {
10832 if (tg3_flag(tp, 5705_PLUS))
10833 tg3_periodic_fetch_stats(tp);
10835 if (tp->setlpicnt && !--tp->setlpicnt)
10836 tg3_phy_eee_enable(tp);
10838 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10839 u32 mac_stat;
10840 int phy_event;
10842 mac_stat = tr32(MAC_STATUS);
10844 phy_event = 0;
10845 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10846 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10847 phy_event = 1;
10848 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10849 phy_event = 1;
10851 if (phy_event)
10852 tg3_setup_phy(tp, false);
10853 } else if (tg3_flag(tp, POLL_SERDES)) {
10854 u32 mac_stat = tr32(MAC_STATUS);
10855 int need_setup = 0;
10857 if (tp->link_up &&
10858 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10859 need_setup = 1;
10860 }
10861 if (!tp->link_up &&
10862 (mac_stat & (MAC_STATUS_PCS_SYNCED |
10863 MAC_STATUS_SIGNAL_DET))) {
10864 need_setup = 1;
10865 }
10866 if (need_setup) {
10867 if (!tp->serdes_counter) {
10868 tw32_f(MAC_MODE,
10869 (tp->mac_mode &
10870 ~MAC_MODE_PORT_MODE_MASK));
10871 udelay(40);
10872 tw32_f(MAC_MODE, tp->mac_mode);
10873 }
10875 tg3_setup_phy(tp, false);
10876 }
10877 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10878 tg3_flag(tp, 5780_CLASS)) {
10879 tg3_serdes_parallel_detect(tp);
10880 }
10882 tp->timer_counter = tp->timer_multiplier;
10883 }
10885 /* Heartbeat is only sent once every 2 seconds.
10887 * The heartbeat is to tell the ASF firmware that the host
10888 * driver is still alive. In the event that the OS crashes,
10889 * ASF needs to reset the hardware to free up the FIFO space
10890 * that may be filled with rx packets destined for the host.
10891 * If the FIFO is full, ASF will no longer function properly.
10893 * Unintended resets have been reported on real time kernels
10894 * where the timer doesn't run on time. Netpoll will also have
10895 * same problem.
10897 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10898 * to check the ring condition when the heartbeat is expiring
10899 * before doing the reset. This will prevent most unintended
10900 * resets.
10901 */
10902 if (!--tp->asf_counter) {
10903 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10904 tg3_wait_for_event_ack(tp);
10906 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10907 FWCMD_NICDRV_ALIVE3);
10908 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10909 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10910 TG3_FW_UPDATE_TIMEOUT_SEC);
10912 tg3_generate_fw_event(tp);
10913 }
10914 tp->asf_counter = tp->asf_multiplier;
10915 }
10917 spin_unlock(&tp->lock);
10919 restart_timer:
10920 tp->timer.expires = jiffies + tp->timer_offset;
10921 add_timer(&tp->timer);
10922 }
10924 static void tg3_timer_init(struct tg3 *tp)
10925 {
10926 if (tg3_flag(tp, TAGGED_STATUS) &&
10927 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10928 !tg3_flag(tp, 57765_CLASS))
10929 tp->timer_offset = HZ;
10930 else
10931 tp->timer_offset = HZ / 10;
10933 BUG_ON(tp->timer_offset > HZ);
10935 tp->timer_multiplier = (HZ / tp->timer_offset);
10936 tp->asf_multiplier = (HZ / tp->timer_offset) *
10937 TG3_FW_UPDATE_FREQ_SEC;
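/* Worked example (illustration only): on a HZ == 1000 kernel a
 * tagged-status chip uses timer_offset == HZ, so tg3_timer() fires
 * once a second and timer_multiplier == 1. With timer_offset ==
 * HZ / 10 the timer fires ten times a second and timer_multiplier ==
 * 10, so the once-per-second block still runs at the same rate, and
 * asf_multiplier stretches it to one heartbeat every
 * TG3_FW_UPDATE_FREQ_SEC seconds.
 */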
10939 init_timer(&tp->timer);
10940 tp->timer.data = (unsigned long) tp;
10941 tp->timer.function = tg3_timer;
10942 }
10944 static void tg3_timer_start(struct tg3 *tp)
10945 {
10946 tp->asf_counter = tp->asf_multiplier;
10947 tp->timer_counter = tp->timer_multiplier;
10949 tp->timer.expires = jiffies + tp->timer_offset;
10950 add_timer(&tp->timer);
10951 }
10953 static void tg3_timer_stop(struct tg3 *tp)
10954 {
10955 del_timer_sync(&tp->timer);
10956 }
10958 /* Restart hardware after configuration changes, self-test, etc.
10959 * Invoked with tp->lock held.
10960 */
10961 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
10962 __releases(tp->lock)
10963 __acquires(tp->lock)
10964 {
10965 int err;
10967 err = tg3_init_hw(tp, reset_phy);
10968 if (err) {
10969 netdev_err(tp->dev,
10970 "Failed to re-initialize device, aborting\n");
10971 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10972 tg3_full_unlock(tp);
10973 tg3_timer_stop(tp);
10974 tp->irq_sync = 0;
10975 tg3_napi_enable(tp);
10976 dev_close(tp->dev);
10977 tg3_full_lock(tp, 0);
10978 }
10979 return err;
10980 }
10982 static void tg3_reset_task(struct work_struct *work)
10983 {
10984 struct tg3 *tp = container_of(work, struct tg3, reset_task);
10985 int err;
10987 tg3_full_lock(tp, 0);
10989 if (!netif_running(tp->dev)) {
10990 tg3_flag_clear(tp, RESET_TASK_PENDING);
10991 tg3_full_unlock(tp);
10992 return;
10993 }
10995 tg3_full_unlock(tp);
10997 tg3_phy_stop(tp);
10999 tg3_netif_stop(tp);
11001 tg3_full_lock(tp, 1);
11003 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11004 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11005 tp->write32_rx_mbox = tg3_write_flush_reg32;
11006 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11007 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11008 }
11010 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11011 err = tg3_init_hw(tp, true);
11012 if (err)
11013 goto out;
11015 tg3_netif_start(tp);
11017 out:
11018 tg3_full_unlock(tp);
11020 if (!err)
11021 tg3_phy_start(tp);
11023 tg3_flag_clear(tp, RESET_TASK_PENDING);
11024 }
11026 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11027 {
11028 irq_handler_t fn;
11029 unsigned long flags;
11030 char *name;
11031 struct tg3_napi *tnapi = &tp->napi[irq_num];
11033 if (tp->irq_cnt == 1)
11034 name = tp->dev->name;
11035 else {
11036 name = &tnapi->irq_lbl[0];
11037 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
11038 name[IFNAMSIZ-1] = 0;
11039 }
11041 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11042 fn = tg3_msi;
11043 if (tg3_flag(tp, 1SHOT_MSI))
11044 fn = tg3_msi_1shot;
11045 flags = 0;
11046 } else {
11047 fn = tg3_interrupt;
11048 if (tg3_flag(tp, TAGGED_STATUS))
11049 fn = tg3_interrupt_tagged;
11050 flags = IRQF_SHARED;
11051 }
11053 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11054 }
11056 static int tg3_test_interrupt(struct tg3 *tp)
11057 {
11058 struct tg3_napi *tnapi = &tp->napi[0];
11059 struct net_device *dev = tp->dev;
11060 int err, i, intr_ok = 0;
11061 u32 val;
11063 if (!netif_running(dev))
11064 return -ENODEV;
11066 tg3_disable_ints(tp);
11068 free_irq(tnapi->irq_vec, tnapi);
11070 /*
11071 * Turn off MSI one shot mode. Otherwise this test has no
11072 * observable way to know whether the interrupt was delivered.
11073 */
11074 if (tg3_flag(tp, 57765_PLUS)) {
11075 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11076 tw32(MSGINT_MODE, val);
11077 }
11079 err = request_irq(tnapi->irq_vec, tg3_test_isr,
11080 IRQF_SHARED, dev->name, tnapi);
11081 if (err)
11082 return err;
11084 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11085 tg3_enable_ints(tp);
11087 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11088 tnapi->coal_now);
11090 for (i = 0; i < 5; i++) {
11091 u32 int_mbox, misc_host_ctrl;
11093 int_mbox = tr32_mailbox(tnapi->int_mbox);
11094 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11096 if ((int_mbox != 0) ||
11097 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11098 intr_ok = 1;
11099 break;
11100 }
11102 if (tg3_flag(tp, 57765_PLUS) &&
11103 tnapi->hw_status->status_tag != tnapi->last_tag)
11104 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11106 msleep(10);
11107 }
11109 tg3_disable_ints(tp);
11111 free_irq(tnapi->irq_vec, tnapi);
11113 err = tg3_request_irq(tp, 0);
11115 if (err)
11116 return err;
11118 if (intr_ok) {
11119 /* Reenable MSI one shot mode. */
11120 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11121 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11122 tw32(MSGINT_MODE, val);
11123 }
11124 return 0;
11125 }
11127 return -EIO;
11128 }
11130 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
11131 * successfully restored
11132 */
11133 static int tg3_test_msi(struct tg3 *tp)
11134 {
11135 int err;
11136 u16 pci_cmd;
11138 if (!tg3_flag(tp, USING_MSI))
11139 return 0;
11141 /* Turn off SERR reporting in case MSI terminates with Master
11142 * Abort.
11143 */
11144 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11145 pci_write_config_word(tp->pdev, PCI_COMMAND,
11146 pci_cmd & ~PCI_COMMAND_SERR);
11148 err = tg3_test_interrupt(tp);
11150 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11152 if (!err)
11153 return 0;
11155 /* other failures */
11156 if (err != -EIO)
11157 return err;
11159 /* MSI test failed, go back to INTx mode */
11160 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11161 "to INTx mode. Please report this failure to the PCI "
11162 "maintainer and include system chipset information\n");
11164 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11166 pci_disable_msi(tp->pdev);
11168 tg3_flag_clear(tp, USING_MSI);
11169 tp->napi[0].irq_vec = tp->pdev->irq;
11171 err = tg3_request_irq(tp, 0);
11172 if (err)
11173 return err;
11175 /* Need to reset the chip because the MSI cycle may have terminated
11176 * with Master Abort.
11177 */
11178 tg3_full_lock(tp, 1);
11180 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11181 err = tg3_init_hw(tp, true);
11183 tg3_full_unlock(tp);
11185 if (err)
11186 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11188 return err;
11189 }
11191 static int tg3_request_firmware(struct tg3 *tp)
11192 {
11193 const struct tg3_firmware_hdr *fw_hdr;
11195 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11196 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11197 tp->fw_needed);
11198 return -ENOENT;
11199 }
11201 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11203 /* Firmware blob starts with version numbers, followed by
11204 * start address and _full_ length including BSS sections
11205 * (which must be longer than the actual data, of course
11206 */
11208 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
11209 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11210 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11211 tp->fw_len, tp->fw_needed);
11212 release_firmware(tp->fw);
11213 tp->fw = NULL;
11214 return -EINVAL;
11215 }
11217 /* We no longer need firmware; we have it. */
11218 tp->fw_needed = NULL;
11219 return 0;
11220 }
11222 static u32 tg3_irq_count(struct tg3 *tp)
11223 {
11224 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11226 if (irq_cnt > 1) {
11227 /* We want as many rx rings enabled as there are cpus.
11228 * In multiqueue MSI-X mode, the first MSI-X vector
11229 * only deals with link interrupts, etc, so we add
11230 * one to the number of vectors we are requesting.
11231 */
11232 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11233 }
11235 return irq_cnt;
11236 }
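/* Worked example (illustration only): with rxq_cnt == 4 and
 * txq_cnt == 1, irq_cnt starts at max(4, 1) == 4; since that is
 * greater than one, an extra vector is reserved for link and error
 * interrupts, giving min(4 + 1, irq_max) == 5 MSI-X vectors on a chip
 * whose irq_max is at least 5.
 */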
11238 static bool tg3_enable_msix(struct tg3 *tp)
11239 {
11240 int i, rc;
11241 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11243 tp->txq_cnt = tp->txq_req;
11244 tp->rxq_cnt = tp->rxq_req;
11245 if (!tp->rxq_cnt)
11246 tp->rxq_cnt = netif_get_num_default_rss_queues();
11247 if (tp->rxq_cnt > tp->rxq_max)
11248 tp->rxq_cnt = tp->rxq_max;
11250 /* Disable multiple TX rings by default. Simple round-robin hardware
11251 * scheduling of the TX rings can cause starvation of rings with
11252 * small packets when other rings have TSO or jumbo packets.
11253 */
11254 if (!tp->txq_req)
11255 tp->txq_cnt = 1;
11257 tp->irq_cnt = tg3_irq_count(tp);
11259 for (i = 0; i < tp->irq_max; i++) {
11260 msix_ent[i].entry = i;
11261 msix_ent[i].vector = 0;
11262 }
11264 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
11265 if (rc < 0) {
11266 return false;
11267 } else if (rc != 0) {
11268 if (pci_enable_msix(tp->pdev, msix_ent, rc))
11269 return false;
11270 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11271 tp->irq_cnt, rc);
11272 tp->irq_cnt = rc;
11273 tp->rxq_cnt = max(rc - 1, 1);
11274 if (tp->txq_cnt)
11275 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11276 }
11278 for (i = 0; i < tp->irq_max; i++)
11279 tp->napi[i].irq_vec = msix_ent[i].vector;
11281 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11282 pci_disable_msix(tp->pdev);
11283 return false;
11284 }
11286 if (tp->irq_cnt == 1)
11287 return true;
11289 tg3_flag_set(tp, ENABLE_RSS);
11291 if (tp->txq_cnt > 1)
11292 tg3_flag_set(tp, ENABLE_TSS);
11294 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11296 return true;
11297 }
11299 static void tg3_ints_init(struct tg3 *tp)
11300 {
11301 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11302 !tg3_flag(tp, TAGGED_STATUS)) {
11303 /* All MSI supporting chips should support tagged
11304 * status. Assert that this is the case.
11305 */
11306 netdev_warn(tp->dev,
11307 "MSI without TAGGED_STATUS? Not using MSI\n");
11308 goto defcfg;
11309 }
11311 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11312 tg3_flag_set(tp, USING_MSIX);
11313 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11314 tg3_flag_set(tp, USING_MSI);
11316 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11317 u32 msi_mode = tr32(MSGINT_MODE);
11318 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11319 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11320 if (!tg3_flag(tp, 1SHOT_MSI))
11321 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11322 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11323 }
11324 defcfg:
11325 if (!tg3_flag(tp, USING_MSIX)) {
11326 tp->irq_cnt = 1;
11327 tp->napi[0].irq_vec = tp->pdev->irq;
11328 }
11330 if (tp->irq_cnt == 1) {
11331 tp->txq_cnt = 1;
11332 tp->rxq_cnt = 1;
11333 netif_set_real_num_tx_queues(tp->dev, 1);
11334 netif_set_real_num_rx_queues(tp->dev, 1);
11335 }
11336 }
11338 static void tg3_ints_fini(struct tg3 *tp)
11339 {
11340 if (tg3_flag(tp, USING_MSIX))
11341 pci_disable_msix(tp->pdev);
11342 else if (tg3_flag(tp, USING_MSI))
11343 pci_disable_msi(tp->pdev);
11344 tg3_flag_clear(tp, USING_MSI);
11345 tg3_flag_clear(tp, USING_MSIX);
11346 tg3_flag_clear(tp, ENABLE_RSS);
11347 tg3_flag_clear(tp, ENABLE_TSS);
11348 }
11350 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11351 bool init)
11352 {
11353 struct net_device *dev = tp->dev;
11354 int i, err;
11356 /*
11357 * Setup interrupts first so we know how
11358 * many NAPI resources to allocate
11359 */
11360 tg3_ints_init(tp);
11362 tg3_rss_check_indir_tbl(tp);
11364 /* The placement of this call is tied
11365 * to the setup and use of Host TX descriptors.
11366 */
11367 err = tg3_alloc_consistent(tp);
11368 if (err)
11369 goto out_ints_fini;
11371 tg3_napi_init(tp);
11373 tg3_napi_enable(tp);
11375 for (i = 0; i < tp->irq_cnt; i++) {
11376 struct tg3_napi *tnapi = &tp->napi[i];
11377 err = tg3_request_irq(tp, i);
11378 if (err) {
11379 for (i--; i >= 0; i--) {
11380 tnapi = &tp->napi[i];
11381 free_irq(tnapi->irq_vec, tnapi);
11382 }
11383 goto out_napi_fini;
11384 }
11385 }
11387 tg3_full_lock(tp, 0);
11389 if (init)
11390 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11392 err = tg3_init_hw(tp, reset_phy);
11393 if (err) {
11394 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11395 tg3_free_rings(tp);
11396 }
11398 tg3_full_unlock(tp);
11400 if (err)
11401 goto out_free_irq;
11403 if (test_irq && tg3_flag(tp, USING_MSI)) {
11404 err = tg3_test_msi(tp);
11406 if (err) {
11407 tg3_full_lock(tp, 0);
11408 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11409 tg3_free_rings(tp);
11410 tg3_full_unlock(tp);
11412 goto out_napi_fini;
11413 }
11415 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11416 u32 val = tr32(PCIE_TRANSACTION_CFG);
11418 tw32(PCIE_TRANSACTION_CFG,
11419 val | PCIE_TRANS_CFG_1SHOT_MSI);
11420 }
11421 }
11423 tg3_phy_start(tp);
11425 tg3_hwmon_open(tp);
11427 tg3_full_lock(tp, 0);
11429 tg3_timer_start(tp);
11430 tg3_flag_set(tp, INIT_COMPLETE);
11431 tg3_enable_ints(tp);
11433 if (init)
11434 tg3_ptp_init(tp);
11435 else
11436 tg3_ptp_resume(tp);
11439 tg3_full_unlock(tp);
11441 netif_tx_start_all_queues(dev);
11443 /*
11444 * Reset loopback feature if it was turned on while the device was down
11445 * make sure that it's installed properly now.
11446 */
11447 if (dev->features & NETIF_F_LOOPBACK)
11448 tg3_set_loopback(dev, dev->features);
11450 return 0;
11452 out_free_irq:
11453 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11454 struct tg3_napi *tnapi = &tp->napi[i];
11455 free_irq(tnapi->irq_vec, tnapi);
11456 }
11458 out_napi_fini:
11459 tg3_napi_disable(tp);
11460 tg3_napi_fini(tp);
11461 tg3_free_consistent(tp);
11463 out_ints_fini:
11464 tg3_ints_fini(tp);
11466 return err;
11467 }
11469 static void tg3_stop(struct tg3 *tp)
11470 {
11471 int i;
11473 tg3_reset_task_cancel(tp);
11474 tg3_netif_stop(tp);
11476 tg3_timer_stop(tp);
11478 tg3_hwmon_close(tp);
11480 tg3_phy_stop(tp);
11482 tg3_full_lock(tp, 1);
11484 tg3_disable_ints(tp);
11486 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11487 tg3_free_rings(tp);
11488 tg3_flag_clear(tp, INIT_COMPLETE);
11490 tg3_full_unlock(tp);
11492 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11493 struct tg3_napi *tnapi = &tp->napi[i];
11494 free_irq(tnapi->irq_vec, tnapi);
11495 }
11497 tg3_ints_fini(tp);
11499 tg3_napi_fini(tp);
11501 tg3_free_consistent(tp);
11502 }
11504 static int tg3_open(struct net_device *dev)
11505 {
11506 struct tg3 *tp = netdev_priv(dev);
11507 int err;
11509 if (tp->fw_needed) {
11510 err = tg3_request_firmware(tp);
11511 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11512 if (err) {
11513 netdev_warn(tp->dev, "EEE capability disabled\n");
11514 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11515 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11516 netdev_warn(tp->dev, "EEE capability restored\n");
11517 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11518 }
11519 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11520 if (err)
11521 return err;
11522 } else if (err) {
11523 netdev_warn(tp->dev, "TSO capability disabled\n");
11524 tg3_flag_clear(tp, TSO_CAPABLE);
11525 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11526 netdev_notice(tp->dev, "TSO capability restored\n");
11527 tg3_flag_set(tp, TSO_CAPABLE);
11528 }
11529 }
11531 tg3_carrier_off(tp);
11533 err = tg3_power_up(tp);
11534 if (err)
11535 return err;
11537 tg3_full_lock(tp, 0);
11539 tg3_disable_ints(tp);
11540 tg3_flag_clear(tp, INIT_COMPLETE);
11542 tg3_full_unlock(tp);
11544 err = tg3_start(tp,
11545 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11546 true, true);
11547 if (err) {
11548 tg3_frob_aux_power(tp, false);
11549 pci_set_power_state(tp->pdev, PCI_D3hot);
11550 }
11552 if (tg3_flag(tp, PTP_CAPABLE)) {
11553 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
11554 &tp->pdev->dev);
11555 if (IS_ERR(tp->ptp_clock))
11556 tp->ptp_clock = NULL;
11557 }
11559 return err;
11560 }
11562 static int tg3_close(struct net_device *dev)
11563 {
11564 struct tg3 *tp = netdev_priv(dev);
11566 tg3_ptp_fini(tp);
11568 tg3_stop(tp);
11570 /* Clear stats across close / open calls */
11571 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11572 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11574 tg3_power_down_prepare(tp);
11576 tg3_carrier_off(tp);
11578 return 0;
11579 }
11581 static inline u64 get_stat64(tg3_stat64_t *val)
11582 {
11583 return ((u64)val->high << 32) | ((u64)val->low);
11584 }
11586 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11587 {
11588 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11590 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11591 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11592 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11593 u32 val;
11595 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11596 tg3_writephy(tp, MII_TG3_TEST1,
11597 val | MII_TG3_TEST1_CRC_EN);
11598 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11599 } else
11600 val = 0;
11602 tp->phy_crc_errors += val;
11604 return tp->phy_crc_errors;
11605 }
11607 return get_stat64(&hw_stats->rx_fcs_errors);
11608 }
11610 #define ESTAT_ADD(member) \
11611 estats->member = old_estats->member + \
11612 get_stat64(&hw_stats->member)
11614 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11615 {
11616 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11617 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11619 ESTAT_ADD(rx_octets);
11620 ESTAT_ADD(rx_fragments);
11621 ESTAT_ADD(rx_ucast_packets);
11622 ESTAT_ADD(rx_mcast_packets);
11623 ESTAT_ADD(rx_bcast_packets);
11624 ESTAT_ADD(rx_fcs_errors);
11625 ESTAT_ADD(rx_align_errors);
11626 ESTAT_ADD(rx_xon_pause_rcvd);
11627 ESTAT_ADD(rx_xoff_pause_rcvd);
11628 ESTAT_ADD(rx_mac_ctrl_rcvd);
11629 ESTAT_ADD(rx_xoff_entered);
11630 ESTAT_ADD(rx_frame_too_long_errors);
11631 ESTAT_ADD(rx_jabbers);
11632 ESTAT_ADD(rx_undersize_packets);
11633 ESTAT_ADD(rx_in_length_errors);
11634 ESTAT_ADD(rx_out_length_errors);
11635 ESTAT_ADD(rx_64_or_less_octet_packets);
11636 ESTAT_ADD(rx_65_to_127_octet_packets);
11637 ESTAT_ADD(rx_128_to_255_octet_packets);
11638 ESTAT_ADD(rx_256_to_511_octet_packets);
11639 ESTAT_ADD(rx_512_to_1023_octet_packets);
11640 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11641 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11642 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11643 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11644 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11646 ESTAT_ADD(tx_octets);
11647 ESTAT_ADD(tx_collisions);
11648 ESTAT_ADD(tx_xon_sent);
11649 ESTAT_ADD(tx_xoff_sent);
11650 ESTAT_ADD(tx_flow_control);
11651 ESTAT_ADD(tx_mac_errors);
11652 ESTAT_ADD(tx_single_collisions);
11653 ESTAT_ADD(tx_mult_collisions);
11654 ESTAT_ADD(tx_deferred);
11655 ESTAT_ADD(tx_excessive_collisions);
11656 ESTAT_ADD(tx_late_collisions);
11657 ESTAT_ADD(tx_collide_2times);
11658 ESTAT_ADD(tx_collide_3times);
11659 ESTAT_ADD(tx_collide_4times);
11660 ESTAT_ADD(tx_collide_5times);
11661 ESTAT_ADD(tx_collide_6times);
11662 ESTAT_ADD(tx_collide_7times);
11663 ESTAT_ADD(tx_collide_8times);
11664 ESTAT_ADD(tx_collide_9times);
11665 ESTAT_ADD(tx_collide_10times);
11666 ESTAT_ADD(tx_collide_11times);
11667 ESTAT_ADD(tx_collide_12times);
11668 ESTAT_ADD(tx_collide_13times);
11669 ESTAT_ADD(tx_collide_14times);
11670 ESTAT_ADD(tx_collide_15times);
11671 ESTAT_ADD(tx_ucast_packets);
11672 ESTAT_ADD(tx_mcast_packets);
11673 ESTAT_ADD(tx_bcast_packets);
11674 ESTAT_ADD(tx_carrier_sense_errors);
11675 ESTAT_ADD(tx_discards);
11676 ESTAT_ADD(tx_errors);
11678 ESTAT_ADD(dma_writeq_full);
11679 ESTAT_ADD(dma_write_prioq_full);
11680 ESTAT_ADD(rxbds_empty);
11681 ESTAT_ADD(rx_discards);
11682 ESTAT_ADD(rx_errors);
11683 ESTAT_ADD(rx_threshold_hit);
11685 ESTAT_ADD(dma_readq_full);
11686 ESTAT_ADD(dma_read_prioq_full);
11687 ESTAT_ADD(tx_comp_queue_full);
11689 ESTAT_ADD(ring_set_send_prod_index);
11690 ESTAT_ADD(ring_status_update);
11691 ESTAT_ADD(nic_irqs);
11692 ESTAT_ADD(nic_avoided_irqs);
11693 ESTAT_ADD(nic_tx_threshold_hit);
11695 ESTAT_ADD(mbuf_lwm_thresh_hit);
11696 }
11698 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11699 {
11700 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11701 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11703 stats->rx_packets = old_stats->rx_packets +
11704 get_stat64(&hw_stats->rx_ucast_packets) +
11705 get_stat64(&hw_stats->rx_mcast_packets) +
11706 get_stat64(&hw_stats->rx_bcast_packets);
11708 stats->tx_packets = old_stats->tx_packets +
11709 get_stat64(&hw_stats->tx_ucast_packets) +
11710 get_stat64(&hw_stats->tx_mcast_packets) +
11711 get_stat64(&hw_stats->tx_bcast_packets);
11713 stats->rx_bytes = old_stats->rx_bytes +
11714 get_stat64(&hw_stats->rx_octets);
11715 stats->tx_bytes = old_stats->tx_bytes +
11716 get_stat64(&hw_stats->tx_octets);
11718 stats->rx_errors = old_stats->rx_errors +
11719 get_stat64(&hw_stats->rx_errors);
11720 stats->tx_errors = old_stats->tx_errors +
11721 get_stat64(&hw_stats->tx_errors) +
11722 get_stat64(&hw_stats->tx_mac_errors) +
11723 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11724 get_stat64(&hw_stats->tx_discards);
11726 stats->multicast = old_stats->multicast +
11727 get_stat64(&hw_stats->rx_mcast_packets);
11728 stats->collisions = old_stats->collisions +
11729 get_stat64(&hw_stats->tx_collisions);
11731 stats->rx_length_errors = old_stats->rx_length_errors +
11732 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11733 get_stat64(&hw_stats->rx_undersize_packets);
11735 stats->rx_over_errors = old_stats->rx_over_errors +
11736 get_stat64(&hw_stats->rxbds_empty);
11737 stats->rx_frame_errors = old_stats->rx_frame_errors +
11738 get_stat64(&hw_stats->rx_align_errors);
11739 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11740 get_stat64(&hw_stats->tx_discards);
11741 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11742 get_stat64(&hw_stats->tx_carrier_sense_errors);
11744 stats->rx_crc_errors = old_stats->rx_crc_errors +
11745 tg3_calc_crc_errors(tp);
11747 stats->rx_missed_errors = old_stats->rx_missed_errors +
11748 get_stat64(&hw_stats->rx_discards);
11750 stats->rx_dropped = tp->rx_dropped;
11751 stats->tx_dropped = tp->tx_dropped;
11754 static int tg3_get_regs_len(struct net_device *dev)
11756 return TG3_REG_BLK_SIZE;
11759 static void tg3_get_regs(struct net_device *dev,
11760 struct ethtool_regs *regs, void *_p)
11762 struct tg3 *tp = netdev_priv(dev);
11766 memset(_p, 0, TG3_REG_BLK_SIZE);
11768 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11771 tg3_full_lock(tp, 0);
11773 tg3_dump_legacy_regs(tp, (u32 *)_p);
11775 tg3_full_unlock(tp);
11778 static int tg3_get_eeprom_len(struct net_device *dev)
11780 struct tg3 *tp = netdev_priv(dev);
11782 return tp->nvram_size;
11785 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11787 struct tg3 *tp = netdev_priv(dev);
11790 u32 i, offset, len, b_offset, b_count;
11793 if (tg3_flag(tp, NO_NVRAM))
11796 offset = eeprom->offset;
11800 eeprom->magic = TG3_EEPROM_MAGIC;
11803 /* adjustments to start on required 4 byte boundary */
11804 b_offset = offset & 3;
11805 b_count = 4 - b_offset;
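/* Illustration: offset = 5 gives b_offset = 1 and b_count = 3, so the
 * word at offset 4 is read and its upper three bytes are copied out.
 */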
11806 if (b_count > len) {
11807 /* i.e. offset=1 len=2 */
11810 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11813 memcpy(data, ((char *)&val) + b_offset, b_count);
11816 eeprom->len += b_count;
11819 /* read bytes up to the last 4 byte boundary */
11820 pd = &data[eeprom->len];
11821 for (i = 0; i < (len - (len & 3)); i += 4) {
11822 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11827 memcpy(pd + i, &val, 4);
11832 /* read last bytes not ending on 4 byte boundary */
11833 pd = &data[eeprom->len];
11835 b_offset = offset + len - b_count;
11836 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11839 memcpy(pd, &val, b_count);
11840 eeprom->len += b_count;
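/* Writes carry the same 4-byte alignment constraint: tg3_set_eeprom()
 * below reads back the partial words at an unaligned head or tail and
 * merges them with the user data before programming the whole block.
 */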
11845 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11847 struct tg3 *tp = netdev_priv(dev);
11849 u32 offset, len, b_offset, odd_len;
11853 if (tg3_flag(tp, NO_NVRAM) ||
11854 eeprom->magic != TG3_EEPROM_MAGIC)
11857 offset = eeprom->offset;
11860 	b_offset = offset & 3;
11860 	if (b_offset) {
11861 /* adjustments to start on required 4 byte boundary */
11862 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11873 /* adjustments to end on required 4 byte boundary */
11875 len = (len + 3) & ~3;
11876 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11882 if (b_offset || odd_len) {
11883 buf = kmalloc(len, GFP_KERNEL);
11887 memcpy(buf, &start, 4);
11889 memcpy(buf+len-4, &end, 4);
11890 memcpy(buf + b_offset, data, eeprom->len);
11893 ret = tg3_nvram_write_block(tp, offset, len, buf);
11901 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11903 struct tg3 *tp = netdev_priv(dev);
11905 if (tg3_flag(tp, USE_PHYLIB)) {
11906 struct phy_device *phydev;
11907 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11909 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11910 return phy_ethtool_gset(phydev, cmd);
11913 cmd->supported = (SUPPORTED_Autoneg);
11915 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11916 cmd->supported |= (SUPPORTED_1000baseT_Half |
11917 SUPPORTED_1000baseT_Full);
11919 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11920 cmd->supported |= (SUPPORTED_100baseT_Half |
11921 SUPPORTED_100baseT_Full |
11922 SUPPORTED_10baseT_Half |
11923 SUPPORTED_10baseT_Full |
11925 cmd->port = PORT_TP;
11927 cmd->supported |= SUPPORTED_FIBRE;
11928 cmd->port = PORT_FIBRE;
11931 cmd->advertising = tp->link_config.advertising;
11932 if (tg3_flag(tp, PAUSE_AUTONEG)) {
11933 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11934 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11935 cmd->advertising |= ADVERTISED_Pause;
11937 cmd->advertising |= ADVERTISED_Pause |
11938 ADVERTISED_Asym_Pause;
11940 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11941 cmd->advertising |= ADVERTISED_Asym_Pause;
11944 if (netif_running(dev) && tp->link_up) {
11945 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11946 cmd->duplex = tp->link_config.active_duplex;
11947 cmd->lp_advertising = tp->link_config.rmt_adv;
11948 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11949 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11950 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11952 cmd->eth_tp_mdix = ETH_TP_MDI;
11955 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11956 cmd->duplex = DUPLEX_UNKNOWN;
11957 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11959 cmd->phy_address = tp->phy_addr;
11960 cmd->transceiver = XCVR_INTERNAL;
11961 cmd->autoneg = tp->link_config.autoneg;
11967 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11969 struct tg3 *tp = netdev_priv(dev);
11970 u32 speed = ethtool_cmd_speed(cmd);
11972 if (tg3_flag(tp, USE_PHYLIB)) {
11973 struct phy_device *phydev;
11974 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11976 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11977 return phy_ethtool_sset(phydev, cmd);
11980 if (cmd->autoneg != AUTONEG_ENABLE &&
11981 cmd->autoneg != AUTONEG_DISABLE)
11984 if (cmd->autoneg == AUTONEG_DISABLE &&
11985 cmd->duplex != DUPLEX_FULL &&
11986 cmd->duplex != DUPLEX_HALF)
11989 if (cmd->autoneg == AUTONEG_ENABLE) {
11990 u32 mask = ADVERTISED_Autoneg |
11992 ADVERTISED_Asym_Pause;
11994 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11995 mask |= ADVERTISED_1000baseT_Half |
11996 ADVERTISED_1000baseT_Full;
11998 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11999 mask |= ADVERTISED_100baseT_Half |
12000 ADVERTISED_100baseT_Full |
12001 ADVERTISED_10baseT_Half |
12002 ADVERTISED_10baseT_Full |
12005 mask |= ADVERTISED_FIBRE;
12007 if (cmd->advertising & ~mask)
12010 mask &= (ADVERTISED_1000baseT_Half |
12011 ADVERTISED_1000baseT_Full |
12012 ADVERTISED_100baseT_Half |
12013 ADVERTISED_100baseT_Full |
12014 ADVERTISED_10baseT_Half |
12015 ADVERTISED_10baseT_Full);
12017 cmd->advertising &= mask;
12019 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12020 if (speed != SPEED_1000)
12023 if (cmd->duplex != DUPLEX_FULL)
12026 if (speed != SPEED_100 &&
12032 tg3_full_lock(tp, 0);
12034 tp->link_config.autoneg = cmd->autoneg;
12035 if (cmd->autoneg == AUTONEG_ENABLE) {
12036 tp->link_config.advertising = (cmd->advertising |
12037 ADVERTISED_Autoneg);
12038 tp->link_config.speed = SPEED_UNKNOWN;
12039 tp->link_config.duplex = DUPLEX_UNKNOWN;
12041 tp->link_config.advertising = 0;
12042 tp->link_config.speed = speed;
12043 tp->link_config.duplex = cmd->duplex;
12046 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12048 tg3_warn_mgmt_link_flap(tp);
12050 if (netif_running(dev))
12051 tg3_setup_phy(tp, true);
12053 tg3_full_unlock(tp);
12058 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12060 struct tg3 *tp = netdev_priv(dev);
12062 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12063 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12064 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12065 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12068 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12070 struct tg3 *tp = netdev_priv(dev);
12072 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12073 wol->supported = WAKE_MAGIC;
12075 wol->supported = 0;
12077 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12078 wol->wolopts = WAKE_MAGIC;
12079 memset(&wol->sopass, 0, sizeof(wol->sopass));
12082 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12084 struct tg3 *tp = netdev_priv(dev);
12085 struct device *dp = &tp->pdev->dev;
12087 if (wol->wolopts & ~WAKE_MAGIC)
12089 if ((wol->wolopts & WAKE_MAGIC) &&
12090 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12093 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12095 spin_lock_bh(&tp->lock);
12096 if (device_may_wakeup(dp))
12097 tg3_flag_set(tp, WOL_ENABLE);
12099 tg3_flag_clear(tp, WOL_ENABLE);
12100 spin_unlock_bh(&tp->lock);
12105 static u32 tg3_get_msglevel(struct net_device *dev)
12107 struct tg3 *tp = netdev_priv(dev);
12108 return tp->msg_enable;
12111 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12113 struct tg3 *tp = netdev_priv(dev);
12114 tp->msg_enable = value;
12117 static int tg3_nway_reset(struct net_device *dev)
12119 struct tg3 *tp = netdev_priv(dev);
12122 if (!netif_running(dev))
12125 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12128 tg3_warn_mgmt_link_flap(tp);
12130 if (tg3_flag(tp, USE_PHYLIB)) {
12131 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12133 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
12137 spin_lock_bh(&tp->lock);
12139 tg3_readphy(tp, MII_BMCR, &bmcr);
12140 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12141 ((bmcr & BMCR_ANENABLE) ||
12142 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12143 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12147 spin_unlock_bh(&tp->lock);
12153 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12155 struct tg3 *tp = netdev_priv(dev);
12157 ering->rx_max_pending = tp->rx_std_ring_mask;
12158 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12159 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12161 ering->rx_jumbo_max_pending = 0;
12163 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12165 ering->rx_pending = tp->rx_pending;
12166 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12167 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12169 ering->rx_jumbo_pending = 0;
12171 ering->tx_pending = tp->napi[0].tx_pending;
12174 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12176 struct tg3 *tp = netdev_priv(dev);
12177 int i, irq_sync = 0, err = 0;
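/* Sanity-check the request first: tx_pending must exceed
 * MAX_SKB_FRAGS so one maximally fragmented skb still fits in the
 * ring, and TSO_BUG devices need triple that headroom since the
 * driver may segment large TSO packets itself.
 */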
12179 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12180 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12181 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12182 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12183 (tg3_flag(tp, TSO_BUG) &&
12184 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12187 if (netif_running(dev)) {
12189 tg3_netif_stop(tp);
12193 tg3_full_lock(tp, irq_sync);
12195 tp->rx_pending = ering->rx_pending;
12197 if (tg3_flag(tp, MAX_RXPEND_64) &&
12198 tp->rx_pending > 63)
12199 tp->rx_pending = 63;
12200 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12202 for (i = 0; i < tp->irq_max; i++)
12203 tp->napi[i].tx_pending = ering->tx_pending;
12205 if (netif_running(dev)) {
12206 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12207 err = tg3_restart_hw(tp, false);
12209 tg3_netif_start(tp);
12212 tg3_full_unlock(tp);
12214 if (irq_sync && !err)
12220 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12222 struct tg3 *tp = netdev_priv(dev);
12224 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12226 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12227 epause->rx_pause = 1;
12229 epause->rx_pause = 0;
12231 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12232 epause->tx_pause = 1;
12234 epause->tx_pause = 0;
12237 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12239 struct tg3 *tp = netdev_priv(dev);
12242 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12243 tg3_warn_mgmt_link_flap(tp);
12245 if (tg3_flag(tp, USE_PHYLIB)) {
12247 struct phy_device *phydev;
12249 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12251 if (!(phydev->supported & SUPPORTED_Pause) ||
12252 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12253 (epause->rx_pause != epause->tx_pause)))
12256 tp->link_config.flowctrl = 0;
12257 if (epause->rx_pause) {
12258 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12260 if (epause->tx_pause) {
12261 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12262 newadv = ADVERTISED_Pause;
12264 newadv = ADVERTISED_Pause |
12265 ADVERTISED_Asym_Pause;
12266 } else if (epause->tx_pause) {
12267 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12268 newadv = ADVERTISED_Asym_Pause;
12272 if (epause->autoneg)
12273 tg3_flag_set(tp, PAUSE_AUTONEG);
12275 tg3_flag_clear(tp, PAUSE_AUTONEG);
12277 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12278 u32 oldadv = phydev->advertising &
12279 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12280 if (oldadv != newadv) {
12281 phydev->advertising &=
12282 ~(ADVERTISED_Pause |
12283 ADVERTISED_Asym_Pause);
12284 phydev->advertising |= newadv;
12285 if (phydev->autoneg) {
12287 /* Always renegotiate the link to
12288 * inform our link partner of our
12289 * flow control settings, even if the
12290 * flow control is forced. Let
12291 * tg3_adjust_link() do the final
12292 * flow control setup.
12294 return phy_start_aneg(phydev);
12298 if (!epause->autoneg)
12299 tg3_setup_flow_control(tp, 0, 0);
12301 tp->link_config.advertising &=
12302 ~(ADVERTISED_Pause |
12303 ADVERTISED_Asym_Pause);
12304 tp->link_config.advertising |= newadv;
12309 if (netif_running(dev)) {
12310 tg3_netif_stop(tp);
12314 tg3_full_lock(tp, irq_sync);
12316 if (epause->autoneg)
12317 tg3_flag_set(tp, PAUSE_AUTONEG);
12319 tg3_flag_clear(tp, PAUSE_AUTONEG);
12320 if (epause->rx_pause)
12321 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12323 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12324 if (epause->tx_pause)
12325 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12327 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12329 if (netif_running(dev)) {
12330 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12331 err = tg3_restart_hw(tp, false);
12333 tg3_netif_start(tp);
12336 tg3_full_unlock(tp);
12339 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12344 static int tg3_get_sset_count(struct net_device *dev, int sset)
12348 return TG3_NUM_TEST;
12350 return TG3_NUM_STATS;
12352 return -EOPNOTSUPP;
12356 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12357 u32 *rules __always_unused)
12359 struct tg3 *tp = netdev_priv(dev);
12361 if (!tg3_flag(tp, SUPPORT_MSIX))
12362 return -EOPNOTSUPP;
12364 switch (info->cmd) {
12365 case ETHTOOL_GRXRINGS:
12366 if (netif_running(tp->dev))
12367 info->data = tp->rxq_cnt;
12369 info->data = num_online_cpus();
12370 if (info->data > TG3_RSS_MAX_NUM_QS)
12371 info->data = TG3_RSS_MAX_NUM_QS;
12374 /* The first interrupt vector only
12375 * handles link interrupts.
12381 return -EOPNOTSUPP;
12385 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12388 struct tg3 *tp = netdev_priv(dev);
12390 if (tg3_flag(tp, SUPPORT_MSIX))
12391 size = TG3_RSS_INDIR_TBL_SIZE;
12396 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
12398 struct tg3 *tp = netdev_priv(dev);
12401 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12402 indir[i] = tp->rss_ind_tbl[i];
12407 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
12409 struct tg3 *tp = netdev_priv(dev);
12412 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12413 tp->rss_ind_tbl[i] = indir[i];
12415 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12418 /* It is legal to write the indirection
12419 * table while the device is running.
12421 tg3_full_lock(tp, 0);
12422 tg3_rss_write_indir_tbl(tp);
12423 tg3_full_unlock(tp);
12428 static void tg3_get_channels(struct net_device *dev,
12429 struct ethtool_channels *channel)
12431 struct tg3 *tp = netdev_priv(dev);
12432 u32 deflt_qs = netif_get_num_default_rss_queues();
12434 channel->max_rx = tp->rxq_max;
12435 channel->max_tx = tp->txq_max;
12437 if (netif_running(dev)) {
12438 channel->rx_count = tp->rxq_cnt;
12439 channel->tx_count = tp->txq_cnt;
12442 channel->rx_count = tp->rxq_req;
12444 channel->rx_count = min(deflt_qs, tp->rxq_max);
12447 channel->tx_count = tp->txq_req;
12449 channel->tx_count = min(deflt_qs, tp->txq_max);
12453 static int tg3_set_channels(struct net_device *dev,
12454 struct ethtool_channels *channel)
12456 struct tg3 *tp = netdev_priv(dev);
12458 if (!tg3_flag(tp, SUPPORT_MSIX))
12459 return -EOPNOTSUPP;
12461 if (channel->rx_count > tp->rxq_max ||
12462 channel->tx_count > tp->txq_max)
12465 tp->rxq_req = channel->rx_count;
12466 tp->txq_req = channel->tx_count;
12468 if (!netif_running(dev))
12473 tg3_carrier_off(tp);
12475 tg3_start(tp, true, false, false);
12480 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12482 switch (stringset) {
12484 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12487 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12490 WARN_ON(1); /* we need a WARN() */
12495 static int tg3_set_phys_id(struct net_device *dev,
12496 enum ethtool_phys_id_state state)
12498 struct tg3 *tp = netdev_priv(dev);
12500 if (!netif_running(tp->dev))
12504 case ETHTOOL_ID_ACTIVE:
12505 return 1; /* cycle on/off once per second */
12507 case ETHTOOL_ID_ON:
12508 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12509 LED_CTRL_1000MBPS_ON |
12510 LED_CTRL_100MBPS_ON |
12511 LED_CTRL_10MBPS_ON |
12512 LED_CTRL_TRAFFIC_OVERRIDE |
12513 LED_CTRL_TRAFFIC_BLINK |
12514 LED_CTRL_TRAFFIC_LED);
12517 case ETHTOOL_ID_OFF:
12518 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12519 LED_CTRL_TRAFFIC_OVERRIDE);
12522 case ETHTOOL_ID_INACTIVE:
12523 tw32(MAC_LED_CTRL, tp->led_ctrl);
12530 static void tg3_get_ethtool_stats(struct net_device *dev,
12531 struct ethtool_stats *estats, u64 *tmp_stats)
12533 struct tg3 *tp = netdev_priv(dev);
12536 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12538 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12541 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12545 u32 offset = 0, len = 0;
12548 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12551 if (magic == TG3_EEPROM_MAGIC) {
12552 for (offset = TG3_NVM_DIR_START;
12553 offset < TG3_NVM_DIR_END;
12554 offset += TG3_NVM_DIRENT_SIZE) {
12555 if (tg3_nvram_read(tp, offset, &val))
12558 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12559 TG3_NVM_DIRTYPE_EXTVPD)
12563 if (offset != TG3_NVM_DIR_END) {
12564 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12565 if (tg3_nvram_read(tp, offset + 4, &offset))
12568 offset = tg3_nvram_logical_addr(tp, offset);
12572 if (!offset || !len) {
12573 offset = TG3_NVM_VPD_OFF;
12574 len = TG3_NVM_VPD_LEN;
12577 buf = kmalloc(len, GFP_KERNEL);
12581 if (magic == TG3_EEPROM_MAGIC) {
12582 for (i = 0; i < len; i += 4) {
12583 /* The data is in little-endian format in NVRAM.
12584 * Use the big-endian read routines to preserve
12585 * the byte order as it exists in NVRAM.
12587 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12593 unsigned int pos = 0;
12595 ptr = (u8 *)&buf[0];
12596 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12597 cnt = pci_read_vpd(tp->pdev, pos,
12599 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12617 #define NVRAM_TEST_SIZE 0x100
12618 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12619 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12620 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12621 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12622 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12623 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12624 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12625 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12627 static int tg3_test_nvram(struct tg3 *tp)
12629 u32 csum, magic, len;
12631 int i, j, k, err = 0, size;
12633 if (tg3_flag(tp, NO_NVRAM))
12636 if (tg3_nvram_read(tp, 0, &magic) != 0)
12639 if (magic == TG3_EEPROM_MAGIC)
12640 size = NVRAM_TEST_SIZE;
12641 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12642 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12643 TG3_EEPROM_SB_FORMAT_1) {
12644 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12645 case TG3_EEPROM_SB_REVISION_0:
12646 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12648 case TG3_EEPROM_SB_REVISION_2:
12649 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12651 case TG3_EEPROM_SB_REVISION_3:
12652 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12654 case TG3_EEPROM_SB_REVISION_4:
12655 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12657 case TG3_EEPROM_SB_REVISION_5:
12658 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12660 case TG3_EEPROM_SB_REVISION_6:
12661 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12668 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12669 size = NVRAM_SELFBOOT_HW_SIZE;
12673 buf = kmalloc(size, GFP_KERNEL);
12678 for (i = 0, j = 0; i < size; i += 4, j++) {
12679 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12686 /* Selfboot format */
12687 magic = be32_to_cpu(buf[0]);
12688 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12689 TG3_EEPROM_MAGIC_FW) {
12690 u8 *buf8 = (u8 *) buf, csum8 = 0;
12692 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12693 TG3_EEPROM_SB_REVISION_2) {
12694 /* For rev 2, the csum doesn't include the MBA. */
12695 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12697 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12700 for (i = 0; i < size; i++)
12713 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12714 TG3_EEPROM_MAGIC_HW) {
12715 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12716 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12717 u8 *buf8 = (u8 *) buf;
12719 /* Separate the parity bits and the data bytes. */
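/* The check that follows enforces odd parity: each data byte plus its
 * stored parity bit must carry an odd number of set bits.
 */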
12720 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12721 if ((i == 0) || (i == 8)) {
12725 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12726 parity[k++] = buf8[i] & msk;
12728 } else if (i == 16) {
12732 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12733 parity[k++] = buf8[i] & msk;
12736 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12737 parity[k++] = buf8[i] & msk;
12740 data[j++] = buf8[i];
12744 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12745 u8 hw8 = hweight8(data[i]);
12747 if ((hw8 & 0x1) && parity[i])
12749 else if (!(hw8 & 0x1) && !parity[i])
12758 /* Bootstrap checksum at offset 0x10 */
12759 csum = calc_crc((unsigned char *) buf, 0x10);
12760 if (csum != le32_to_cpu(buf[0x10/4]))
12763 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12764 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12765 if (csum != le32_to_cpu(buf[0xfc/4]))
12770 buf = tg3_vpd_readblock(tp, &len);
12774 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12776 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12780 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12783 i += PCI_VPD_LRDT_TAG_SIZE;
12784 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12785 PCI_VPD_RO_KEYWORD_CHKSUM);
12789 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12791 for (i = 0; i <= j; i++)
12792 csum8 += ((u8 *)buf)[i];
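/* A valid VPD read-only section sums to zero, byte-wise, over
 * everything up to and including the checksum byte itself.
 */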
12806 #define TG3_SERDES_TIMEOUT_SEC 2
12807 #define TG3_COPPER_TIMEOUT_SEC 6
12809 static int tg3_test_link(struct tg3 *tp)
12813 if (!netif_running(tp->dev))
12816 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12817 max = TG3_SERDES_TIMEOUT_SEC;
12819 max = TG3_COPPER_TIMEOUT_SEC;
12821 for (i = 0; i < max; i++) {
12825 if (msleep_interruptible(1000))
12832 /* Only test the commonly used registers */
12833 static int tg3_test_registers(struct tg3 *tp)
12835 int i, is_5705, is_5750;
12836 u32 offset, read_mask, write_mask, val, save_val, read_val;
12840 #define TG3_FL_5705 0x1
12841 #define TG3_FL_NOT_5705 0x2
12842 #define TG3_FL_NOT_5788 0x4
12843 #define TG3_FL_NOT_5750 0x8
12847 /* MAC Control Registers */
12848 { MAC_MODE, TG3_FL_NOT_5705,
12849 0x00000000, 0x00ef6f8c },
12850 { MAC_MODE, TG3_FL_5705,
12851 0x00000000, 0x01ef6b8c },
12852 { MAC_STATUS, TG3_FL_NOT_5705,
12853 0x03800107, 0x00000000 },
12854 { MAC_STATUS, TG3_FL_5705,
12855 0x03800100, 0x00000000 },
12856 { MAC_ADDR_0_HIGH, 0x0000,
12857 0x00000000, 0x0000ffff },
12858 { MAC_ADDR_0_LOW, 0x0000,
12859 0x00000000, 0xffffffff },
12860 { MAC_RX_MTU_SIZE, 0x0000,
12861 0x00000000, 0x0000ffff },
12862 { MAC_TX_MODE, 0x0000,
12863 0x00000000, 0x00000070 },
12864 { MAC_TX_LENGTHS, 0x0000,
12865 0x00000000, 0x00003fff },
12866 { MAC_RX_MODE, TG3_FL_NOT_5705,
12867 0x00000000, 0x000007fc },
12868 { MAC_RX_MODE, TG3_FL_5705,
12869 0x00000000, 0x000007dc },
12870 { MAC_HASH_REG_0, 0x0000,
12871 0x00000000, 0xffffffff },
12872 { MAC_HASH_REG_1, 0x0000,
12873 0x00000000, 0xffffffff },
12874 { MAC_HASH_REG_2, 0x0000,
12875 0x00000000, 0xffffffff },
12876 { MAC_HASH_REG_3, 0x0000,
12877 0x00000000, 0xffffffff },
12879 /* Receive Data and Receive BD Initiator Control Registers. */
12880 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12881 0x00000000, 0xffffffff },
12882 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12883 0x00000000, 0xffffffff },
12884 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12885 0x00000000, 0x00000003 },
12886 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12887 0x00000000, 0xffffffff },
12888 { RCVDBDI_STD_BD+0, 0x0000,
12889 0x00000000, 0xffffffff },
12890 { RCVDBDI_STD_BD+4, 0x0000,
12891 0x00000000, 0xffffffff },
12892 { RCVDBDI_STD_BD+8, 0x0000,
12893 0x00000000, 0xffff0002 },
12894 { RCVDBDI_STD_BD+0xc, 0x0000,
12895 0x00000000, 0xffffffff },
12897 /* Receive BD Initiator Control Registers. */
12898 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12899 0x00000000, 0xffffffff },
12900 { RCVBDI_STD_THRESH, TG3_FL_5705,
12901 0x00000000, 0x000003ff },
12902 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12903 0x00000000, 0xffffffff },
12905 /* Host Coalescing Control Registers. */
12906 { HOSTCC_MODE, TG3_FL_NOT_5705,
12907 0x00000000, 0x00000004 },
12908 { HOSTCC_MODE, TG3_FL_5705,
12909 0x00000000, 0x000000f6 },
12910 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12911 0x00000000, 0xffffffff },
12912 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12913 0x00000000, 0x000003ff },
12914 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12915 0x00000000, 0xffffffff },
12916 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12917 0x00000000, 0x000003ff },
12918 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12919 0x00000000, 0xffffffff },
12920 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12921 0x00000000, 0x000000ff },
12922 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12923 0x00000000, 0xffffffff },
12924 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12925 0x00000000, 0x000000ff },
12926 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12927 0x00000000, 0xffffffff },
12928 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12929 0x00000000, 0xffffffff },
12930 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12931 0x00000000, 0xffffffff },
12932 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12933 0x00000000, 0x000000ff },
12934 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12935 0x00000000, 0xffffffff },
12936 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12937 0x00000000, 0x000000ff },
12938 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12939 0x00000000, 0xffffffff },
12940 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12941 0x00000000, 0xffffffff },
12942 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12943 0x00000000, 0xffffffff },
12944 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12945 0x00000000, 0xffffffff },
12946 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12947 0x00000000, 0xffffffff },
12948 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12949 0xffffffff, 0x00000000 },
12950 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12951 0xffffffff, 0x00000000 },
12953 /* Buffer Manager Control Registers. */
12954 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12955 0x00000000, 0x007fff80 },
12956 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12957 0x00000000, 0x007fffff },
12958 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12959 0x00000000, 0x0000003f },
12960 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12961 0x00000000, 0x000001ff },
12962 { BUFMGR_MB_HIGH_WATER, 0x0000,
12963 0x00000000, 0x000001ff },
12964 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12965 0xffffffff, 0x00000000 },
12966 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12967 0xffffffff, 0x00000000 },
12969 /* Mailbox Registers */
12970 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12971 0x00000000, 0x000001ff },
12972 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12973 0x00000000, 0x000001ff },
12974 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12975 0x00000000, 0x000007ff },
12976 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12977 0x00000000, 0x000001ff },
12979 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
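/* The 0xffff offset above is a sentinel entry; the scan loop below
 * stops when it reaches it.
 */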
12982 is_5705 = is_5750 = 0;
12983 if (tg3_flag(tp, 5705_PLUS)) {
12985 if (tg3_flag(tp, 5750_PLUS))
12989 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12990 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12993 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12996 if (tg3_flag(tp, IS_5788) &&
12997 (reg_tbl[i].flags & TG3_FL_NOT_5788))
13000 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13003 offset = (u32) reg_tbl[i].offset;
13004 read_mask = reg_tbl[i].read_mask;
13005 write_mask = reg_tbl[i].write_mask;
13007 /* Save the original register content */
13008 save_val = tr32(offset);
13010 /* Determine the read-only value. */
13011 read_val = save_val & read_mask;
13013 /* Write zero to the register, then make sure the read-only bits
13014 * are not changed and the read/write bits are all zeros.
13018 val = tr32(offset);
13020 /* Test the read-only and read/write bits. */
13021 if (((val & read_mask) != read_val) || (val & write_mask))
13024 /* Write ones to all the bits defined by RdMask and WrMask, then
13025 * make sure the read-only bits are not changed and the
13026 * read/write bits are all ones.
13028 tw32(offset, read_mask | write_mask);
13030 val = tr32(offset);
13032 /* Test the read-only bits. */
13033 if ((val & read_mask) != read_val)
13036 /* Test the read/write bits. */
13037 if ((val & write_mask) != write_mask)
13040 tw32(offset, save_val);
13046 if (netif_msg_hw(tp))
13047 netdev_err(tp->dev,
13048 "Register test failed at offset %x\n", offset);
13049 tw32(offset, save_val);
13053 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13055 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
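/* Classic memory-test patterns: all zeros, all ones, and an
 * alternating 0xaa55a55a word to expose stuck or coupled bits.
 */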
13059 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13060 for (j = 0; j < len; j += 4) {
13063 tg3_write_mem(tp, offset + j, test_pattern[i]);
13064 tg3_read_mem(tp, offset + j, &val);
13065 if (val != test_pattern[i])
13072 static int tg3_test_memory(struct tg3 *tp)
13074 static struct mem_entry {
13077 } mem_tbl_570x[] = {
13078 { 0x00000000, 0x00b50},
13079 { 0x00002000, 0x1c000},
13080 { 0xffffffff, 0x00000}
13081 }, mem_tbl_5705[] = {
13082 { 0x00000100, 0x0000c},
13083 { 0x00000200, 0x00008},
13084 { 0x00004000, 0x00800},
13085 { 0x00006000, 0x01000},
13086 { 0x00008000, 0x02000},
13087 { 0x00010000, 0x0e000},
13088 { 0xffffffff, 0x00000}
13089 }, mem_tbl_5755[] = {
13090 { 0x00000200, 0x00008},
13091 { 0x00004000, 0x00800},
13092 { 0x00006000, 0x00800},
13093 { 0x00008000, 0x02000},
13094 { 0x00010000, 0x0c000},
13095 { 0xffffffff, 0x00000}
13096 }, mem_tbl_5906[] = {
13097 { 0x00000200, 0x00008},
13098 { 0x00004000, 0x00400},
13099 { 0x00006000, 0x00400},
13100 { 0x00008000, 0x01000},
13101 { 0x00010000, 0x01000},
13102 { 0xffffffff, 0x00000}
13103 }, mem_tbl_5717[] = {
13104 { 0x00000200, 0x00008},
13105 { 0x00010000, 0x0a000},
13106 { 0x00020000, 0x13c00},
13107 { 0xffffffff, 0x00000}
13108 }, mem_tbl_57765[] = {
13109 { 0x00000200, 0x00008},
13110 { 0x00004000, 0x00800},
13111 { 0x00006000, 0x09800},
13112 { 0x00010000, 0x0a000},
13113 { 0xffffffff, 0x00000}
13115 struct mem_entry *mem_tbl;
13119 if (tg3_flag(tp, 5717_PLUS))
13120 mem_tbl = mem_tbl_5717;
13121 else if (tg3_flag(tp, 57765_CLASS) ||
13122 tg3_asic_rev(tp) == ASIC_REV_5762)
13123 mem_tbl = mem_tbl_57765;
13124 else if (tg3_flag(tp, 5755_PLUS))
13125 mem_tbl = mem_tbl_5755;
13126 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13127 mem_tbl = mem_tbl_5906;
13128 else if (tg3_flag(tp, 5705_PLUS))
13129 mem_tbl = mem_tbl_5705;
13131 mem_tbl = mem_tbl_570x;
13133 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13134 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13142 #define TG3_TSO_MSS 500
13144 #define TG3_TSO_IP_HDR_LEN 20
13145 #define TG3_TSO_TCP_HDR_LEN 20
13146 #define TG3_TSO_TCP_OPT_LEN 12
13148 static const u8 tg3_tso_header[] = {
13150 0x45, 0x00, 0x00, 0x00,
13151 0x00, 0x00, 0x40, 0x00,
13152 0x40, 0x06, 0x00, 0x00,
13153 0x0a, 0x00, 0x00, 0x01,
13154 0x0a, 0x00, 0x00, 0x02,
13155 0x0d, 0x00, 0xe0, 0x00,
13156 0x00, 0x00, 0x01, 0x00,
13157 0x00, 0x00, 0x02, 0x00,
13158 0x80, 0x10, 0x10, 0x00,
13159 0x14, 0x09, 0x00, 0x00,
13160 0x01, 0x01, 0x08, 0x0a,
13161 0x11, 0x11, 0x11, 0x11,
13162 0x11, 0x11, 0x11, 0x11,
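/* The canned header above is a 20-byte IPv4 header, a 20-byte TCP
 * header and a 12-byte timestamp option, matching the TG3_TSO_*_LEN
 * constants (52 bytes in total).
 */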
13165 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13167 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13168 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13170 struct sk_buff *skb;
13171 u8 *tx_data, *rx_data;
13173 int num_pkts, tx_len, rx_len, i, err;
13174 struct tg3_rx_buffer_desc *desc;
13175 struct tg3_napi *tnapi, *rnapi;
13176 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13178 tnapi = &tp->napi[0];
13179 rnapi = &tp->napi[0];
13180 if (tp->irq_cnt > 1) {
13181 if (tg3_flag(tp, ENABLE_RSS))
13182 rnapi = &tp->napi[1];
13183 if (tg3_flag(tp, ENABLE_TSS))
13184 tnapi = &tp->napi[1];
13186 coal_now = tnapi->coal_now | rnapi->coal_now;
13191 skb = netdev_alloc_skb(tp->dev, tx_len);
13195 tx_data = skb_put(skb, tx_len);
13196 memcpy(tx_data, tp->dev->dev_addr, 6);
13197 memset(tx_data + 6, 0x0, 8);
13199 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13201 if (tso_loopback) {
13202 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13204 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13205 TG3_TSO_TCP_OPT_LEN;
13207 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13208 sizeof(tg3_tso_header));
13211 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13212 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13214 /* Set the total length field in the IP header */
13215 iph->tot_len = htons((u16)(mss + hdr_len));
13217 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13218 TXD_FLAG_CPU_POST_DMA);
13220 if (tg3_flag(tp, HW_TSO_1) ||
13221 tg3_flag(tp, HW_TSO_2) ||
13222 tg3_flag(tp, HW_TSO_3)) {
13224 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13225 th = (struct tcphdr *)&tx_data[val];
13228 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13230 if (tg3_flag(tp, HW_TSO_3)) {
13231 mss |= (hdr_len & 0xc) << 12;
13232 if (hdr_len & 0x10)
13233 base_flags |= 0x00000010;
13234 base_flags |= (hdr_len & 0x3e0) << 5;
13235 } else if (tg3_flag(tp, HW_TSO_2))
13236 mss |= hdr_len << 9;
13237 else if (tg3_flag(tp, HW_TSO_1) ||
13238 tg3_asic_rev(tp) == ASIC_REV_5705) {
13239 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13241 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13244 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13247 data_off = ETH_HLEN;
13249 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13250 tx_len > VLAN_ETH_FRAME_LEN)
13251 base_flags |= TXD_FLAG_JMB_PKT;
13254 for (i = data_off; i < tx_len; i++)
13255 tx_data[i] = (u8) (i & 0xff);
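/* The ramp pattern written above is compared byte-for-byte on the
 * receive side once the frame loops back.
 */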
13257 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13258 if (pci_dma_mapping_error(tp->pdev, map)) {
13259 dev_kfree_skb(skb);
13263 val = tnapi->tx_prod;
13264 tnapi->tx_buffers[val].skb = skb;
13265 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13267 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13272 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13274 budget = tg3_tx_avail(tnapi);
13275 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13276 base_flags | TXD_FLAG_END, mss, 0)) {
13277 tnapi->tx_buffers[val].skb = NULL;
13278 dev_kfree_skb(skb);
13284 /* Sync BD data before updating mailbox */
13287 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13288 tr32_mailbox(tnapi->prodmbox);
13292 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13293 for (i = 0; i < 35; i++) {
13294 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13299 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13300 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13301 if ((tx_idx == tnapi->tx_prod) &&
13302 (rx_idx == (rx_start_idx + num_pkts)))
13306 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13307 dev_kfree_skb(skb);
13309 if (tx_idx != tnapi->tx_prod)
13312 if (rx_idx != rx_start_idx + num_pkts)
13316 while (rx_idx != rx_start_idx) {
13317 desc = &rnapi->rx_rcb[rx_start_idx++];
13318 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13319 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13321 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13322 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13325 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13328 if (!tso_loopback) {
13329 if (rx_len != tx_len)
13332 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13333 if (opaque_key != RXD_OPAQUE_RING_STD)
13336 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13339 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13340 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13341 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13345 if (opaque_key == RXD_OPAQUE_RING_STD) {
13346 rx_data = tpr->rx_std_buffers[desc_idx].data;
13347 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13349 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13350 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13351 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13356 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13357 PCI_DMA_FROMDEVICE);
13359 rx_data += TG3_RX_OFFSET(tp);
13360 for (i = data_off; i < rx_len; i++, val++) {
13361 if (*(rx_data + i) != (u8) (val & 0xff))
13368 /* tg3_free_rings will unmap and free the rx_data */
13373 #define TG3_STD_LOOPBACK_FAILED 1
13374 #define TG3_JMB_LOOPBACK_FAILED 2
13375 #define TG3_TSO_LOOPBACK_FAILED 4
13376 #define TG3_LOOPBACK_FAILED \
13377 (TG3_STD_LOOPBACK_FAILED | \
13378 TG3_JMB_LOOPBACK_FAILED | \
13379 TG3_TSO_LOOPBACK_FAILED)
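/* Each data[] slot filled in by tg3_test_loopback() is a bitmask of
 * the packet variants (standard, TSO, jumbo) that failed for that
 * loopback mode.
 */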
13381 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13385 u32 jmb_pkt_sz = 9000;
13388 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13390 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13391 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13393 if (!netif_running(tp->dev)) {
13394 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13395 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13397 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13401 err = tg3_reset_hw(tp, true);
13403 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13404 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13406 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13410 if (tg3_flag(tp, ENABLE_RSS)) {
13413 /* Reroute all rx packets to the 1st queue */
13414 for (i = MAC_RSS_INDIR_TBL_0;
13415 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13419 /* HW errata - mac loopback fails in some cases on 5780.
13420 * Normal traffic and PHY loopback are not affected by
13421 * errata. Also, the MAC loopback test is deprecated for
13422 * all newer ASIC revisions.
13424 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13425 !tg3_flag(tp, CPMU_PRESENT)) {
13426 tg3_mac_loopback(tp, true);
13428 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13429 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13431 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13432 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13433 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13435 tg3_mac_loopback(tp, false);
13438 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13439 !tg3_flag(tp, USE_PHYLIB)) {
13442 tg3_phy_lpbk_set(tp, 0, false);
13444 /* Wait for link */
13445 for (i = 0; i < 100; i++) {
13446 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13451 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13452 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13453 if (tg3_flag(tp, TSO_CAPABLE) &&
13454 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13455 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13456 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13457 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13458 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13461 tg3_phy_lpbk_set(tp, 0, true);
13463 /* All link indications report up, but the hardware
13464 * isn't really ready for about 20 msec. Double it to be sure. */
13469 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13470 data[TG3_EXT_LOOPB_TEST] |=
13471 TG3_STD_LOOPBACK_FAILED;
13472 if (tg3_flag(tp, TSO_CAPABLE) &&
13473 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13474 data[TG3_EXT_LOOPB_TEST] |=
13475 TG3_TSO_LOOPBACK_FAILED;
13476 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13477 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13478 data[TG3_EXT_LOOPB_TEST] |=
13479 TG3_JMB_LOOPBACK_FAILED;
13482 /* Re-enable gphy autopowerdown. */
13483 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13484 tg3_phy_toggle_apd(tp, true);
13487 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13488 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13491 tp->phy_flags |= eee_cap;
13496 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13499 struct tg3 *tp = netdev_priv(dev);
13500 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13502 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13503 if (tg3_power_up(tp)) {
13504 etest->flags |= ETH_TEST_FL_FAILED;
13505 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13508 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13511 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13513 if (tg3_test_nvram(tp) != 0) {
13514 etest->flags |= ETH_TEST_FL_FAILED;
13515 data[TG3_NVRAM_TEST] = 1;
13517 if (!doextlpbk && tg3_test_link(tp)) {
13518 etest->flags |= ETH_TEST_FL_FAILED;
13519 data[TG3_LINK_TEST] = 1;
13521 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13522 int err, err2 = 0, irq_sync = 0;
13524 if (netif_running(dev)) {
13526 tg3_netif_stop(tp);
13530 tg3_full_lock(tp, irq_sync);
13531 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13532 err = tg3_nvram_lock(tp);
13533 tg3_halt_cpu(tp, RX_CPU_BASE);
13534 if (!tg3_flag(tp, 5705_PLUS))
13535 tg3_halt_cpu(tp, TX_CPU_BASE);
13537 tg3_nvram_unlock(tp);
13539 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13542 if (tg3_test_registers(tp) != 0) {
13543 etest->flags |= ETH_TEST_FL_FAILED;
13544 data[TG3_REGISTER_TEST] = 1;
13547 if (tg3_test_memory(tp) != 0) {
13548 etest->flags |= ETH_TEST_FL_FAILED;
13549 data[TG3_MEMORY_TEST] = 1;
13553 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13555 if (tg3_test_loopback(tp, data, doextlpbk))
13556 etest->flags |= ETH_TEST_FL_FAILED;
13558 tg3_full_unlock(tp);
13560 if (tg3_test_interrupt(tp) != 0) {
13561 etest->flags |= ETH_TEST_FL_FAILED;
13562 data[TG3_INTERRUPT_TEST] = 1;
13565 tg3_full_lock(tp, 0);
13567 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13568 if (netif_running(dev)) {
13569 tg3_flag_set(tp, INIT_COMPLETE);
13570 err2 = tg3_restart_hw(tp, true);
13572 tg3_netif_start(tp);
13575 tg3_full_unlock(tp);
13577 if (irq_sync && !err2)
13580 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13581 tg3_power_down_prepare(tp);
13585 static int tg3_hwtstamp_ioctl(struct net_device *dev,
13586 struct ifreq *ifr, int cmd)
13588 struct tg3 *tp = netdev_priv(dev);
13589 struct hwtstamp_config stmpconf;
13591 if (!tg3_flag(tp, PTP_CAPABLE))
13594 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13597 if (stmpconf.flags)
13600 switch (stmpconf.tx_type) {
13601 case HWTSTAMP_TX_ON:
13602 tg3_flag_set(tp, TX_TSTAMP_EN);
13604 case HWTSTAMP_TX_OFF:
13605 tg3_flag_clear(tp, TX_TSTAMP_EN);
13611 switch (stmpconf.rx_filter) {
13612 case HWTSTAMP_FILTER_NONE:
13615 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13616 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13617 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13619 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13620 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13621 TG3_RX_PTP_CTL_SYNC_EVNT;
13623 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13624 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13625 TG3_RX_PTP_CTL_DELAY_REQ;
13627 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13628 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13629 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13631 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13632 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13633 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13635 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13636 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13637 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13639 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13640 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13641 TG3_RX_PTP_CTL_SYNC_EVNT;
13643 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13644 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13645 TG3_RX_PTP_CTL_SYNC_EVNT;
13647 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13648 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13649 TG3_RX_PTP_CTL_SYNC_EVNT;
13651 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13652 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13653 TG3_RX_PTP_CTL_DELAY_REQ;
13655 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13656 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13657 TG3_RX_PTP_CTL_DELAY_REQ;
13659 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13660 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13661 TG3_RX_PTP_CTL_DELAY_REQ;
13667 if (netif_running(dev) && tp->rxptpctl)
13668 tw32(TG3_RX_PTP_CTL,
13669 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13671 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13675 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13677 struct mii_ioctl_data *data = if_mii(ifr);
13678 struct tg3 *tp = netdev_priv(dev);
13681 if (tg3_flag(tp, USE_PHYLIB)) {
13682 struct phy_device *phydev;
13683 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13685 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
13686 return phy_mii_ioctl(phydev, ifr, cmd);
13691 data->phy_id = tp->phy_addr;
13694 case SIOCGMIIREG: {
13697 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13698 break; /* We have no PHY */
13700 if (!netif_running(dev))
13703 spin_lock_bh(&tp->lock);
13704 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13705 data->reg_num & 0x1f, &mii_regval);
13706 spin_unlock_bh(&tp->lock);
13708 data->val_out = mii_regval;
13714 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13715 break; /* We have no PHY */
13717 if (!netif_running(dev))
13720 spin_lock_bh(&tp->lock);
13721 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13722 data->reg_num & 0x1f, data->val_in);
13723 spin_unlock_bh(&tp->lock);
13727 case SIOCSHWTSTAMP:
13728 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13734 return -EOPNOTSUPP;
13737 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13739 struct tg3 *tp = netdev_priv(dev);
13741 memcpy(ec, &tp->coal, sizeof(*ec));
13745 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13747 struct tg3 *tp = netdev_priv(dev);
13748 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13749 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13751 if (!tg3_flag(tp, 5705_PLUS)) {
13752 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13753 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13754 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13755 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
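/* On 5705-and-newer parts the *_irq and stats-block parameters are
 * unsupported; their limits stay zero, so any nonzero request for
 * them is rejected below.
 */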
13758 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13759 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13760 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13761 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13762 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13763 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13764 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13765 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13766 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13767 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13770 /* No rx interrupts will be generated if both are zero */
13771 if ((ec->rx_coalesce_usecs == 0) &&
13772 (ec->rx_max_coalesced_frames == 0))
13775 /* No tx interrupts will be generated if both are zero */
13776 if ((ec->tx_coalesce_usecs == 0) &&
13777 (ec->tx_max_coalesced_frames == 0))
13780 /* Only copy relevant parameters, ignore all others. */
13781 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13782 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13783 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13784 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13785 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13786 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13787 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13788 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13789 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13791 if (netif_running(dev)) {
13792 tg3_full_lock(tp, 0);
13793 __tg3_set_coalesce(tp, &tp->coal);
13794 tg3_full_unlock(tp);
13799 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
13801 struct tg3 *tp = netdev_priv(dev);
13803 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13804 netdev_warn(tp->dev, "Board does not support EEE!\n");
13805 return -EOPNOTSUPP;
13808 if (edata->advertised != tp->eee.advertised) {
13809 netdev_warn(tp->dev,
13810 "Direct manipulation of EEE advertisement is not supported\n");
13814 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
13815 netdev_warn(tp->dev,
13816 "Maximal Tx Lpi timer supported is %#x(u)\n",
13817 TG3_CPMU_DBTMR1_LNKIDLE_MAX);
13823 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
13824 tg3_warn_mgmt_link_flap(tp);
13826 if (netif_running(tp->dev)) {
13827 tg3_full_lock(tp, 0);
13830 tg3_full_unlock(tp);
13836 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
13838 struct tg3 *tp = netdev_priv(dev);
13840 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13841 netdev_warn(tp->dev,
13842 "Board does not support EEE!\n");
13843 return -EOPNOTSUPP;
13850 static const struct ethtool_ops tg3_ethtool_ops = {
13851 .get_settings = tg3_get_settings,
13852 .set_settings = tg3_set_settings,
13853 .get_drvinfo = tg3_get_drvinfo,
13854 .get_regs_len = tg3_get_regs_len,
13855 .get_regs = tg3_get_regs,
13856 .get_wol = tg3_get_wol,
13857 .set_wol = tg3_set_wol,
13858 .get_msglevel = tg3_get_msglevel,
13859 .set_msglevel = tg3_set_msglevel,
13860 .nway_reset = tg3_nway_reset,
13861 .get_link = ethtool_op_get_link,
13862 .get_eeprom_len = tg3_get_eeprom_len,
13863 .get_eeprom = tg3_get_eeprom,
13864 .set_eeprom = tg3_set_eeprom,
13865 .get_ringparam = tg3_get_ringparam,
13866 .set_ringparam = tg3_set_ringparam,
13867 .get_pauseparam = tg3_get_pauseparam,
13868 .set_pauseparam = tg3_set_pauseparam,
13869 .self_test = tg3_self_test,
13870 .get_strings = tg3_get_strings,
13871 .set_phys_id = tg3_set_phys_id,
13872 .get_ethtool_stats = tg3_get_ethtool_stats,
13873 .get_coalesce = tg3_get_coalesce,
13874 .set_coalesce = tg3_set_coalesce,
13875 .get_sset_count = tg3_get_sset_count,
13876 .get_rxnfc = tg3_get_rxnfc,
13877 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
13878 .get_rxfh_indir = tg3_get_rxfh_indir,
13879 .set_rxfh_indir = tg3_set_rxfh_indir,
13880 .get_channels = tg3_get_channels,
13881 .set_channels = tg3_set_channels,
13882 .get_ts_info = tg3_get_ts_info,
13883 .get_eee = tg3_get_eee,
13884 .set_eee = tg3_set_eee,
13887 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13888 struct rtnl_link_stats64 *stats)
13890 struct tg3 *tp = netdev_priv(dev);
13892 spin_lock_bh(&tp->lock);
13893 if (!tp->hw_stats) {
13894 spin_unlock_bh(&tp->lock);
13895 return &tp->net_stats_prev;
13898 tg3_get_nstats(tp, stats);
13899 spin_unlock_bh(&tp->lock);
13904 static void tg3_set_rx_mode(struct net_device *dev)
13906 struct tg3 *tp = netdev_priv(dev);
13908 if (!netif_running(dev))
13911 tg3_full_lock(tp, 0);
13912 __tg3_set_rx_mode(dev);
13913 tg3_full_unlock(tp);
13916 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13919 dev->mtu = new_mtu;
13921 if (new_mtu > ETH_DATA_LEN) {
13922 if (tg3_flag(tp, 5780_CLASS)) {
13923 netdev_update_features(dev);
13924 tg3_flag_clear(tp, TSO_CAPABLE);
13926 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13929 if (tg3_flag(tp, 5780_CLASS)) {
13930 tg3_flag_set(tp, TSO_CAPABLE);
13931 netdev_update_features(dev);
13933 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13937 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13939 struct tg3 *tp = netdev_priv(dev);
13941 bool reset_phy = false;
13943 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13946 if (!netif_running(dev)) {
13947 /* We'll just catch it later when the device is up'd. */
13950 tg3_set_mtu(dev, tp, new_mtu);
13956 tg3_netif_stop(tp);
13958 tg3_full_lock(tp, 1);
13960 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13962 tg3_set_mtu(dev, tp, new_mtu);
13964 /* Reset PHY, otherwise the read DMA engine will be in a mode that
13965 * breaks all requests to 256 bytes.
13967 if (tg3_asic_rev(tp) == ASIC_REV_57766)
13970 err = tg3_restart_hw(tp, reset_phy);
13973 tg3_netif_start(tp);
13975 tg3_full_unlock(tp);
13983 static const struct net_device_ops tg3_netdev_ops = {
13984 .ndo_open = tg3_open,
13985 .ndo_stop = tg3_close,
13986 .ndo_start_xmit = tg3_start_xmit,
13987 .ndo_get_stats64 = tg3_get_stats64,
13988 .ndo_validate_addr = eth_validate_addr,
13989 .ndo_set_rx_mode = tg3_set_rx_mode,
13990 .ndo_set_mac_address = tg3_set_mac_addr,
13991 .ndo_do_ioctl = tg3_ioctl,
13992 .ndo_tx_timeout = tg3_tx_timeout,
13993 .ndo_change_mtu = tg3_change_mtu,
13994 .ndo_fix_features = tg3_fix_features,
13995 .ndo_set_features = tg3_set_features,
13996 #ifdef CONFIG_NET_POLL_CONTROLLER
13997 .ndo_poll_controller = tg3_poll_controller,
14001 static void tg3_get_eeprom_size(struct tg3 *tp)
14003 u32 cursize, val, magic;
14005 tp->nvram_size = EEPROM_CHIP_SIZE;
14007 if (tg3_nvram_read(tp, 0, &magic) != 0)
14010 if ((magic != TG3_EEPROM_MAGIC) &&
14011 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14012 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14016 * Size the chip by reading offsets at increasing powers of two.
14017 * When we encounter our validation signature, we know the addressing
14018 * has wrapped around, and thus have our chip size.
14022 while (cursize < tp->nvram_size) {
14023 if (tg3_nvram_read(tp, cursize, &val) != 0)
14032 tp->nvram_size = cursize;
14035 static void tg3_get_nvram_size(struct tg3 *tp)
14039 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14042 /* Selfboot format */
14043 if (val != TG3_EEPROM_MAGIC) {
14044 tg3_get_eeprom_size(tp);
14048 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14050 /* This is confusing. We want to operate on the
14051 * 16-bit value at offset 0xf2. The tg3_nvram_read()
14052 * call will read from NVRAM and byteswap the data
14053 * according to the byteswapping settings for all
14054 * other register accesses. This ensures the data we
14055 * want will always reside in the lower 16-bits.
14056 * However, the data in NVRAM is in LE format, which
14057 * means the data from the NVRAM read will always be
14058 * opposite the endianness of the CPU. The 16-bit
14059 * byteswap then brings the data to CPU endianness.
14061 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
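/* Illustration: a size word of 128 here means a 128 KB part. */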
14065 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
static void tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}
static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
static void tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
			tg3_flag_set(tp, NO_NVRAM);
			return;
		}

		switch (nvmpinstrp) {
		case FLASH_5762_EEPROM_HD:
			nvmpinstrp = FLASH_5720_EEPROM_HD;
			break;
		case FLASH_5762_EEPROM_LD:
			nvmpinstrp = FLASH_5720_EEPROM_LD;
			break;
		case FLASH_5720VENDOR_M_ST_M45PE20:
			/* This pinstrap supports multiple sizes, so force it
			 * to read the actual size from location 0xf0.
			 */
			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
			break;
		}
	}

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 val;

		if (tg3_nvram_read(tp, 0, &val))
			return;

		if (val != TG3_EEPROM_MAGIC &&
		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
			tg3_flag_set(tp, NO_NVRAM);
	}
}
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void tg3_nvram_init(struct tg3 *tp)
{
	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tw32_f(GRC_EEPROM_ADDR,
	       (EEPROM_ADDR_FSM_RESET |
		(EEPROM_DEFAULT_CLOCK_PERIOD <<
		 EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	       tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
			 tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			 tg3_asic_rev(tp) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
			 tg3_asic_rev(tp) == ASIC_REV_5762)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};

static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}

	return NULL;
}
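
/* Hypothetical usage sketch (not in the driver): tg3_lookup_by_subsys()
 * returns NULL for unknown subsystem IDs, so a caller needs a fallback of
 * its own, much like tg3_phy_probe() below:
 */
static inline u32 tg3_example_phy_id_for_board(struct tg3 *tp, u32 fallback)
{
	struct subsys_tbl_ent *ent = tg3_lookup_by_subsys(tp);

	return ent ? ent->phy_id : fallback;
}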
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default. */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;
		}

		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
			    !tg3_flag(tp, 57765_PLUS) &&
			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
				tg3_flag_set(tp, ASPM_WORKAROUND);
			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	}
done:
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
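
/* Illustrative sketch (stand-alone model, not driver code): the
 * eeprom_phy_id packing above folds the two NIC SRAM ID words into the
 * driver's canonical PHY ID layout.  The slice descriptions in the
 * comments are assumptions about what each field represents:
 */
static inline u32 tg3_example_pack_phy_id(u32 id1, u32 id2)
{
	u32 phy_id;

	phy_id  = (id1 >> 16) << 10;	/* high word of ID1, shifted into place */
	phy_id |= (id2 & 0xfc00) << 16;	/* upper bits of ID2 */
	phy_id |= (id2 & 0x03ff) << 0;	/* low ten bits of ID2 */

	return phy_id;
}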
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;

	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
	udelay(100);

	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}
static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}
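
/* Illustrative sketch (hypothetical helper): both OTP routines above use
 * the same bounded-poll idiom -- kick off a command, then spin with
 * udelay() against a fixed budget and report -EBUSY on timeout:
 */
static inline int tg3_example_poll_done(struct tg3 *tp, u32 status_reg,
					u32 done_bit)
{
	int i;

	for (i = 0; i < 100; i++) {	/* roughly 1 ms at 10 us per step */
		if (tr32(status_reg) & done_bit)
			return 0;
		udelay(10);
	}

	return -EBUSY;
}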
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
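
/* Worked example (illustrative only): with aligned reads returning
 * thalf_otp = 0xAAAA1234 and bhalf_otp = 0x5678BBBB, the merge above
 * yields (0x1234 << 16) | 0x5678 = 0x12345678.  Stand-alone form:
 */
static inline u32 tg3_example_merge_otp_halves(u32 thalf, u32 bhalf)
{
	/* low 16 bits of the top word, high 16 bits of the bottom word */
	return ((thalf & 0x0000ffff) << 16) | (bhalf >> 16);
}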
static void tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		adv |= ADVERTISED_1000baseT_Half |
		       ADVERTISED_1000baseT_Full;

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_UNKNOWN;
	tp->link_config.duplex = DUPLEX_UNKNOWN;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_UNKNOWN;
	tp->link_config.active_duplex = DUPLEX_UNKNOWN;

	tp->old_link = -1;
}
static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	if (!tg3_flag(tp, ENABLE_ASF) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (p) {
				tp->phy_id = p->phy_id;
			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* For now we saw the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to a BCM4785 and there are
				 * probably more.  Just assume that the phy is
				 * supported when it is connected to a SSB core
				 * for now.
				 */
				return -ENODEV;
			}

			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

		tp->eee.supported = SUPPORTED_100baseT_Full |
				    SUPPORTED_1000baseT_Full;
		tp->eee.advertised = ADVERTISED_100baseT_Full |
				     ADVERTISED_1000baseT_Full;
		tp->eee.eee_enabled = 1;
		tp->eee.tx_lpi_enabled = 1;
		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
	}

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
static void tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		if (len >= sizeof(tp->fw_ver))
			len = sizeof(tp->fw_ver) - 1;
		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
			 &vpd_data[j]);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    val != 0)
		return 0;

	return 1;
}
static void tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;

		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
static void tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}
static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;

	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
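
/* Illustrative sketch: the trailing build letter above maps builds 1..26
 * onto 'a'..'z'; out-of-range builds were already rejected.  Hypothetical
 * stand-alone form:
 */
static inline char tg3_example_build_suffix(u32 build)
{
	return (build >= 1 && build <= 26) ? 'a' + build - 1 : '\0';
}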
static void tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
static void tg3_probe_ncsi(struct tg3 *tp)
{
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
		tg3_flag_set(tp, APE_HAS_NCSI);
}
static void tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_flag(tp, APE_HAS_NCSI))
		fwtype = "NCSI";
	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
		fwtype = "SMASH";
	else
		fwtype = "DASH";

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}
static void tg3_read_otp_ver(struct tg3 *tp)
{
	u32 val, val2;

	if (tg3_asic_rev(tp) != ASIC_REV_5762)
		return;

	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
	    TG3_OTP_MAGIC0_VALID(val)) {
		u64 val64 = (u64) val << 32 | val2;
		u32 ver = 0;
		int i, vlen;

		for (i = 0; i < 7; i++) {
			if ((val64 & 0xff) == 0)
				break;
			ver = val64 & 0xff;
			val64 >>= 8;
		}
		vlen = strlen(tp->fw_ver);
		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
	}
}
static void tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		tg3_read_otp_ver(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);

	if (tg3_flag(tp, ENABLE_ASF)) {
		if (tg3_flag(tp, ENABLE_APE)) {
			tg3_probe_ncsi(tp);
			if (!vpd_vers)
				tg3_read_dash_ver(tp);
		} else if (!vpd_vers) {
			tg3_read_mgmtfw_ver(tp);
		}
	}

	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}
static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/* We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
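
/* Illustrative sketch (not driver code): the peer search above leans on
 * the PCI devfn encoding, where the low three bits carry the function
 * number; masking them off selects function 0 of the same slot:
 */
static inline unsigned int tg3_example_devfn_base(unsigned int devfn)
{
	return devfn & ~7;	/* same device, function 0 */
}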
static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
	    tg3_asic_rev(tp) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}
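
/* Descriptive note on the flags set above: each generation flag is an
 * inclusive superset of the later ones -- 5705_PLUS covers 5750_PLUS,
 * which covers 5755_PLUS, which covers 57765_PLUS, which in turn covers
 * the 5717_PLUS and 57765_CLASS chips -- so a single tg3_flag() test
 * matches every newer device as well.
 */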
static bool tg3_10_100_only_device(struct tg3 *tp,
				   const struct pci_device_id *ent)
{
	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;

	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		return true;

	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
				return true;
		} else {
			return true;
		}
	}

	return false;
}
static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
{
	u32 misc_ctrl_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err;

	/* Force memory write invalidate off.  If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size.  The Broadcom driver has this
	 * workaround but turns MWI off all the time so never uses
	 * it.  This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* Important! -- Make sure register accesses are byteswapped
	 * correctly.  Also, for those chips that require it, make
	 * sure that indirect register accesses are enabled before
	 * the first operation.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	tg3_detect_asic_rev(tp, misc_ctrl_reg);

	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
	 * we need to disable memory and use config. cycles
	 * only to access all registers. The 5702/03 chips
	 * can mistakenly decode the special cycles from the
	 * ICH chipsets as memory write cycles, causing corruption
	 * of register and memory space. Only certain ICH bridges
	 * will drive special cycles with non-zero data during the
	 * address phase which can fall within the 5703's address
	 * range. This is not an ICH bug as the PCI spec allows
	 * non-zero address during special cycles. However, only
	 * these ICH bridges are known to drive non-zero addresses
	 * during special cycles.
	 *
	 * Since special cycles do not cross PCI bridges, we only
	 * enable this workaround if the 5703 is on the secondary
	 * bus of these ICH bridges.
	 */
	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
			u32	rev;
		} ich_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
			  0xa },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
			  PCI_ANY_ID },
			{ },
		};
		struct tg3_dev_id *pci_id = &ich_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor, pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (pci_id->rev != PCI_ANY_ID) {
				if (bridge->revision > pci_id->rev)
					continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number ==
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, ICH_WORKAROUND);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
		} bridge_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
			{ },
		};
		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor,
						pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->busn_res.end >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 5701_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
	 * DMA addresses > 40-bit. The bridge may have additional
	 * 57xx devices behind it in some 4-port NIC designs for example.
	 * Any tg3 device found behind the bridge will also need the 40-bit
	 * DMA workaround.
	 */
	if (tg3_flag(tp, 5780_CLASS)) {
		tg3_flag_set(tp, 40BIT_DMA_BUG);
		tp->msi_cap = tp->pdev->msi_cap;
	} else {
		struct pci_dev *bridge = NULL;

		do {
			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
						PCI_DEVICE_ID_SERVERWORKS_EPB,
						bridge);
			if (bridge && bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->busn_res.end >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 40BIT_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		} while (bridge);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tp->pdev_peer = tg3_find_peer(tp);

	/* Determine TSO capabilities */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
		; /* Do nothing. HW bug. */
	else if (tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, HW_TSO_3);
	else if (tg3_flag(tp, 5755_PLUS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5906)
		tg3_flag_set(tp, HW_TSO_2);
	else if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, HW_TSO_1);
		tg3_flag_set(tp, TSO_BUG);
		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
			tg3_flag_clear(tp, TSO_BUG);
	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		tg3_flag_set(tp, FW_TSO);
		tg3_flag_set(tp, TSO_BUG);
		if (tg3_asic_rev(tp) == ASIC_REV_5705)
			tp->fw_needed = FIRMWARE_TG3TSO5;
		else
			tp->fw_needed = FIRMWARE_TG3TSO;
	}

	/* Selectively allow TSO based on operating conditions */
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3) ||
	    tg3_flag(tp, FW_TSO)) {
		/* For firmware TSO, assume ASF is disabled.
		 * We'll disable TSO later if we discover ASF
		 * is enabled in tg3_get_eeprom_hw_cfg().
		 */
		tg3_flag_set(tp, TSO_CAPABLE);
	} else {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
		tp->fw_needed = FIRMWARE_TG3;

	if (tg3_asic_rev(tp) == ASIC_REV_57766)
		tp->fw_needed = FIRMWARE_TG357766;
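
	/* Descriptive note on the TSO selection above: HW_TSO_1/2/3 mark
	 * three generations of on-chip TSO engines, while FW_TSO relies on
	 * a downloaded image (FIRMWARE_TG3TSO or FIRMWARE_TG3TSO5);
	 * CHIPREV_ID_5719_A0 is left without TSO entirely because of a
	 * hardware bug.
	 */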
	tp->irq_max = 1;

	if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, SUPPORT_MSI);
		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
		     tp->pdev_peer == tp->pdev))
			tg3_flag_clear(tp, SUPPORT_MSI);

		if (tg3_flag(tp, 5755_PLUS) ||
		    tg3_asic_rev(tp) == ASIC_REV_5906) {
			tg3_flag_set(tp, 1SHOT_MSI);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			tg3_flag_set(tp, SUPPORT_MSIX);
			tp->irq_max = TG3_IRQ_MAX_VECS;
		}
	}

	tp->txq_max = 1;
	tp->rxq_max = 1;
	if (tp->irq_max > 1) {
		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);

		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			tp->txq_max = tp->irq_max - 1;
	}

	if (tg3_flag(tp, 5755_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5906)
		tg3_flag_set(tp, SHORT_DMA_BUG);

	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, LRG_PROD_RING_CAP);

	if (tg3_flag(tp, 57765_PLUS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
		tg3_flag_set(tp, USE_JUMBO_BDFLAG);

	if (!tg3_flag(tp, 5705_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_flag(tp, USE_JUMBO_BDFLAG))
		tg3_flag_set(tp, JUMBO_CAPABLE);
16109 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16112 if (pci_is_pcie(tp->pdev)) {
16115 tg3_flag_set(tp, PCI_EXPRESS);
16117 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16118 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16119 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16120 tg3_flag_clear(tp, HW_TSO_2);
16121 tg3_flag_clear(tp, TSO_CAPABLE);
16123 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16124 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16125 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16126 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16127 tg3_flag_set(tp, CLKREQ_BUG);
16128 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16129 tg3_flag_set(tp, L1PLLPD_EN);
16131 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16132 /* BCM5785 devices are effectively PCIe devices, and should
16133 * follow PCIe codepaths, but do not have a PCIe capabilities
16136 tg3_flag_set(tp, PCI_EXPRESS);
16137 } else if (!tg3_flag(tp, 5705_PLUS) ||
16138 tg3_flag(tp, 5780_CLASS)) {
16139 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16140 if (!tp->pcix_cap) {
16141 dev_err(&tp->pdev->dev,
16142 "Cannot find PCI-X capability, aborting\n");
16146 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16147 tg3_flag_set(tp, PCIX_MODE);
16150 /* If we have an AMD 762 or VIA K8T800 chipset, reordering of
16151 * the mailbox register writes done by the host controller can
16152 * cause serious problems. We read back from every mailbox
16153 * register write to force the writes to be posted to the chip
16154 * in order.
16155 */
16156 if (pci_dev_present(tg3_write_reorder_chipsets) &&
16157 !tg3_flag(tp, PCI_EXPRESS))
16158 tg3_flag_set(tp, MBOX_WRITE_REORDER);
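/* The read-back workaround in a nutshell: a PCI read cannot pass a PCI
 * write, so reading the register back forces all earlier posted writes
 * to reach the chip first. A minimal sketch modeled on
 * tg3_write_flush_reg32() (names here are illustrative):
 */
static void example_write_flush(void __iomem *regs, u32 off, u32 val)
{
	writel(val, regs + off);
	readl(regs + off);	/* flush the posted write, preserve ordering */
}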
16160 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16161 &tp->pci_cacheline_sz);
16162 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16163 &tp->pci_lat_timer);
16164 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16165 tp->pci_lat_timer < 64) {
16166 tp->pci_lat_timer = 64;
16167 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16168 tp->pci_lat_timer);
16171 /* Important! -- It is critical that the PCI-X hw workaround
16172 * situation is decided before the first MMIO register access.
16173 */
16174 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16175 /* 5700 BX chips need to have their TX producer index
16176 * mailboxes written twice to workaround a bug.
16177 */
16178 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16180 /* If we are in PCI-X mode, enable register write workaround.
16182 * The workaround is to use indirect register accesses
16183 * for all chip writes not to mailbox registers.
16184 */
16185 if (tg3_flag(tp, PCIX_MODE)) {
16186 u32 pm_reg;
16188 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16190 /* The chip can have its power management PCI config
16191 * space registers clobbered due to this bug.
16192 * So explicitly force the chip into D0 here.
16194 pci_read_config_dword(tp->pdev,
16195 tp->pm_cap + PCI_PM_CTRL,
16196 &pm_reg);
16197 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16198 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16199 pci_write_config_dword(tp->pdev,
16200 tp->pm_cap + PCI_PM_CTRL,
16201 pm_reg);
16203 /* Also, force SERR#/PERR# in PCI command. */
16204 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16205 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16206 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16210 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16211 tg3_flag_set(tp, PCI_HIGH_SPEED);
16212 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16213 tg3_flag_set(tp, PCI_32BIT);
16215 /* Chip-specific fixup from Broadcom driver */
16216 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16217 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16218 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16219 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16222 /* Default fast path register access methods */
16223 tp->read32 = tg3_read32;
16224 tp->write32 = tg3_write32;
16225 tp->read32_mbox = tg3_read32;
16226 tp->write32_mbox = tg3_write32;
16227 tp->write32_tx_mbox = tg3_write32;
16228 tp->write32_rx_mbox = tg3_write32;
16230 /* Various workaround register access methods */
16231 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16232 tp->write32 = tg3_write_indirect_reg32;
16233 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16234 (tg3_flag(tp, PCI_EXPRESS) &&
16235 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16236 /*
16237 * Back to back register writes can cause problems on these
16238 * chips, the workaround is to read back all reg writes
16239 * except those to mailbox regs.
16241 * See tg3_write_indirect_reg32().
16242 */
16243 tp->write32 = tg3_write_flush_reg32;
16246 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16247 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16248 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16249 tp->write32_rx_mbox = tg3_write_flush_reg32;
16252 if (tg3_flag(tp, ICH_WORKAROUND)) {
16253 tp->read32 = tg3_read_indirect_reg32;
16254 tp->write32 = tg3_write_indirect_reg32;
16255 tp->read32_mbox = tg3_read_indirect_mbox;
16256 tp->write32_mbox = tg3_write_indirect_mbox;
16257 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16258 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16260 iounmap(tp->regs);
16261 tp->regs = NULL;
16263 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16264 pci_cmd &= ~PCI_COMMAND_MEMORY;
16265 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16267 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16268 tp->read32_mbox = tg3_read32_mbox_5906;
16269 tp->write32_mbox = tg3_write32_mbox_5906;
16270 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16271 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16274 if (tp->write32 == tg3_write_indirect_reg32 ||
16275 (tg3_flag(tp, PCIX_MODE) &&
16276 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16277 tg3_asic_rev(tp) == ASIC_REV_5701)))
16278 tg3_flag_set(tp, SRAM_USE_CONFIG);
16280 /* The memory arbiter has to be enabled in order for SRAM accesses
16281 * to succeed. Normally on powerup the tg3 chip firmware will make
16282 * sure it is enabled, but other entities such as system netboot
16283 * code might disable it.
16284 */
16285 val = tr32(MEMARB_MODE);
16286 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16288 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16289 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16290 tg3_flag(tp, 5780_CLASS)) {
16291 if (tg3_flag(tp, PCIX_MODE)) {
16292 pci_read_config_dword(tp->pdev,
16293 tp->pcix_cap + PCI_X_STATUS,
16294 &val);
16295 tp->pci_fn = val & 0x7;
16297 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16298 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16299 tg3_asic_rev(tp) == ASIC_REV_5720) {
16300 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16301 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16302 val = tr32(TG3_CPMU_STATUS);
16304 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16305 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16306 else
16307 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16308 TG3_CPMU_STATUS_FSHFT_5719;
16311 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16312 tp->write32_tx_mbox = tg3_write_flush_reg32;
16313 tp->write32_rx_mbox = tg3_write_flush_reg32;
16316 /* Get eeprom hw config before calling tg3_set_power_state().
16317 * In particular, the TG3_FLAG_IS_NIC flag must be
16318 * determined before calling tg3_set_power_state() so that
16319 * we know whether or not to switch out of Vaux power.
16320 * When the flag is set, it means that GPIO1 is used for eeprom
16321 * write protect and also implies that it is a LOM where GPIOs
16322 * are not used to switch power.
16323 */
16324 tg3_get_eeprom_hw_cfg(tp);
16326 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16327 tg3_flag_clear(tp, TSO_CAPABLE);
16328 tg3_flag_clear(tp, TSO_BUG);
16329 tp->fw_needed = NULL;
16332 if (tg3_flag(tp, ENABLE_APE)) {
16333 /* Allow reads and writes to the
16334 * APE register and memory space.
16336 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16337 PCISTATE_ALLOW_APE_SHMEM_WR |
16338 PCISTATE_ALLOW_APE_PSPACE_WR;
16339 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16340 pci_state_reg);
16342 tg3_ape_lock_init(tp);
16345 /* Set up tp->grc_local_ctrl before calling
16346 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16347 * will bring 5700's external PHY out of reset.
16348 * It is also used as eeprom write protect on LOMs.
16349 */
16350 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16351 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16352 tg3_flag(tp, EEPROM_WRITE_PROT))
16353 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16354 GRC_LCLCTRL_GPIO_OUTPUT1);
16355 /* Unused GPIO3 must be driven as output on 5752 because there
16356 * are no pull-up resistors on unused GPIO pins.
16357 */
16358 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16359 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16361 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16362 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16363 tg3_flag(tp, 57765_CLASS))
16364 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16366 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16367 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16368 /* Turn off the debug UART. */
16369 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16370 if (tg3_flag(tp, IS_NIC))
16371 /* Keep VMain power. */
16372 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16373 GRC_LCLCTRL_GPIO_OUTPUT0;
16376 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16377 tp->grc_local_ctrl |=
16378 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16380 /* Switch out of Vaux if it is a NIC */
16381 tg3_pwrsrc_switch_to_vmain(tp);
16383 /* Derive initial jumbo mode from MTU assigned in
16384 * ether_setup() via the alloc_etherdev() call
16385 */
16386 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16387 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16389 /* Determine WakeOnLan speed to use. */
16390 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16391 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16392 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16393 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16394 tg3_flag_clear(tp, WOL_SPEED_100MB);
16395 } else {
16396 tg3_flag_set(tp, WOL_SPEED_100MB);
16397 }
16399 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16400 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16402 /* A few boards don't want Ethernet@WireSpeed phy feature */
16403 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16404 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16405 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16406 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16407 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16408 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16409 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16411 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16412 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16413 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16414 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16415 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16417 if (tg3_flag(tp, 5705_PLUS) &&
16418 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16419 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16420 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16421 !tg3_flag(tp, 57765_PLUS)) {
16422 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16423 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16424 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16425 tg3_asic_rev(tp) == ASIC_REV_5761) {
16426 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16427 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16428 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16429 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16430 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16431 } else
16432 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16433 }
16435 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16436 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16437 tp->phy_otp = tg3_read_otp_phycfg(tp);
16438 if (tp->phy_otp == 0)
16439 tp->phy_otp = TG3_OTP_DEFAULT;
16442 if (tg3_flag(tp, CPMU_PRESENT))
16443 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16444 else
16445 tp->mi_mode = MAC_MI_MODE_BASE;
16447 tp->coalesce_mode = 0;
16448 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16449 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16450 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16452 /* Set these bits to enable statistics workaround. */
16453 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16454 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16455 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16456 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16457 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16460 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16461 tg3_asic_rev(tp) == ASIC_REV_57780)
16462 tg3_flag_set(tp, USE_PHYLIB);
16464 err = tg3_mdio_init(tp);
16465 if (err)
16466 return err;
16468 /* Initialize data/descriptor byte/word swapping. */
16469 val = tr32(GRC_MODE);
16470 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16471 tg3_asic_rev(tp) == ASIC_REV_5762)
16472 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16473 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16474 GRC_MODE_B2HRX_ENABLE |
16475 GRC_MODE_HTX2B_ENABLE |
16476 GRC_MODE_HOST_STACKUP);
16477 else
16478 val &= GRC_MODE_HOST_STACKUP;
16480 tw32(GRC_MODE, val | tp->grc_mode);
16482 tg3_switch_clocks(tp);
16484 /* Clear this out for sanity. */
16485 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16487 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16488 &pci_state_reg);
16489 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16490 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16491 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16492 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16493 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16494 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16495 void __iomem *sram_base;
16497 /* Write some dummy words into the SRAM status block
16498 * area, see if it reads back correctly. If the return
16499 * value is bad, force enable the PCIX workaround.
16500 */
16501 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16503 writel(0x00000000, sram_base);
16504 writel(0x00000000, sram_base + 4);
16505 writel(0xffffffff, sram_base + 4);
16506 if (readl(sram_base) != 0x00000000)
16507 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16512 tg3_nvram_init(tp);
16514 /* If the device has an NVRAM, no need to load patch firmware */
16515 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16516 !tg3_flag(tp, NO_NVRAM))
16517 tp->fw_needed = NULL;
16519 grc_misc_cfg = tr32(GRC_MISC_CFG);
16520 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16522 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16523 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16524 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16525 tg3_flag_set(tp, IS_5788);
16527 if (!tg3_flag(tp, IS_5788) &&
16528 tg3_asic_rev(tp) != ASIC_REV_5700)
16529 tg3_flag_set(tp, TAGGED_STATUS);
16530 if (tg3_flag(tp, TAGGED_STATUS)) {
16531 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16532 HOSTCC_MODE_CLRTICK_TXBD);
16534 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16535 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16536 tp->misc_host_ctrl);
16539 /* Preserve the APE MAC_MODE bits */
16540 if (tg3_flag(tp, ENABLE_APE))
16541 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16542 else
16543 tp->mac_mode = 0;
16545 if (tg3_10_100_only_device(tp, ent))
16546 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16548 err = tg3_phy_probe(tp);
16549 if (err) {
16550 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16551 /* ... but do not return immediately ... */
16552 tg3_mdio_fini(tp);
16553 }
16556 tg3_read_fw_ver(tp);
16558 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16559 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16560 } else {
16561 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16562 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16563 else
16564 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16565 }
16567 /* 5700 {AX,BX} chips have a broken status block link
16568 * change bit implementation, so we must use the
16569 * status register in those cases.
16570 */
16571 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16572 tg3_flag_set(tp, USE_LINKCHG_REG);
16573 else
16574 tg3_flag_clear(tp, USE_LINKCHG_REG);
16576 /* The led_ctrl is set during tg3_phy_probe, here we might
16577 * have to force the link status polling mechanism based
16578 * upon subsystem IDs.
16579 */
16580 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16581 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16582 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16583 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16584 tg3_flag_set(tp, USE_LINKCHG_REG);
16587 /* For all SERDES we poll the MAC status register. */
16588 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16589 tg3_flag_set(tp, POLL_SERDES);
16590 else
16591 tg3_flag_clear(tp, POLL_SERDES);
16593 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16594 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16595 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16596 tg3_flag(tp, PCIX_MODE)) {
16597 tp->rx_offset = NET_SKB_PAD;
16598 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16599 tp->rx_copy_thresh = ~(u16)0;
16600 #endif
16601 }
16603 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16604 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16605 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16607 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
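/* The ring sizes are powers of two, so the masks computed above let
 * producer/consumer indices wrap without a modulo. Illustrative sketch
 * only (helper name is not from the driver):
 */
static inline u32 example_ring_advance(u32 idx, u32 ring_mask)
{
	return (idx + 1) & ring_mask;	/* wraps to 0 at ring size */
}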
16609 /* Increment the rx prod index on the rx std ring by at most
16610 * 8 for these chips to workaround hw errata.
16611 */
16612 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16613 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16614 tg3_asic_rev(tp) == ASIC_REV_5755)
16615 tp->rx_std_max_post = 8;
16617 if (tg3_flag(tp, ASPM_WORKAROUND))
16618 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16619 PCIE_PWR_MGMT_L1_THRESH_MSK;
16621 return err;
16622 }
16624 #ifdef CONFIG_SPARC
16625 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16627 struct net_device *dev = tp->dev;
16628 struct pci_dev *pdev = tp->pdev;
16629 struct device_node *dp = pci_device_to_OF_node(pdev);
16630 const unsigned char *addr;
16633 addr = of_get_property(dp, "local-mac-address", &len);
16634 if (addr && len == 6) {
16635 memcpy(dev->dev_addr, addr, 6);
16636 return 0;
16637 }
16638 return -ENODEV;
16639 }
16641 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16643 struct net_device *dev = tp->dev;
16645 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
16646 return 0;
16647 }
16648 #endif
16650 static int tg3_get_device_address(struct tg3 *tp)
16652 struct net_device *dev = tp->dev;
16653 u32 hi, lo, mac_offset;
16654 int addr_ok = 0;
16655 int err;
16657 #ifdef CONFIG_SPARC
16658 if (!tg3_get_macaddr_sparc(tp))
16659 return 0;
16660 #endif
16662 if (tg3_flag(tp, IS_SSB_CORE)) {
16663 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16664 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16665 return 0;
16666 }
16668 mac_offset = 0x7c;
16669 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16670 tg3_flag(tp, 5780_CLASS)) {
16671 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16672 mac_offset = 0xcc;
16673 if (tg3_nvram_lock(tp))
16674 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16675 else
16676 tg3_nvram_unlock(tp);
16677 } else if (tg3_flag(tp, 5717_PLUS)) {
16678 if (tp->pci_fn & 1)
16679 mac_offset = 0xcc;
16680 if (tp->pci_fn > 1)
16681 mac_offset += 0x18c;
16682 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16683 mac_offset = 0x10;
16685 /* First try to get it from MAC address mailbox. */
16686 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
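/* Note: 0x484b is ASCII "HK" ('H' = 0x48, 'K' = 0x4b), which appears to
 * be the bootcode's signature marking a valid MAC address in the SRAM
 * mailbox checked below (interpretation of the check, not documented
 * in this file).
 */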
16687 if ((hi >> 16) == 0x484b) {
16688 dev->dev_addr[0] = (hi >> 8) & 0xff;
16689 dev->dev_addr[1] = (hi >> 0) & 0xff;
16691 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16692 dev->dev_addr[2] = (lo >> 24) & 0xff;
16693 dev->dev_addr[3] = (lo >> 16) & 0xff;
16694 dev->dev_addr[4] = (lo >> 8) & 0xff;
16695 dev->dev_addr[5] = (lo >> 0) & 0xff;
16697 /* Some old bootcode may report a 0 MAC address in SRAM */
16698 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16699 }
16700 if (!addr_ok) {
16701 /* Next, try NVRAM. */
16702 if (!tg3_flag(tp, NO_NVRAM) &&
16703 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16704 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16705 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16706 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16707 }
16708 /* Finally just fetch it out of the MAC control regs. */
16709 else {
16710 hi = tr32(MAC_ADDR_0_HIGH);
16711 lo = tr32(MAC_ADDR_0_LOW);
16713 dev->dev_addr[5] = lo & 0xff;
16714 dev->dev_addr[4] = (lo >> 8) & 0xff;
16715 dev->dev_addr[3] = (lo >> 16) & 0xff;
16716 dev->dev_addr[2] = (lo >> 24) & 0xff;
16717 dev->dev_addr[1] = hi & 0xff;
16718 dev->dev_addr[0] = (hi >> 8) & 0xff;
16722 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16723 #ifdef CONFIG_SPARC
16724 if (!tg3_get_default_macaddr_sparc(tp))
16725 return 0;
16726 #endif
16727 return -EINVAL;
16728 }
16730 return 0;
16731 }
16732 #define BOUNDARY_SINGLE_CACHELINE 1
16733 #define BOUNDARY_MULTI_CACHELINE 2
16735 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16737 int cacheline_size;
16738 u8 byte;
16739 int goal;
16741 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16742 if (byte == 0)
16743 cacheline_size = 1024;
16744 else
16745 cacheline_size = (int) byte * 4;
16747 /* On 5703 and later chips, the boundary bits have no
16748 * effect.
16749 */
16750 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16751 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16752 !tg3_flag(tp, PCI_EXPRESS))
16753 goto out;
16755 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16756 goal = BOUNDARY_MULTI_CACHELINE;
16757 #else
16758 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16759 goal = BOUNDARY_SINGLE_CACHELINE;
16760 #else
16761 goal = 0;
16762 #endif
16763 #endif
16765 if (tg3_flag(tp, 57765_PLUS)) {
16766 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16767 goto out;
16768 }
16770 if (!goal)
16771 goto out;
16773 /* PCI controllers on most RISC systems tend to disconnect
16774 * when a device tries to burst across a cache-line boundary.
16775 * Therefore, letting tg3 do so just wastes PCI bandwidth.
16777 * Unfortunately, for PCI-E there are only limited
16778 * write-side controls for this, and thus for reads
16779 * we will still get the disconnects. We'll also waste
16780 * these PCI cycles for both read and write for chips
16781 * other than 5700 and 5701 which do not implement the
16784 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16785 switch (cacheline_size) {
16786 case 16:
16787 case 32:
16788 case 64:
16789 case 128:
16790 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16791 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16792 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16793 } else {
16794 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16795 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16796 }
16797 break;
16799 case 256:
16800 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16801 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16802 break;
16804 default:
16805 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16806 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16807 break;
16808 }
16809 } else if (tg3_flag(tp, PCI_EXPRESS)) {
16810 switch (cacheline_size) {
16811 case 16:
16812 case 32:
16813 case 64:
16814 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16815 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16816 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16817 }
16818 /* fallthrough */
16819 case 128:
16820 default:
16821 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16822 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16823 break;
16824 }
16825 } else {
16827 switch (cacheline_size) {
16828 case 16:
16829 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16830 val |= (DMA_RWCTRL_READ_BNDRY_16 |
16831 DMA_RWCTRL_WRITE_BNDRY_16);
16832 break;
16833 }
16834 /* fallthrough */
16835 case 32:
16836 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16837 val |= (DMA_RWCTRL_READ_BNDRY_32 |
16838 DMA_RWCTRL_WRITE_BNDRY_32);
16839 break;
16840 }
16841 /* fallthrough */
16842 case 64:
16843 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16844 val |= (DMA_RWCTRL_READ_BNDRY_64 |
16845 DMA_RWCTRL_WRITE_BNDRY_64);
16846 break;
16847 }
16848 /* fallthrough */
16849 case 128:
16850 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16851 val |= (DMA_RWCTRL_READ_BNDRY_128 |
16852 DMA_RWCTRL_WRITE_BNDRY_128);
16853 break;
16854 }
16855 /* fallthrough */
16856 case 256:
16857 val |= (DMA_RWCTRL_READ_BNDRY_256 |
16858 DMA_RWCTRL_WRITE_BNDRY_256);
16859 break;
16860 case 512:
16861 val |= (DMA_RWCTRL_READ_BNDRY_512 |
16862 DMA_RWCTRL_WRITE_BNDRY_512);
16863 break;
16864 case 1024:
16865 default:
16866 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16867 DMA_RWCTRL_WRITE_BNDRY_1024);
16868 break;
16869 }
16870 }
16872 out:
16873 return val;
16874 }
16876 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16877 int size, bool to_device)
16879 struct tg3_internal_buffer_desc test_desc;
16880 u32 sram_dma_descs;
16881 int i, ret;
16883 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16885 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16886 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16887 tw32(RDMAC_STATUS, 0);
16888 tw32(WDMAC_STATUS, 0);
16890 tw32(BUFMGR_MODE, 0);
16891 tw32(FTQ_RESET, 0);
16893 test_desc.addr_hi = ((u64) buf_dma) >> 32;
16894 test_desc.addr_lo = buf_dma & 0xffffffff;
16895 test_desc.nic_mbuf = 0x00002100;
16896 test_desc.len = size;
16898 /*
16899 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
16900 * the *second* time the tg3 driver was getting loaded after an
16901 * initial scan.
16903 * Broadcom tells me:
16904 * ...the DMA engine is connected to the GRC block and a DMA
16905 * reset may affect the GRC block in some unpredictable way...
16906 * The behavior of resets to individual blocks has not been tested.
16908 * Broadcom noted the GRC reset will also reset all sub-components.
16909 */
16910 if (to_device) {
16911 test_desc.cqid_sqid = (13 << 8) | 2;
16913 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16914 udelay(40);
16915 } else {
16916 test_desc.cqid_sqid = (16 << 8) | 7;
16918 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16919 udelay(40);
16920 }
16921 test_desc.flags = 0x00000005;
16923 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16924 u32 val;
16926 val = *(((u32 *)&test_desc) + i);
16927 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16928 sram_dma_descs + (i * sizeof(u32)));
16929 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16931 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
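/* The loop above uses the PCI config-space memory window: the base
 * register selects a NIC-local SRAM address, the data register carries
 * the payload, and the base is restored to 0 afterwards. A minimal
 * sketch of that access pattern (helper name illustrative):
 */
static void example_sram_win_write(struct pci_dev *pdev, u32 nic_addr, u32 val)
{
	pci_write_config_dword(pdev, TG3PCI_MEM_WIN_BASE_ADDR, nic_addr);
	pci_write_config_dword(pdev, TG3PCI_MEM_WIN_DATA, val);
	pci_write_config_dword(pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
}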
16933 if (to_device)
16934 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
16935 else
16936 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
16938 ret = -ENODEV;
16939 for (i = 0; i < 40; i++) {
16940 u32 val;
16942 if (to_device)
16943 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16944 else
16945 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16946 if ((val & 0xffff) == sram_dma_descs) {
16947 ret = 0;
16948 break;
16949 }
16950 udelay(100);
16951 }
16953 return ret;
16954 }
16957 #define TEST_BUFFER_SIZE 0x2000
16959 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16960 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16964 static int tg3_test_dma(struct tg3 *tp)
16966 dma_addr_t buf_dma;
16967 u32 *buf, saved_dma_rwctrl;
16968 int ret = 0;
16970 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16971 &buf_dma, GFP_KERNEL);
16972 if (!buf) {
16973 ret = -ENOMEM;
16974 goto out_nofree;
16975 }
16977 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16978 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16980 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16982 if (tg3_flag(tp, 57765_PLUS))
16983 goto out;
16985 if (tg3_flag(tp, PCI_EXPRESS)) {
16986 /* DMA read watermark not used on PCIE */
16987 tp->dma_rwctrl |= 0x00180000;
16988 } else if (!tg3_flag(tp, PCIX_MODE)) {
16989 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16990 tg3_asic_rev(tp) == ASIC_REV_5750)
16991 tp->dma_rwctrl |= 0x003f0000;
16992 else
16993 tp->dma_rwctrl |= 0x003f000f;
16995 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16996 tg3_asic_rev(tp) == ASIC_REV_5704) {
16997 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16998 u32 read_water = 0x7;
17000 /* If the 5704 is behind the EPB bridge, we can
17001 * do the less restrictive ONE_DMA workaround for
17002 * better performance.
17003 */
17004 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17005 tg3_asic_rev(tp) == ASIC_REV_5704)
17006 tp->dma_rwctrl |= 0x8000;
17007 else if (ccval == 0x6 || ccval == 0x7)
17008 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17010 if (tg3_asic_rev(tp) == ASIC_REV_5703)
17011 read_water = 4;
17012 /* Set bit 23 to enable PCIX hw bug fix */
17013 tp->dma_rwctrl |=
17014 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17015 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17016 0x00800000;
17017 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17018 /* 5780 always in PCIX mode */
17019 tp->dma_rwctrl |= 0x00144000;
17020 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17021 /* 5714 always in PCIX mode */
17022 tp->dma_rwctrl |= 0x00148000;
17023 } else {
17024 tp->dma_rwctrl |= 0x001b000f;
17025 }
17027 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17028 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17030 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17031 tg3_asic_rev(tp) == ASIC_REV_5704)
17032 tp->dma_rwctrl &= 0xfffffff0;
17034 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17035 tg3_asic_rev(tp) == ASIC_REV_5701) {
17036 /* Remove this if it causes problems for some boards. */
17037 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17039 /* On 5700/5701 chips, we need to set this bit.
17040 * Otherwise the chip will issue cacheline transactions
17041 * to streamable DMA memory with not all the byte
17042 * enables turned on. This is an error on several
17043 * RISC PCI controllers, in particular sparc64.
17045 * On 5703/5704 chips, this bit has been reassigned
17046 * a different meaning. In particular, it is used
17047 * on those chips to enable a PCI-X workaround.
17048 */
17049 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17050 }
17052 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17054 #if 0
17055 /* Unneeded, already done by tg3_get_invariants. */
17056 tg3_switch_clocks(tp);
17057 #endif
17059 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17060 tg3_asic_rev(tp) != ASIC_REV_5701)
17061 goto out;
17063 /* It is best to perform DMA test with maximum write burst size
17064 * to expose the 5700/5701 write DMA bug.
17065 */
17066 saved_dma_rwctrl = tp->dma_rwctrl;
17067 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17068 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17070 while (1) {
17071 u32 *p = buf, i;
17073 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17074 p[i] = i;
17076 /* Send the buffer to the chip. */
17077 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17078 if (ret) {
17079 dev_err(&tp->pdev->dev,
17080 "%s: Buffer write failed. err = %d\n",
17081 __func__, ret);
17082 break;
17083 }
17086 /* validate data reached card RAM correctly. */
17087 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17088 u32 val;
17089 tg3_read_mem(tp, 0x2100 + (i*4), &val);
17090 if (le32_to_cpu(val) != p[i]) {
17091 dev_err(&tp->pdev->dev,
17092 "%s: Buffer corrupted on device! "
17093 "(%d != %d)\n", __func__, val, i);
17094 /* ret = -ENODEV here? */
17095 break;
17096 }
17097 }
17099 /* Now read it back. */
17100 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17102 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17103 "err = %d\n", __func__, ret);
17108 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17109 if (p[i] == i)
17110 continue;
17112 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17113 DMA_RWCTRL_WRITE_BNDRY_16) {
17114 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17115 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17116 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17117 break;
17118 } else {
17119 dev_err(&tp->pdev->dev,
17120 "%s: Buffer corrupted on read back! "
17121 "(%d != %d)\n", __func__, p[i], i);
17127 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17128 /* Success. */
17129 ret = 0;
17130 break;
17131 }
17132 }
17133 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17134 DMA_RWCTRL_WRITE_BNDRY_16) {
17135 /* DMA test passed without adjusting DMA boundary,
17136 * now look for chipsets that are known to expose the
17137 * DMA bug without failing the test.
17138 */
17139 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17140 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17141 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17142 } else {
17143 /* Safe to use the calculated DMA boundary. */
17144 tp->dma_rwctrl = saved_dma_rwctrl;
17145 }
17147 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17148 }
17150 out:
17151 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17152 out_nofree:
17153 return ret;
17154 }
17156 static void tg3_init_bufmgr_config(struct tg3 *tp)
17158 if (tg3_flag(tp, 57765_PLUS)) {
17159 tp->bufmgr_config.mbuf_read_dma_low_water =
17160 DEFAULT_MB_RDMA_LOW_WATER_5705;
17161 tp->bufmgr_config.mbuf_mac_rx_low_water =
17162 DEFAULT_MB_MACRX_LOW_WATER_57765;
17163 tp->bufmgr_config.mbuf_high_water =
17164 DEFAULT_MB_HIGH_WATER_57765;
17166 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17167 DEFAULT_MB_RDMA_LOW_WATER_5705;
17168 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17169 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17170 tp->bufmgr_config.mbuf_high_water_jumbo =
17171 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17172 } else if (tg3_flag(tp, 5705_PLUS)) {
17173 tp->bufmgr_config.mbuf_read_dma_low_water =
17174 DEFAULT_MB_RDMA_LOW_WATER_5705;
17175 tp->bufmgr_config.mbuf_mac_rx_low_water =
17176 DEFAULT_MB_MACRX_LOW_WATER_5705;
17177 tp->bufmgr_config.mbuf_high_water =
17178 DEFAULT_MB_HIGH_WATER_5705;
17179 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17180 tp->bufmgr_config.mbuf_mac_rx_low_water =
17181 DEFAULT_MB_MACRX_LOW_WATER_5906;
17182 tp->bufmgr_config.mbuf_high_water =
17183 DEFAULT_MB_HIGH_WATER_5906;
17186 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17187 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17188 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17189 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17190 tp->bufmgr_config.mbuf_high_water_jumbo =
17191 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17193 tp->bufmgr_config.mbuf_read_dma_low_water =
17194 DEFAULT_MB_RDMA_LOW_WATER;
17195 tp->bufmgr_config.mbuf_mac_rx_low_water =
17196 DEFAULT_MB_MACRX_LOW_WATER;
17197 tp->bufmgr_config.mbuf_high_water =
17198 DEFAULT_MB_HIGH_WATER;
17200 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17201 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17202 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17203 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17204 tp->bufmgr_config.mbuf_high_water_jumbo =
17205 DEFAULT_MB_HIGH_WATER_JUMBO;
17208 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17209 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17212 static char *tg3_phy_string(struct tg3 *tp)
17214 switch (tp->phy_id & TG3_PHY_ID_MASK) {
17215 case TG3_PHY_ID_BCM5400: return "5400";
17216 case TG3_PHY_ID_BCM5401: return "5401";
17217 case TG3_PHY_ID_BCM5411: return "5411";
17218 case TG3_PHY_ID_BCM5701: return "5701";
17219 case TG3_PHY_ID_BCM5703: return "5703";
17220 case TG3_PHY_ID_BCM5704: return "5704";
17221 case TG3_PHY_ID_BCM5705: return "5705";
17222 case TG3_PHY_ID_BCM5750: return "5750";
17223 case TG3_PHY_ID_BCM5752: return "5752";
17224 case TG3_PHY_ID_BCM5714: return "5714";
17225 case TG3_PHY_ID_BCM5780: return "5780";
17226 case TG3_PHY_ID_BCM5755: return "5755";
17227 case TG3_PHY_ID_BCM5787: return "5787";
17228 case TG3_PHY_ID_BCM5784: return "5784";
17229 case TG3_PHY_ID_BCM5756: return "5722/5756";
17230 case TG3_PHY_ID_BCM5906: return "5906";
17231 case TG3_PHY_ID_BCM5761: return "5761";
17232 case TG3_PHY_ID_BCM5718C: return "5718C";
17233 case TG3_PHY_ID_BCM5718S: return "5718S";
17234 case TG3_PHY_ID_BCM57765: return "57765";
17235 case TG3_PHY_ID_BCM5719C: return "5719C";
17236 case TG3_PHY_ID_BCM5720C: return "5720C";
17237 case TG3_PHY_ID_BCM5762: return "5762C";
17238 case TG3_PHY_ID_BCM8002: return "8002/serdes";
17239 case 0: return "serdes";
17240 default: return "unknown";
17244 static char *tg3_bus_string(struct tg3 *tp, char *str)
17246 if (tg3_flag(tp, PCI_EXPRESS)) {
17247 strcpy(str, "PCI Express");
17249 } else if (tg3_flag(tp, PCIX_MODE)) {
17250 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17252 strcpy(str, "PCIX:");
17254 if ((clock_ctrl == 7) ||
17255 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17256 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17257 strcat(str, "133MHz");
17258 else if (clock_ctrl == 0)
17259 strcat(str, "33MHz");
17260 else if (clock_ctrl == 2)
17261 strcat(str, "50MHz");
17262 else if (clock_ctrl == 4)
17263 strcat(str, "66MHz");
17264 else if (clock_ctrl == 6)
17265 strcat(str, "100MHz");
17267 strcpy(str, "PCI:");
17268 if (tg3_flag(tp, PCI_HIGH_SPEED))
17269 strcat(str, "66MHz");
17271 strcat(str, "33MHz");
17273 if (tg3_flag(tp, PCI_32BIT))
17274 strcat(str, ":32-bit");
17276 strcat(str, ":64-bit");
17280 static void tg3_init_coal(struct tg3 *tp)
17282 struct ethtool_coalesce *ec = &tp->coal;
17284 memset(ec, 0, sizeof(*ec));
17285 ec->cmd = ETHTOOL_GCOALESCE;
17286 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17287 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17288 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17289 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17290 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17291 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17292 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17293 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17294 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17296 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17297 HOSTCC_MODE_CLRTICK_TXBD)) {
17298 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17299 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17300 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17301 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17304 if (tg3_flag(tp, 5705_PLUS)) {
17305 ec->rx_coalesce_usecs_irq = 0;
17306 ec->tx_coalesce_usecs_irq = 0;
17307 ec->stats_block_coalesce_usecs = 0;
17308 }
17309 }
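/* These defaults are what "ethtool -c" reports until a user changes
 * them; the driver's get_coalesce handler simply copies tp->coal out.
 * Minimal sketch, assuming the classic ethtool_ops signature:
 */
static int example_get_coalesce(struct net_device *dev,
				struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}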
17311 static int tg3_init_one(struct pci_dev *pdev,
17312 const struct pci_device_id *ent)
17314 struct net_device *dev;
17315 struct tg3 *tp;
17316 int i, err;
17317 u32 sndmbx, rcvmbx, intmbx;
17318 char str[40];
17319 u64 dma_mask, persist_dma_mask;
17320 netdev_features_t features = 0;
17322 printk_once(KERN_INFO "%s\n", version);
17324 err = pci_enable_device(pdev);
17326 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17330 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17332 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17333 goto err_out_disable_pdev;
17336 pci_set_master(pdev);
17338 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17339 if (!dev) {
17340 err = -ENOMEM;
17341 goto err_out_free_res;
17342 }
17344 SET_NETDEV_DEV(dev, &pdev->dev);
17346 tp = netdev_priv(dev);
17347 tp->pdev = pdev;
17348 tp->dev = dev;
17349 tp->pm_cap = pdev->pm_cap;
17350 tp->rx_mode = TG3_DEF_RX_MODE;
17351 tp->tx_mode = TG3_DEF_TX_MODE;
17354 if (tg3_debug > 0)
17355 tp->msg_enable = tg3_debug;
17356 else
17357 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17359 if (pdev_is_ssb_gige_core(pdev)) {
17360 tg3_flag_set(tp, IS_SSB_CORE);
17361 if (ssb_gige_must_flush_posted_writes(pdev))
17362 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17363 if (ssb_gige_one_dma_at_once(pdev))
17364 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17365 if (ssb_gige_have_roboswitch(pdev))
17366 tg3_flag_set(tp, ROBOSWITCH);
17367 if (ssb_gige_is_rgmii(pdev))
17368 tg3_flag_set(tp, RGMII_MODE);
17371 /* The word/byte swap controls here control register access byte
17372 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17373 * setting below.
17374 */
17375 tp->misc_host_ctrl =
17376 MISC_HOST_CTRL_MASK_PCI_INT |
17377 MISC_HOST_CTRL_WORD_SWAP |
17378 MISC_HOST_CTRL_INDIR_ACCESS |
17379 MISC_HOST_CTRL_PCISTATE_RW;
17381 /* The NONFRM (non-frame) byte/word swap controls take effect
17382 * on descriptor entries, anything which isn't packet data.
17384 * The StrongARM chips on the board (one for tx, one for rx)
17385 * are running in big-endian mode.
17386 */
17387 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17388 GRC_MODE_WSWAP_NONFRM_DATA);
17389 #ifdef __BIG_ENDIAN
17390 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17391 #endif
17392 spin_lock_init(&tp->lock);
17393 spin_lock_init(&tp->indirect_lock);
17394 INIT_WORK(&tp->reset_task, tg3_reset_task);
17396 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17398 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17400 goto err_out_free_dev;
17403 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17404 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17405 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17406 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17407 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17408 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17409 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17410 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17411 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17412 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17413 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17414 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
17415 tg3_flag_set(tp, ENABLE_APE);
17416 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17417 if (!tp->aperegs) {
17418 dev_err(&pdev->dev,
17419 "Cannot map APE registers, aborting\n");
17420 err = -ENOMEM;
17421 goto err_out_iounmap;
17422 }
17423 }
17425 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17426 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17428 dev->ethtool_ops = &tg3_ethtool_ops;
17429 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17430 dev->netdev_ops = &tg3_netdev_ops;
17431 dev->irq = pdev->irq;
17433 err = tg3_get_invariants(tp, ent);
17434 if (err) {
17435 dev_err(&pdev->dev,
17436 "Problem fetching invariants of chip, aborting\n");
17437 goto err_out_apeunmap;
17438 }
17440 /* The EPB bridge inside 5714, 5715, and 5780 and any
17441 * device behind the EPB cannot support DMA addresses > 40-bit.
17442 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17443 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17444 * do DMA address check in tg3_start_xmit().
17445 */
17446 if (tg3_flag(tp, IS_5788))
17447 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17448 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17449 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17450 #ifdef CONFIG_HIGHMEM
17451 dma_mask = DMA_BIT_MASK(64);
17452 #endif
17453 } else
17454 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17456 /* Configure DMA attributes. */
17457 if (dma_mask > DMA_BIT_MASK(32)) {
17458 err = pci_set_dma_mask(pdev, dma_mask);
17459 if (!err) {
17460 features |= NETIF_F_HIGHDMA;
17461 err = pci_set_consistent_dma_mask(pdev,
17462 persist_dma_mask);
17463 if (err < 0) {
17464 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17465 "DMA for consistent allocations\n");
17466 goto err_out_apeunmap;
17467 }
17468 }
17469 }
17470 if (err || dma_mask == DMA_BIT_MASK(32)) {
17471 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17472 if (err) {
17473 dev_err(&pdev->dev,
17474 "No usable DMA configuration, aborting\n");
17475 goto err_out_apeunmap;
17476 }
17477 }
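/* On current kernels the same mask policy is usually expressed via
 * dma_set_mask_and_coherent(). A minimal sketch of the equivalent
 * fallback logic (illustrative, not this driver's code path):
 */
static int example_set_dma_masks(struct pci_dev *pdev, u64 wide_mask)
{
	/* Try the wide mask first, then fall back to 32-bit DMA. */
	if (!dma_set_mask_and_coherent(&pdev->dev, wide_mask))
		return 0;
	return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
}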
17479 tg3_init_bufmgr_config(tp);
17481 features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
17483 /* 5700 B0 chips do not support checksumming correctly due
17484 * to hardware bugs.
17486 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17487 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17489 if (tg3_flag(tp, 5755_PLUS))
17490 features |= NETIF_F_IPV6_CSUM;
17493 /* TSO is on by default on chips that support hardware TSO.
17494 * Firmware TSO on older chips gives lower performance, so it
17495 * is off by default, but can be enabled using ethtool.
17496 */
17497 if ((tg3_flag(tp, HW_TSO_1) ||
17498 tg3_flag(tp, HW_TSO_2) ||
17499 tg3_flag(tp, HW_TSO_3)) &&
17500 (features & NETIF_F_IP_CSUM))
17501 features |= NETIF_F_TSO;
17502 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17503 if (features & NETIF_F_IPV6_CSUM)
17504 features |= NETIF_F_TSO6;
17505 if (tg3_flag(tp, HW_TSO_3) ||
17506 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17507 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17508 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17509 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17510 tg3_asic_rev(tp) == ASIC_REV_57780)
17511 features |= NETIF_F_TSO_ECN;
17514 dev->features |= features;
17515 dev->vlan_features |= features;
17517 /*
17518 * Add loopback capability only for a subset of devices that support
17519 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow
17520 * INT-PHY loopback for the remaining devices.
17521 */
17522 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17523 !tg3_flag(tp, CPMU_PRESENT))
17524 /* Add the loopback capability */
17525 features |= NETIF_F_LOOPBACK;
17527 dev->hw_features |= features;
17529 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17530 !tg3_flag(tp, TSO_CAPABLE) &&
17531 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17532 tg3_flag_set(tp, MAX_RXPEND_64);
17533 tp->rx_pending = 63;
17536 err = tg3_get_device_address(tp);
17537 if (err) {
17538 dev_err(&pdev->dev,
17539 "Could not obtain valid ethernet address, aborting\n");
17540 goto err_out_apeunmap;
17541 }
17543 /*
17544 * Reset chip in case the UNDI or EFI driver did not shut it down
17545 * cleanly. The DMA self test will enable WDMAC and we'll see
17546 * (spurious) pending DMA on the PCI bus at that point.
17547 */
17548 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17549 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17550 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17551 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17554 err = tg3_test_dma(tp);
17556 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17557 goto err_out_apeunmap;
17560 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17561 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17562 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17563 for (i = 0; i < tp->irq_max; i++) {
17564 struct tg3_napi *tnapi = &tp->napi[i];
17566 tnapi->tp = tp;
17567 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17569 tnapi->int_mbox = intmbx;
17570 if (!i)
17571 intmbx += 0x8;
17572 else
17573 intmbx += 0x4;
17575 tnapi->consmbox = rcvmbx;
17576 tnapi->prodmbox = sndmbx;
17578 if (i)
17579 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17580 else
17581 tnapi->coal_now = HOSTCC_MODE_NOW;
17583 if (!tg3_flag(tp, SUPPORT_MSIX))
17584 break;
17586 /*
17587 * If we support MSIX, we'll be using RSS. If we're using
17588 * RSS, the first vector only handles link interrupts and the
17589 * remaining vectors handle rx and tx interrupts. Reuse the
17590 * mailbox values for the next iteration. The values we setup
17591 * above are still useful for the single vectored mode.
17592 */
17593 if (!i)
17594 continue;
17596 rcvmbx += 0x8;
17598 if (sndmbx & 0x4)
17599 sndmbx -= 0x4;
17600 else
17601 sndmbx += 0xc;
17602 }
17606 pci_set_drvdata(pdev, dev);
17608 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17609 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17610 tg3_asic_rev(tp) == ASIC_REV_5762)
17611 tg3_flag_set(tp, PTP_CAPABLE);
17613 tg3_timer_init(tp);
17615 tg3_carrier_off(tp);
17617 err = register_netdev(dev);
17619 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17620 goto err_out_apeunmap;
17623 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17624 tp->board_part_number,
17625 tg3_chip_rev_id(tp),
17626 tg3_bus_string(tp, str),
17627 dev->dev_addr);
17629 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17630 struct phy_device *phydev;
17631 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
17633 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17634 phydev->drv->name, dev_name(&phydev->dev));
17635 } else {
17636 char *ethtype;
17638 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17639 ethtype = "10/100Base-TX";
17640 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17641 ethtype = "1000Base-SX";
17643 ethtype = "10/100/1000Base-T";
17645 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17646 "(WireSpeed[%d], EEE[%d])\n",
17647 tg3_phy_string(tp), ethtype,
17648 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17649 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17652 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17653 (dev->features & NETIF_F_RXCSUM) != 0,
17654 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17655 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17656 tg3_flag(tp, ENABLE_ASF) != 0,
17657 tg3_flag(tp, TSO_CAPABLE) != 0);
17658 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17660 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17661 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17663 pci_save_state(pdev);
17665 return 0;
17667 err_out_apeunmap:
17668 if (tp->aperegs) {
17669 iounmap(tp->aperegs);
17670 tp->aperegs = NULL;
17671 }
17673 err_out_iounmap:
17674 if (tp->regs) {
17675 iounmap(tp->regs);
17676 tp->regs = NULL;
17677 }
17679 err_out_free_dev:
17680 free_netdev(dev);
17682 err_out_free_res:
17683 pci_release_regions(pdev);
17685 err_out_disable_pdev:
17686 if (pci_is_enabled(pdev))
17687 pci_disable_device(pdev);
17688 pci_set_drvdata(pdev, NULL);
17689 return err;
17690 }
17692 static void tg3_remove_one(struct pci_dev *pdev)
17694 struct net_device *dev = pci_get_drvdata(pdev);
17696 if (dev) {
17697 struct tg3 *tp = netdev_priv(dev);
17699 release_firmware(tp->fw);
17701 tg3_reset_task_cancel(tp);
17703 if (tg3_flag(tp, USE_PHYLIB)) {
17704 tg3_phy_fini(tp);
17705 tg3_mdio_fini(tp);
17706 }
17708 unregister_netdev(dev);
17709 if (tp->aperegs) {
17710 iounmap(tp->aperegs);
17711 tp->aperegs = NULL;
17712 }
17713 if (tp->regs) {
17714 iounmap(tp->regs);
17715 tp->regs = NULL;
17716 }
17717 free_netdev(dev);
17718 pci_release_regions(pdev);
17719 pci_disable_device(pdev);
17720 pci_set_drvdata(pdev, NULL);
17721 }
17722 }
17724 #ifdef CONFIG_PM_SLEEP
17725 static int tg3_suspend(struct device *device)
17727 struct pci_dev *pdev = to_pci_dev(device);
17728 struct net_device *dev = pci_get_drvdata(pdev);
17729 struct tg3 *tp = netdev_priv(dev);
17732 if (!netif_running(dev))
17733 return 0;
17735 tg3_reset_task_cancel(tp);
17737 tg3_netif_stop(tp);
17739 tg3_timer_stop(tp);
17741 tg3_full_lock(tp, 1);
17742 tg3_disable_ints(tp);
17743 tg3_full_unlock(tp);
17745 netif_device_detach(dev);
17747 tg3_full_lock(tp, 0);
17748 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17749 tg3_flag_clear(tp, INIT_COMPLETE);
17750 tg3_full_unlock(tp);
17752 err = tg3_power_down_prepare(tp);
17753 if (err) {
17754 int err2;
17756 tg3_full_lock(tp, 0);
17758 tg3_flag_set(tp, INIT_COMPLETE);
17759 err2 = tg3_restart_hw(tp, true);
17760 if (err2)
17761 goto out;
17763 tg3_timer_start(tp);
17765 netif_device_attach(dev);
17766 tg3_netif_start(tp);
17768 out:
17769 tg3_full_unlock(tp);
17771 if (!err2)
17772 tg3_power_down_prepare(tp);
17773 }
17775 return err;
17776 }
17778 static int tg3_resume(struct device *device)
17780 struct pci_dev *pdev = to_pci_dev(device);
17781 struct net_device *dev = pci_get_drvdata(pdev);
17782 struct tg3 *tp = netdev_priv(dev);
17785 if (!netif_running(dev))
17786 return 0;
17788 netif_device_attach(dev);
17790 tg3_full_lock(tp, 0);
17792 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
17794 tg3_flag_set(tp, INIT_COMPLETE);
17795 err = tg3_restart_hw(tp,
17796 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
17797 if (err)
17798 goto out;
17800 tg3_timer_start(tp);
17802 tg3_netif_start(tp);
17804 out:
17805 tg3_full_unlock(tp);
17807 return err;
17808 }
17812 #endif /* CONFIG_PM_SLEEP */
17814 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
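/* SIMPLE_DEV_PM_OPS() wires the two callbacks into all of the
 * system-sleep hooks; with CONFIG_PM_SLEEP it expands to roughly the
 * following (sketch of the macro's effect, not a literal copy):
 */
#if 0
static const struct dev_pm_ops tg3_pm_ops = {
	.suspend = tg3_suspend,	.resume = tg3_resume,
	.freeze = tg3_suspend,	.thaw = tg3_resume,
	.poweroff = tg3_suspend,	.restore = tg3_resume,
};
#endif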
17816 static void tg3_shutdown(struct pci_dev *pdev)
17818 struct net_device *dev = pci_get_drvdata(pdev);
17819 struct tg3 *tp = netdev_priv(dev);
17821 rtnl_lock();
17822 netif_device_detach(dev);
17824 if (netif_running(dev))
17825 dev_close(dev);
17827 if (system_state == SYSTEM_POWER_OFF)
17828 tg3_power_down(tp);
17830 rtnl_unlock();
17831 }
17834 * tg3_io_error_detected - called when PCI error is detected
17835 * @pdev: Pointer to PCI device
17836 * @state: The current pci connection state
17838 * This function is called after a PCI bus error affecting
17839 * this device has been detected.
17841 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17842 pci_channel_state_t state)
17844 struct net_device *netdev = pci_get_drvdata(pdev);
17845 struct tg3 *tp = netdev_priv(netdev);
17846 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17848 netdev_info(netdev, "PCI I/O error detected\n");
17852 /* We probably don't have netdev yet */
17853 if (!netdev || !netif_running(netdev))
17854 goto done;
17858 tg3_netif_stop(tp);
17860 tg3_timer_stop(tp);
17862 /* Want to make sure that the reset task doesn't run */
17863 tg3_reset_task_cancel(tp);
17865 netif_device_detach(netdev);
17867 /* Clean up software state, even if MMIO is blocked */
17868 tg3_full_lock(tp, 0);
17869 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17870 tg3_full_unlock(tp);
17873 if (state == pci_channel_io_perm_failure) {
17874 if (netdev) {
17875 tg3_napi_enable(tp);
17876 dev_close(netdev);
17877 }
17878 err = PCI_ERS_RESULT_DISCONNECT;
17879 } else {
17880 pci_disable_device(pdev);
17881 }
17883 done:
17884 rtnl_unlock();
17886 return err;
17887 }
17889 * tg3_io_slot_reset - called after the pci bus has been reset.
17890 * @pdev: Pointer to PCI device
17892 * Restart the card from scratch, as if from a cold-boot.
17893 * At this point, the card has experienced a hard reset,
17894 * followed by fixups by BIOS, and has its config space
17895 * set up identically to what it was at cold boot.
17897 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17899 struct net_device *netdev = pci_get_drvdata(pdev);
17900 struct tg3 *tp = netdev_priv(netdev);
17901 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17902 int err;
17904 rtnl_lock();
17906 if (pci_enable_device(pdev)) {
17907 dev_err(&pdev->dev,
17908 "Cannot re-enable PCI device after reset.\n");
17912 pci_set_master(pdev);
17913 pci_restore_state(pdev);
17914 pci_save_state(pdev);
17916 if (!netdev || !netif_running(netdev)) {
17917 rc = PCI_ERS_RESULT_RECOVERED;
17918 goto done;
17919 }
17921 err = tg3_power_up(tp);
17922 if (err)
17923 goto done;
17925 rc = PCI_ERS_RESULT_RECOVERED;
17927 done:
17928 if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
17929 tg3_napi_enable(tp);
17930 dev_close(netdev);
17931 }
17932 rtnl_unlock();
17934 return rc;
17935 }
17938 * tg3_io_resume - called when traffic can start flowing again.
17939 * @pdev: Pointer to PCI device
17941 * This callback is called when the error recovery driver tells
17942 * us that it's OK to resume normal operation.
17944 static void tg3_io_resume(struct pci_dev *pdev)
17946 struct net_device *netdev = pci_get_drvdata(pdev);
17947 struct tg3 *tp = netdev_priv(netdev);
17948 int err;
17950 rtnl_lock();
17952 if (!netif_running(netdev))
17953 goto done;
17955 tg3_full_lock(tp, 0);
17956 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
17957 tg3_flag_set(tp, INIT_COMPLETE);
17958 err = tg3_restart_hw(tp, true);
17959 if (err) {
17960 tg3_full_unlock(tp);
17961 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17962 goto done;
17963 }
17965 netif_device_attach(netdev);
17967 tg3_timer_start(tp);
17969 tg3_netif_start(tp);
17971 tg3_full_unlock(tp);
17973 done:
17974 rtnl_unlock();
17975 }
17979 static const struct pci_error_handlers tg3_err_handler = {
17980 .error_detected = tg3_io_error_detected,
17981 .slot_reset = tg3_io_slot_reset,
17982 .resume = tg3_io_resume
17985 static struct pci_driver tg3_driver = {
17986 .name = DRV_MODULE_NAME,
17987 .id_table = tg3_pci_tbl,
17988 .probe = tg3_init_one,
17989 .remove = tg3_remove_one,
17990 .err_handler = &tg3_err_handler,
17991 .driver.pm = &tg3_pm_ops,
17992 .shutdown = tg3_shutdown,
17995 module_pci_driver(tg3_driver);
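/* module_pci_driver() replaces the old init/exit boilerplate; it
 * expands to roughly the following pair (sketch of the macro's effect,
 * not a literal copy):
 */
#if 0
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}
module_init(tg3_init);

static void __exit tg3_exit(void)
{
	pci_unregister_driver(&tg3_driver);
}
module_exit(tg3_exit);
#endif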