2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2013 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47 #include <linux/ssb/ssb_driver_gige.h>
48 #include <linux/hwmon.h>
49 #include <linux/hwmon-sysfs.h>
51 #include <net/checksum.h>
55 #include <asm/byteorder.h>
56 #include <linux/uaccess.h>
58 #include <uapi/linux/net_tstamp.h>
59 #include <linux/ptp_clock_kernel.h>
62 #include <asm/idprom.h>
71 /* Functions & macros to verify TG3_FLAGS types */
73 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
75 return test_bit(flag, bits);
78 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
83 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
85 clear_bit(flag, bits);
88 #define tg3_flag(tp, flag) \
89 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
90 #define tg3_flag_set(tp, flag) \
91 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
92 #define tg3_flag_clear(tp, flag) \
93 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
95 #define DRV_MODULE_NAME "tg3"
97 #define TG3_MIN_NUM 130
98 #define DRV_MODULE_VERSION \
99 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
100 #define DRV_MODULE_RELDATE "February 14, 2013"
102 #define RESET_KIND_SHUTDOWN 0
103 #define RESET_KIND_INIT 1
104 #define RESET_KIND_SUSPEND 2
106 #define TG3_DEF_RX_MODE 0
107 #define TG3_DEF_TX_MODE 0
108 #define TG3_DEF_MSG_ENABLE \
118 #define TG3_GRC_LCLCTL_PWRSW_DELAY 100
120 /* length of time before we decide the hardware is borked,
121 * and dev->tx_timeout() should be called to fix the problem
124 #define TG3_TX_TIMEOUT (5 * HZ)
126 /* hardware minimum and maximum for a single frame's data payload */
127 #define TG3_MIN_MTU 60
128 #define TG3_MAX_MTU(tp) \
129 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
131 /* These numbers seem to be hard coded in the NIC firmware somehow.
132 * You can't change the ring sizes, but you can change where you place
133 * them in the NIC onboard memory.
135 #define TG3_RX_STD_RING_SIZE(tp) \
136 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
137 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
138 #define TG3_DEF_RX_RING_PENDING 200
139 #define TG3_RX_JMB_RING_SIZE(tp) \
140 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
141 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
142 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
144 /* Do not place this n-ring entries value into the tp struct itself,
145 * we really want to expose these constants to GCC so that modulo et
146 * al. operations are done with shifts and masks instead of with
147 * hw multiply/modulo instructions. Another solution would be to
148 * replace things like '% foo' with '& (foo - 1)'.
151 #define TG3_TX_RING_SIZE 512
152 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
154 #define TG3_RX_STD_RING_BYTES(tp) \
155 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
156 #define TG3_RX_JMB_RING_BYTES(tp) \
157 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
158 #define TG3_RX_RCB_RING_BYTES(tp) \
159 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
160 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
162 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
164 #define TG3_DMA_BYTE_ENAB 64
166 #define TG3_RX_STD_DMA_SZ 1536
167 #define TG3_RX_JMB_DMA_SZ 9046
169 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
171 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
172 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
174 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
175 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
177 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
178 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
180 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
181 * that are at least dword aligned when used in PCIX mode. The driver
182 * works around this bug by double copying the packet. This workaround
183 * is built into the normal double copy length check for efficiency.
185 * However, the double copy is only necessary on those architectures
186 * where unaligned memory accesses are inefficient. For those architectures
187 * where unaligned memory accesses incur little penalty, we can reintegrate
188 * the 5701 in the normal rx path. Doing so saves a device structure
189 * dereference by hardcoding the double copy threshold in place.
191 #define TG3_RX_COPY_THRESHOLD 256
192 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
193 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
195 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
198 #if (NET_IP_ALIGN != 0)
199 #define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
201 #define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
204 /* minimum number of free TX descriptors required to wake up TX process */
205 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
206 #define TG3_TX_BD_DMA_MAX_2K 2048
207 #define TG3_TX_BD_DMA_MAX_4K 4096
209 #define TG3_RAW_IP_ALIGN 2
211 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
212 #define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
214 #define FIRMWARE_TG3 "tigon/tg3.bin"
215 #define FIRMWARE_TG357766 "tigon/tg357766.bin"
216 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
217 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
219 static char version[] =
220 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
222 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
223 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
224 MODULE_LICENSE("GPL");
225 MODULE_VERSION(DRV_MODULE_VERSION);
226 MODULE_FIRMWARE(FIRMWARE_TG3);
227 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
228 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
230 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
231 module_param(tg3_debug, int, 0);
232 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
234 #define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001
235 #define TG3_DRV_DATA_FLAG_5705_10_100 0x0002
237 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
257 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
258 TG3_DRV_DATA_FLAG_5705_10_100},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
260 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
261 TG3_DRV_DATA_FLAG_5705_10_100},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
264 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
265 TG3_DRV_DATA_FLAG_5705_10_100},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
272 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
278 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
286 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
287 PCI_VENDOR_ID_LENOVO,
288 TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
289 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
291 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
292 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
294 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
301 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
302 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
303 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
304 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
305 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
306 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
307 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
308 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
309 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
310 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
311 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
312 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
313 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
314 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
315 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
316 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
317 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
318 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
319 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
320 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
321 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
322 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
323 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
324 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
325 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
326 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
327 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
328 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
329 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
330 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
331 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
332 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
333 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
334 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
335 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
336 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
337 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
338 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
339 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
340 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
341 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
342 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
343 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
344 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
345 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
346 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
347 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
351 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
353 static const struct {
354 const char string[ETH_GSTRING_LEN];
355 } ethtool_stats_keys[] = {
358 { "rx_ucast_packets" },
359 { "rx_mcast_packets" },
360 { "rx_bcast_packets" },
362 { "rx_align_errors" },
363 { "rx_xon_pause_rcvd" },
364 { "rx_xoff_pause_rcvd" },
365 { "rx_mac_ctrl_rcvd" },
366 { "rx_xoff_entered" },
367 { "rx_frame_too_long_errors" },
369 { "rx_undersize_packets" },
370 { "rx_in_length_errors" },
371 { "rx_out_length_errors" },
372 { "rx_64_or_less_octet_packets" },
373 { "rx_65_to_127_octet_packets" },
374 { "rx_128_to_255_octet_packets" },
375 { "rx_256_to_511_octet_packets" },
376 { "rx_512_to_1023_octet_packets" },
377 { "rx_1024_to_1522_octet_packets" },
378 { "rx_1523_to_2047_octet_packets" },
379 { "rx_2048_to_4095_octet_packets" },
380 { "rx_4096_to_8191_octet_packets" },
381 { "rx_8192_to_9022_octet_packets" },
388 { "tx_flow_control" },
390 { "tx_single_collisions" },
391 { "tx_mult_collisions" },
393 { "tx_excessive_collisions" },
394 { "tx_late_collisions" },
395 { "tx_collide_2times" },
396 { "tx_collide_3times" },
397 { "tx_collide_4times" },
398 { "tx_collide_5times" },
399 { "tx_collide_6times" },
400 { "tx_collide_7times" },
401 { "tx_collide_8times" },
402 { "tx_collide_9times" },
403 { "tx_collide_10times" },
404 { "tx_collide_11times" },
405 { "tx_collide_12times" },
406 { "tx_collide_13times" },
407 { "tx_collide_14times" },
408 { "tx_collide_15times" },
409 { "tx_ucast_packets" },
410 { "tx_mcast_packets" },
411 { "tx_bcast_packets" },
412 { "tx_carrier_sense_errors" },
416 { "dma_writeq_full" },
417 { "dma_write_prioq_full" },
421 { "rx_threshold_hit" },
423 { "dma_readq_full" },
424 { "dma_read_prioq_full" },
425 { "tx_comp_queue_full" },
427 { "ring_set_send_prod_index" },
428 { "ring_status_update" },
430 { "nic_avoided_irqs" },
431 { "nic_tx_threshold_hit" },
433 { "mbuf_lwm_thresh_hit" },
436 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
437 #define TG3_NVRAM_TEST 0
438 #define TG3_LINK_TEST 1
439 #define TG3_REGISTER_TEST 2
440 #define TG3_MEMORY_TEST 3
441 #define TG3_MAC_LOOPB_TEST 4
442 #define TG3_PHY_LOOPB_TEST 5
443 #define TG3_EXT_LOOPB_TEST 6
444 #define TG3_INTERRUPT_TEST 7
447 static const struct {
448 const char string[ETH_GSTRING_LEN];
449 } ethtool_test_keys[] = {
450 [TG3_NVRAM_TEST] = { "nvram test (online) " },
451 [TG3_LINK_TEST] = { "link test (online) " },
452 [TG3_REGISTER_TEST] = { "register test (offline)" },
453 [TG3_MEMORY_TEST] = { "memory test (offline)" },
454 [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" },
455 [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" },
456 [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" },
457 [TG3_INTERRUPT_TEST] = { "interrupt test (offline)" },
460 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
463 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
465 writel(val, tp->regs + off);
468 static u32 tg3_read32(struct tg3 *tp, u32 off)
470 return readl(tp->regs + off);
473 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
475 writel(val, tp->aperegs + off);
478 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
480 return readl(tp->aperegs + off);
483 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
487 spin_lock_irqsave(&tp->indirect_lock, flags);
488 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
489 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
490 spin_unlock_irqrestore(&tp->indirect_lock, flags);
493 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
495 writel(val, tp->regs + off);
496 readl(tp->regs + off);
499 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
504 spin_lock_irqsave(&tp->indirect_lock, flags);
505 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
506 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
507 spin_unlock_irqrestore(&tp->indirect_lock, flags);
511 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
515 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
516 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
517 TG3_64BIT_REG_LOW, val);
520 if (off == TG3_RX_STD_PROD_IDX_REG) {
521 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
522 TG3_64BIT_REG_LOW, val);
526 spin_lock_irqsave(&tp->indirect_lock, flags);
527 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
528 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
529 spin_unlock_irqrestore(&tp->indirect_lock, flags);
531 /* In indirect mode when disabling interrupts, we also need
532 * to clear the interrupt bit in the GRC local ctrl register.
534 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
536 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
537 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
541 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
546 spin_lock_irqsave(&tp->indirect_lock, flags);
547 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
548 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
549 spin_unlock_irqrestore(&tp->indirect_lock, flags);
553 /* usec_wait specifies the wait time in usec when writing to certain registers
554 * where it is unsafe to read back the register without some delay.
555 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
556 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
558 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
560 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
561 /* Non-posted methods */
562 tp->write32(tp, off, val);
565 tg3_write32(tp, off, val);
570 /* Wait again after the read for the posted method to guarantee that
571 * the wait time is met.
577 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
579 tp->write32_mbox(tp, off, val);
580 if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
581 (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
582 !tg3_flag(tp, ICH_WORKAROUND)))
583 tp->read32_mbox(tp, off);
586 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
588 void __iomem *mbox = tp->regs + off;
590 if (tg3_flag(tp, TXD_MBOX_HWBUG))
592 if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
593 tg3_flag(tp, FLUSH_POSTED_WRITES))
597 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
599 return readl(tp->regs + off + GRCMBOX_BASE);
602 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
604 writel(val, tp->regs + off + GRCMBOX_BASE);
607 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
608 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
609 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
610 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
611 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
613 #define tw32(reg, val) tp->write32(tp, reg, val)
614 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
615 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
616 #define tr32(reg) tp->read32(tp, reg)
618 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
622 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
623 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
626 spin_lock_irqsave(&tp->indirect_lock, flags);
627 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
628 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
629 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
631 /* Always leave this as zero. */
632 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
634 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
635 tw32_f(TG3PCI_MEM_WIN_DATA, val);
637 /* Always leave this as zero. */
638 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
640 spin_unlock_irqrestore(&tp->indirect_lock, flags);
643 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
647 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
648 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
653 spin_lock_irqsave(&tp->indirect_lock, flags);
654 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
655 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
656 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
658 /* Always leave this as zero. */
659 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
661 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
662 *val = tr32(TG3PCI_MEM_WIN_DATA);
664 /* Always leave this as zero. */
665 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
667 spin_unlock_irqrestore(&tp->indirect_lock, flags);
670 static void tg3_ape_lock_init(struct tg3 *tp)
675 if (tg3_asic_rev(tp) == ASIC_REV_5761)
676 regbase = TG3_APE_LOCK_GRANT;
678 regbase = TG3_APE_PER_LOCK_GRANT;
680 /* Make sure the driver hasn't any stale locks. */
681 for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
683 case TG3_APE_LOCK_PHY0:
684 case TG3_APE_LOCK_PHY1:
685 case TG3_APE_LOCK_PHY2:
686 case TG3_APE_LOCK_PHY3:
687 bit = APE_LOCK_GRANT_DRIVER;
691 bit = APE_LOCK_GRANT_DRIVER;
693 bit = 1 << tp->pci_fn;
695 tg3_ape_write32(tp, regbase + 4 * i, bit);
700 static int tg3_ape_lock(struct tg3 *tp, int locknum)
704 u32 status, req, gnt, bit;
706 if (!tg3_flag(tp, ENABLE_APE))
710 case TG3_APE_LOCK_GPIO:
711 if (tg3_asic_rev(tp) == ASIC_REV_5761)
713 case TG3_APE_LOCK_GRC:
714 case TG3_APE_LOCK_MEM:
716 bit = APE_LOCK_REQ_DRIVER;
718 bit = 1 << tp->pci_fn;
720 case TG3_APE_LOCK_PHY0:
721 case TG3_APE_LOCK_PHY1:
722 case TG3_APE_LOCK_PHY2:
723 case TG3_APE_LOCK_PHY3:
724 bit = APE_LOCK_REQ_DRIVER;
730 if (tg3_asic_rev(tp) == ASIC_REV_5761) {
731 req = TG3_APE_LOCK_REQ;
732 gnt = TG3_APE_LOCK_GRANT;
734 req = TG3_APE_PER_LOCK_REQ;
735 gnt = TG3_APE_PER_LOCK_GRANT;
740 tg3_ape_write32(tp, req + off, bit);
742 /* Wait for up to 1 millisecond to acquire lock. */
743 for (i = 0; i < 100; i++) {
744 status = tg3_ape_read32(tp, gnt + off);
751 /* Revoke the lock request. */
752 tg3_ape_write32(tp, gnt + off, bit);
759 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
763 if (!tg3_flag(tp, ENABLE_APE))
767 case TG3_APE_LOCK_GPIO:
768 if (tg3_asic_rev(tp) == ASIC_REV_5761)
770 case TG3_APE_LOCK_GRC:
771 case TG3_APE_LOCK_MEM:
773 bit = APE_LOCK_GRANT_DRIVER;
775 bit = 1 << tp->pci_fn;
777 case TG3_APE_LOCK_PHY0:
778 case TG3_APE_LOCK_PHY1:
779 case TG3_APE_LOCK_PHY2:
780 case TG3_APE_LOCK_PHY3:
781 bit = APE_LOCK_GRANT_DRIVER;
787 if (tg3_asic_rev(tp) == ASIC_REV_5761)
788 gnt = TG3_APE_LOCK_GRANT;
790 gnt = TG3_APE_PER_LOCK_GRANT;
792 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
795 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
800 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
803 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
804 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
807 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
810 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
813 return timeout_us ? 0 : -EBUSY;
816 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
820 for (i = 0; i < timeout_us / 10; i++) {
821 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
823 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
829 return i == timeout_us / 10;
832 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
836 u32 i, bufoff, msgoff, maxlen, apedata;
838 if (!tg3_flag(tp, APE_HAS_NCSI))
841 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
842 if (apedata != APE_SEG_SIG_MAGIC)
845 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
846 if (!(apedata & APE_FW_STATUS_READY))
849 bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
851 msgoff = bufoff + 2 * sizeof(u32);
852 maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
857 /* Cap xfer sizes to scratchpad limits. */
858 length = (len > maxlen) ? maxlen : len;
861 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
862 if (!(apedata & APE_FW_STATUS_READY))
865 /* Wait for up to 1 msec for APE to service previous event. */
866 err = tg3_ape_event_lock(tp, 1000);
870 apedata = APE_EVENT_STATUS_DRIVER_EVNT |
871 APE_EVENT_STATUS_SCRTCHPD_READ |
872 APE_EVENT_STATUS_EVENT_PENDING;
873 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
875 tg3_ape_write32(tp, bufoff, base_off);
876 tg3_ape_write32(tp, bufoff + sizeof(u32), length);
878 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
879 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
883 if (tg3_ape_wait_for_event(tp, 30000))
886 for (i = 0; length; i += 4, length -= 4) {
887 u32 val = tg3_ape_read32(tp, msgoff + i);
888 memcpy(data, &val, sizeof(u32));
896 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
901 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
902 if (apedata != APE_SEG_SIG_MAGIC)
905 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
906 if (!(apedata & APE_FW_STATUS_READY))
909 /* Wait for up to 1 millisecond for APE to service previous event. */
910 err = tg3_ape_event_lock(tp, 1000);
914 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
915 event | APE_EVENT_STATUS_EVENT_PENDING);
917 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
918 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
923 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
928 if (!tg3_flag(tp, ENABLE_APE))
932 case RESET_KIND_INIT:
933 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
934 APE_HOST_SEG_SIG_MAGIC);
935 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
936 APE_HOST_SEG_LEN_MAGIC);
937 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
938 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
939 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
940 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
941 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
942 APE_HOST_BEHAV_NO_PHYLOCK);
943 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
944 TG3_APE_HOST_DRVR_STATE_START);
946 event = APE_EVENT_STATUS_STATE_START;
948 case RESET_KIND_SHUTDOWN:
949 /* With the interface we are currently using,
950 * APE does not track driver state. Wiping
951 * out the HOST SEGMENT SIGNATURE forces
952 * the APE to assume OS absent status.
954 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
956 if (device_may_wakeup(&tp->pdev->dev) &&
957 tg3_flag(tp, WOL_ENABLE)) {
958 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
959 TG3_APE_HOST_WOL_SPEED_AUTO);
960 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
962 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
964 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
966 event = APE_EVENT_STATUS_STATE_UNLOAD;
968 case RESET_KIND_SUSPEND:
969 event = APE_EVENT_STATUS_STATE_SUSPEND;
975 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
977 tg3_ape_send_event(tp, event);
980 static void tg3_disable_ints(struct tg3 *tp)
984 tw32(TG3PCI_MISC_HOST_CTRL,
985 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
986 for (i = 0; i < tp->irq_max; i++)
987 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
990 static void tg3_enable_ints(struct tg3 *tp)
997 tw32(TG3PCI_MISC_HOST_CTRL,
998 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
1000 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
1001 for (i = 0; i < tp->irq_cnt; i++) {
1002 struct tg3_napi *tnapi = &tp->napi[i];
1004 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1005 if (tg3_flag(tp, 1SHOT_MSI))
1006 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1008 tp->coal_now |= tnapi->coal_now;
1011 /* Force an initial interrupt */
1012 if (!tg3_flag(tp, TAGGED_STATUS) &&
1013 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1014 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1016 tw32(HOSTCC_MODE, tp->coal_now);
1018 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1021 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1023 struct tg3 *tp = tnapi->tp;
1024 struct tg3_hw_status *sblk = tnapi->hw_status;
1025 unsigned int work_exists = 0;
1027 /* check for phy events */
1028 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1029 if (sblk->status & SD_STATUS_LINK_CHG)
1033 /* check for TX work to do */
1034 if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1037 /* check for RX work to do */
1038 if (tnapi->rx_rcb_prod_idx &&
1039 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1046 * similar to tg3_enable_ints, but it accurately determines whether there
1047 * is new work pending and can return without flushing the PIO write
1048 * which reenables interrupts
1050 static void tg3_int_reenable(struct tg3_napi *tnapi)
1052 struct tg3 *tp = tnapi->tp;
1054 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1057 /* When doing tagged status, this work check is unnecessary.
1058 * The last_tag we write above tells the chip which piece of
1059 * work we've completed.
1061 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1062 tw32(HOSTCC_MODE, tp->coalesce_mode |
1063 HOSTCC_MODE_ENABLE | tnapi->coal_now);
1066 static void tg3_switch_clocks(struct tg3 *tp)
1069 u32 orig_clock_ctrl;
1071 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1074 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1076 orig_clock_ctrl = clock_ctrl;
1077 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1078 CLOCK_CTRL_CLKRUN_OENABLE |
1080 tp->pci_clock_ctrl = clock_ctrl;
1082 if (tg3_flag(tp, 5705_PLUS)) {
1083 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1084 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1085 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1087 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1088 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1090 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1092 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1093 clock_ctrl | (CLOCK_CTRL_ALTCLK),
1096 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1099 #define PHY_BUSY_LOOPS 5000
1101 static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
1108 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1110 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1114 tg3_ape_lock(tp, tp->phy_ape_lock);
1118 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1119 MI_COM_PHY_ADDR_MASK);
1120 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1121 MI_COM_REG_ADDR_MASK);
1122 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1124 tw32_f(MAC_MI_COM, frame_val);
1126 loops = PHY_BUSY_LOOPS;
1127 while (loops != 0) {
1129 frame_val = tr32(MAC_MI_COM);
1131 if ((frame_val & MI_COM_BUSY) == 0) {
1133 frame_val = tr32(MAC_MI_COM);
1141 *val = frame_val & MI_COM_DATA_MASK;
1145 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1146 tw32_f(MAC_MI_MODE, tp->mi_mode);
1150 tg3_ape_unlock(tp, tp->phy_ape_lock);
1155 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1157 return __tg3_readphy(tp, tp->phy_addr, reg, val);
1160 static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
1167 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1168 (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1171 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1173 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1177 tg3_ape_lock(tp, tp->phy_ape_lock);
1179 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1180 MI_COM_PHY_ADDR_MASK);
1181 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1182 MI_COM_REG_ADDR_MASK);
1183 frame_val |= (val & MI_COM_DATA_MASK);
1184 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1186 tw32_f(MAC_MI_COM, frame_val);
1188 loops = PHY_BUSY_LOOPS;
1189 while (loops != 0) {
1191 frame_val = tr32(MAC_MI_COM);
1192 if ((frame_val & MI_COM_BUSY) == 0) {
1194 frame_val = tr32(MAC_MI_COM);
1204 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1205 tw32_f(MAC_MI_MODE, tp->mi_mode);
1209 tg3_ape_unlock(tp, tp->phy_ape_lock);
1214 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1216 return __tg3_writephy(tp, tp->phy_addr, reg, val);
1219 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1223 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1227 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1231 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1232 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1236 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1242 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1246 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1250 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1254 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1255 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1259 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1265 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1269 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1271 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1276 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1280 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1282 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1287 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1291 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1292 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1293 MII_TG3_AUXCTL_SHDWSEL_MISC);
1295 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1300 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1302 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1303 set |= MII_TG3_AUXCTL_MISC_WREN;
1305 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1308 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1313 err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1319 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1321 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1323 err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1324 val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1329 static int tg3_bmcr_reset(struct tg3 *tp)
1334 /* OK, reset it, and poll the BMCR_RESET bit until it
1335 * clears or we time out.
1337 phy_control = BMCR_RESET;
1338 err = tg3_writephy(tp, MII_BMCR, phy_control);
1344 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1348 if ((phy_control & BMCR_RESET) == 0) {
1360 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1362 struct tg3 *tp = bp->priv;
1365 spin_lock_bh(&tp->lock);
1367 if (tg3_readphy(tp, reg, &val))
1370 spin_unlock_bh(&tp->lock);
1375 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1377 struct tg3 *tp = bp->priv;
1380 spin_lock_bh(&tp->lock);
1382 if (tg3_writephy(tp, reg, val))
1385 spin_unlock_bh(&tp->lock);
1390 static int tg3_mdio_reset(struct mii_bus *bp)
1395 static void tg3_mdio_config_5785(struct tg3 *tp)
1398 struct phy_device *phydev;
1400 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1401 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1402 case PHY_ID_BCM50610:
1403 case PHY_ID_BCM50610M:
1404 val = MAC_PHYCFG2_50610_LED_MODES;
1406 case PHY_ID_BCMAC131:
1407 val = MAC_PHYCFG2_AC131_LED_MODES;
1409 case PHY_ID_RTL8211C:
1410 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1412 case PHY_ID_RTL8201E:
1413 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1419 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1420 tw32(MAC_PHYCFG2, val);
1422 val = tr32(MAC_PHYCFG1);
1423 val &= ~(MAC_PHYCFG1_RGMII_INT |
1424 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1425 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1426 tw32(MAC_PHYCFG1, val);
1431 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1432 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1433 MAC_PHYCFG2_FMODE_MASK_MASK |
1434 MAC_PHYCFG2_GMODE_MASK_MASK |
1435 MAC_PHYCFG2_ACT_MASK_MASK |
1436 MAC_PHYCFG2_QUAL_MASK_MASK |
1437 MAC_PHYCFG2_INBAND_ENABLE;
1439 tw32(MAC_PHYCFG2, val);
1441 val = tr32(MAC_PHYCFG1);
1442 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1443 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1444 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1445 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1446 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1447 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1448 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1450 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1451 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1452 tw32(MAC_PHYCFG1, val);
1454 val = tr32(MAC_EXT_RGMII_MODE);
1455 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1456 MAC_RGMII_MODE_RX_QUALITY |
1457 MAC_RGMII_MODE_RX_ACTIVITY |
1458 MAC_RGMII_MODE_RX_ENG_DET |
1459 MAC_RGMII_MODE_TX_ENABLE |
1460 MAC_RGMII_MODE_TX_LOWPWR |
1461 MAC_RGMII_MODE_TX_RESET);
1462 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1463 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1464 val |= MAC_RGMII_MODE_RX_INT_B |
1465 MAC_RGMII_MODE_RX_QUALITY |
1466 MAC_RGMII_MODE_RX_ACTIVITY |
1467 MAC_RGMII_MODE_RX_ENG_DET;
1468 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1469 val |= MAC_RGMII_MODE_TX_ENABLE |
1470 MAC_RGMII_MODE_TX_LOWPWR |
1471 MAC_RGMII_MODE_TX_RESET;
1473 tw32(MAC_EXT_RGMII_MODE, val);
1476 static void tg3_mdio_start(struct tg3 *tp)
1478 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1479 tw32_f(MAC_MI_MODE, tp->mi_mode);
1482 if (tg3_flag(tp, MDIOBUS_INITED) &&
1483 tg3_asic_rev(tp) == ASIC_REV_5785)
1484 tg3_mdio_config_5785(tp);
1487 static int tg3_mdio_init(struct tg3 *tp)
1491 struct phy_device *phydev;
1493 if (tg3_flag(tp, 5717_PLUS)) {
1496 tp->phy_addr = tp->pci_fn + 1;
1498 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1499 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1501 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1502 TG3_CPMU_PHY_STRAP_IS_SERDES;
1506 tp->phy_addr = TG3_PHY_MII_ADDR;
1510 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1513 tp->mdio_bus = mdiobus_alloc();
1514 if (tp->mdio_bus == NULL)
1517 tp->mdio_bus->name = "tg3 mdio bus";
1518 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1519 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1520 tp->mdio_bus->priv = tp;
1521 tp->mdio_bus->parent = &tp->pdev->dev;
1522 tp->mdio_bus->read = &tg3_mdio_read;
1523 tp->mdio_bus->write = &tg3_mdio_write;
1524 tp->mdio_bus->reset = &tg3_mdio_reset;
1525 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1526 tp->mdio_bus->irq = &tp->mdio_irq[0];
1528 for (i = 0; i < PHY_MAX_ADDR; i++)
1529 tp->mdio_bus->irq[i] = PHY_POLL;
1531 /* The bus registration will look for all the PHYs on the mdio bus.
1532 * Unfortunately, it does not ensure the PHY is powered up before
1533 * accessing the PHY ID registers. A chip reset is the
1534 * quickest way to bring the device back to an operational state..
1536 if (tg3_readphy(tp, MII_BMCR, ®) || (reg & BMCR_PDOWN))
1539 i = mdiobus_register(tp->mdio_bus);
1541 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1542 mdiobus_free(tp->mdio_bus);
1546 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1548 if (!phydev || !phydev->drv) {
1549 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1550 mdiobus_unregister(tp->mdio_bus);
1551 mdiobus_free(tp->mdio_bus);
1555 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1556 case PHY_ID_BCM57780:
1557 phydev->interface = PHY_INTERFACE_MODE_GMII;
1558 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1560 case PHY_ID_BCM50610:
1561 case PHY_ID_BCM50610M:
1562 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1563 PHY_BRCM_RX_REFCLK_UNUSED |
1564 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1565 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1566 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1567 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1568 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1569 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1570 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1571 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1573 case PHY_ID_RTL8211C:
1574 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1576 case PHY_ID_RTL8201E:
1577 case PHY_ID_BCMAC131:
1578 phydev->interface = PHY_INTERFACE_MODE_MII;
1579 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1580 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1584 tg3_flag_set(tp, MDIOBUS_INITED);
1586 if (tg3_asic_rev(tp) == ASIC_REV_5785)
1587 tg3_mdio_config_5785(tp);
1592 static void tg3_mdio_fini(struct tg3 *tp)
1594 if (tg3_flag(tp, MDIOBUS_INITED)) {
1595 tg3_flag_clear(tp, MDIOBUS_INITED);
1596 mdiobus_unregister(tp->mdio_bus);
1597 mdiobus_free(tp->mdio_bus);
1601 /* tp->lock is held. */
1602 static inline void tg3_generate_fw_event(struct tg3 *tp)
1606 val = tr32(GRC_RX_CPU_EVENT);
1607 val |= GRC_RX_CPU_DRIVER_EVENT;
1608 tw32_f(GRC_RX_CPU_EVENT, val);
1610 tp->last_event_jiffies = jiffies;
1613 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1615 /* tp->lock is held. */
1616 static void tg3_wait_for_event_ack(struct tg3 *tp)
1619 unsigned int delay_cnt;
1622 /* If enough time has passed, no wait is necessary. */
1623 time_remain = (long)(tp->last_event_jiffies + 1 +
1624 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1626 if (time_remain < 0)
1629 /* Check if we can shorten the wait time. */
1630 delay_cnt = jiffies_to_usecs(time_remain);
1631 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1632 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1633 delay_cnt = (delay_cnt >> 3) + 1;
1635 for (i = 0; i < delay_cnt; i++) {
1636 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1642 /* tp->lock is held. */
1643 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1648 if (!tg3_readphy(tp, MII_BMCR, ®))
1650 if (!tg3_readphy(tp, MII_BMSR, ®))
1651 val |= (reg & 0xffff);
1655 if (!tg3_readphy(tp, MII_ADVERTISE, ®))
1657 if (!tg3_readphy(tp, MII_LPA, ®))
1658 val |= (reg & 0xffff);
1662 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1663 if (!tg3_readphy(tp, MII_CTRL1000, ®))
1665 if (!tg3_readphy(tp, MII_STAT1000, ®))
1666 val |= (reg & 0xffff);
1670 if (!tg3_readphy(tp, MII_PHYADDR, ®))
1677 /* tp->lock is held. */
1678 static void tg3_ump_link_report(struct tg3 *tp)
1682 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1685 tg3_phy_gather_ump_data(tp, data);
1687 tg3_wait_for_event_ack(tp);
1689 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1690 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1691 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1692 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1693 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1694 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1696 tg3_generate_fw_event(tp);
1699 /* tp->lock is held. */
1700 static void tg3_stop_fw(struct tg3 *tp)
1702 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1703 /* Wait for RX cpu to ACK the previous event. */
1704 tg3_wait_for_event_ack(tp);
1706 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1708 tg3_generate_fw_event(tp);
1710 /* Wait for RX cpu to ACK this event. */
1711 tg3_wait_for_event_ack(tp);
1715 /* tp->lock is held. */
1716 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1718 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1719 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1721 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1723 case RESET_KIND_INIT:
1724 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1728 case RESET_KIND_SHUTDOWN:
1729 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1733 case RESET_KIND_SUSPEND:
1734 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1743 if (kind == RESET_KIND_INIT ||
1744 kind == RESET_KIND_SUSPEND)
1745 tg3_ape_driver_state_change(tp, kind);
1748 /* tp->lock is held. */
1749 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1751 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1753 case RESET_KIND_INIT:
1754 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1755 DRV_STATE_START_DONE);
1758 case RESET_KIND_SHUTDOWN:
1759 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1760 DRV_STATE_UNLOAD_DONE);
1768 if (kind == RESET_KIND_SHUTDOWN)
1769 tg3_ape_driver_state_change(tp, kind);
1772 /* tp->lock is held. */
1773 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1775 if (tg3_flag(tp, ENABLE_ASF)) {
1777 case RESET_KIND_INIT:
1778 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1782 case RESET_KIND_SHUTDOWN:
1783 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1787 case RESET_KIND_SUSPEND:
1788 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1798 static int tg3_poll_fw(struct tg3 *tp)
1803 if (tg3_flag(tp, IS_SSB_CORE)) {
1804 /* We don't use firmware. */
1808 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1809 /* Wait up to 20ms for init done. */
1810 for (i = 0; i < 200; i++) {
1811 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1818 /* Wait for firmware initialization to complete. */
1819 for (i = 0; i < 100000; i++) {
1820 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1821 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1826 /* Chip might not be fitted with firmware. Some Sun onboard
1827 * parts are configured like that. So don't signal the timeout
1828 * of the above loop as an error, but do report the lack of
1829 * running firmware once.
1831 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1832 tg3_flag_set(tp, NO_FWARE_REPORTED);
1834 netdev_info(tp->dev, "No firmware running\n");
1837 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1838 /* The 57765 A0 needs a little more
1839 * time to do some important work.
1847 static void tg3_link_report(struct tg3 *tp)
1849 if (!netif_carrier_ok(tp->dev)) {
1850 netif_info(tp, link, tp->dev, "Link is down\n");
1851 tg3_ump_link_report(tp);
1852 } else if (netif_msg_link(tp)) {
1853 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1854 (tp->link_config.active_speed == SPEED_1000 ?
1856 (tp->link_config.active_speed == SPEED_100 ?
1858 (tp->link_config.active_duplex == DUPLEX_FULL ?
1861 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1862 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1864 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1867 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1868 netdev_info(tp->dev, "EEE is %s\n",
1869 tp->setlpicnt ? "enabled" : "disabled");
1871 tg3_ump_link_report(tp);
1874 tp->link_up = netif_carrier_ok(tp->dev);
1877 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1881 if (adv & ADVERTISE_PAUSE_CAP) {
1882 flowctrl |= FLOW_CTRL_RX;
1883 if (!(adv & ADVERTISE_PAUSE_ASYM))
1884 flowctrl |= FLOW_CTRL_TX;
1885 } else if (adv & ADVERTISE_PAUSE_ASYM)
1886 flowctrl |= FLOW_CTRL_TX;
1891 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1895 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1896 miireg = ADVERTISE_1000XPAUSE;
1897 else if (flow_ctrl & FLOW_CTRL_TX)
1898 miireg = ADVERTISE_1000XPSE_ASYM;
1899 else if (flow_ctrl & FLOW_CTRL_RX)
1900 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1907 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1911 if (adv & ADVERTISE_1000XPAUSE) {
1912 flowctrl |= FLOW_CTRL_RX;
1913 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1914 flowctrl |= FLOW_CTRL_TX;
1915 } else if (adv & ADVERTISE_1000XPSE_ASYM)
1916 flowctrl |= FLOW_CTRL_TX;
1921 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1925 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1926 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1927 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1928 if (lcladv & ADVERTISE_1000XPAUSE)
1930 if (rmtadv & ADVERTISE_1000XPAUSE)
1937 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1941 u32 old_rx_mode = tp->rx_mode;
1942 u32 old_tx_mode = tp->tx_mode;
1944 if (tg3_flag(tp, USE_PHYLIB))
1945 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1947 autoneg = tp->link_config.autoneg;
1949 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1950 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1951 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1953 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1955 flowctrl = tp->link_config.flowctrl;
1957 tp->link_config.active_flowctrl = flowctrl;
1959 if (flowctrl & FLOW_CTRL_RX)
1960 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1962 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1964 if (old_rx_mode != tp->rx_mode)
1965 tw32_f(MAC_RX_MODE, tp->rx_mode);
1967 if (flowctrl & FLOW_CTRL_TX)
1968 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1970 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1972 if (old_tx_mode != tp->tx_mode)
1973 tw32_f(MAC_TX_MODE, tp->tx_mode);
1976 static void tg3_adjust_link(struct net_device *dev)
1978 u8 oldflowctrl, linkmesg = 0;
1979 u32 mac_mode, lcl_adv, rmt_adv;
1980 struct tg3 *tp = netdev_priv(dev);
1981 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1983 spin_lock_bh(&tp->lock);
1985 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1986 MAC_MODE_HALF_DUPLEX);
1988 oldflowctrl = tp->link_config.active_flowctrl;
1994 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1995 mac_mode |= MAC_MODE_PORT_MODE_MII;
1996 else if (phydev->speed == SPEED_1000 ||
1997 tg3_asic_rev(tp) != ASIC_REV_5785)
1998 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2000 mac_mode |= MAC_MODE_PORT_MODE_MII;
2002 if (phydev->duplex == DUPLEX_HALF)
2003 mac_mode |= MAC_MODE_HALF_DUPLEX;
2005 lcl_adv = mii_advertise_flowctrl(
2006 tp->link_config.flowctrl);
2009 rmt_adv = LPA_PAUSE_CAP;
2010 if (phydev->asym_pause)
2011 rmt_adv |= LPA_PAUSE_ASYM;
2014 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2016 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2018 if (mac_mode != tp->mac_mode) {
2019 tp->mac_mode = mac_mode;
2020 tw32_f(MAC_MODE, tp->mac_mode);
2024 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2025 if (phydev->speed == SPEED_10)
2027 MAC_MI_STAT_10MBPS_MODE |
2028 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2030 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2033 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2034 tw32(MAC_TX_LENGTHS,
2035 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2036 (6 << TX_LENGTHS_IPG_SHIFT) |
2037 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2039 tw32(MAC_TX_LENGTHS,
2040 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2041 (6 << TX_LENGTHS_IPG_SHIFT) |
2042 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2044 if (phydev->link != tp->old_link ||
2045 phydev->speed != tp->link_config.active_speed ||
2046 phydev->duplex != tp->link_config.active_duplex ||
2047 oldflowctrl != tp->link_config.active_flowctrl)
2050 tp->old_link = phydev->link;
2051 tp->link_config.active_speed = phydev->speed;
2052 tp->link_config.active_duplex = phydev->duplex;
2054 spin_unlock_bh(&tp->lock);
2057 tg3_link_report(tp);
2060 static int tg3_phy_init(struct tg3 *tp)
2062 struct phy_device *phydev;
2064 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2067 /* Bring the PHY back to a known state. */
2070 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2072 /* Attach the MAC to the PHY. */
2073 phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2074 tg3_adjust_link, phydev->interface);
2075 if (IS_ERR(phydev)) {
2076 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2077 return PTR_ERR(phydev);
2080 /* Mask with MAC supported features. */
2081 switch (phydev->interface) {
2082 case PHY_INTERFACE_MODE_GMII:
2083 case PHY_INTERFACE_MODE_RGMII:
2084 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2085 phydev->supported &= (PHY_GBIT_FEATURES |
2087 SUPPORTED_Asym_Pause);
2091 case PHY_INTERFACE_MODE_MII:
2092 phydev->supported &= (PHY_BASIC_FEATURES |
2094 SUPPORTED_Asym_Pause);
2097 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2101 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2103 phydev->advertising = phydev->supported;
2108 static void tg3_phy_start(struct tg3 *tp)
2110 struct phy_device *phydev;
2112 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2115 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2117 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2118 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2119 phydev->speed = tp->link_config.speed;
2120 phydev->duplex = tp->link_config.duplex;
2121 phydev->autoneg = tp->link_config.autoneg;
2122 phydev->advertising = tp->link_config.advertising;
2127 phy_start_aneg(phydev);
2130 static void tg3_phy_stop(struct tg3 *tp)
2132 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2135 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2138 static void tg3_phy_fini(struct tg3 *tp)
2140 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2141 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2142 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2146 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2151 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2154 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2155 /* Cannot do read-modify-write on 5401 */
2156 err = tg3_phy_auxctl_write(tp,
2157 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2158 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2163 err = tg3_phy_auxctl_read(tp,
2164 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2168 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2169 err = tg3_phy_auxctl_write(tp,
2170 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2176 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2180 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2183 tg3_writephy(tp, MII_TG3_FET_TEST,
2184 phytest | MII_TG3_FET_SHADOW_EN);
2185 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2187 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2189 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2190 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2192 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2196 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2200 if (!tg3_flag(tp, 5705_PLUS) ||
2201 (tg3_flag(tp, 5717_PLUS) &&
2202 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2205 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2206 tg3_phy_fet_toggle_apd(tp, enable);
2210 reg = MII_TG3_MISC_SHDW_WREN |
2211 MII_TG3_MISC_SHDW_SCR5_SEL |
2212 MII_TG3_MISC_SHDW_SCR5_LPED |
2213 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2214 MII_TG3_MISC_SHDW_SCR5_SDTL |
2215 MII_TG3_MISC_SHDW_SCR5_C125OE;
2216 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2217 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2219 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2222 reg = MII_TG3_MISC_SHDW_WREN |
2223 MII_TG3_MISC_SHDW_APD_SEL |
2224 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2226 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2228 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2231 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2235 if (!tg3_flag(tp, 5705_PLUS) ||
2236 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2239 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2242 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2243 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2245 tg3_writephy(tp, MII_TG3_FET_TEST,
2246 ephy | MII_TG3_FET_SHADOW_EN);
2247 if (!tg3_readphy(tp, reg, &phy)) {
2249 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2251 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2252 tg3_writephy(tp, reg, phy);
2254 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2259 ret = tg3_phy_auxctl_read(tp,
2260 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2263 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2265 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2266 tg3_phy_auxctl_write(tp,
2267 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2272 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2277 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2280 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2282 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2283 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2286 static void tg3_phy_apply_otp(struct tg3 *tp)
2295 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2298 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2299 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2300 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2302 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2303 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2304 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2306 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2307 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2308 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2310 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2311 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2313 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2314 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2316 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2317 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2318 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2320 tg3_phy_toggle_auxctl_smdsp(tp, false);
2323 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2327 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2332 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2333 current_link_up == 1 &&
2334 tp->link_config.active_duplex == DUPLEX_FULL &&
2335 (tp->link_config.active_speed == SPEED_100 ||
2336 tp->link_config.active_speed == SPEED_1000)) {
2339 if (tp->link_config.active_speed == SPEED_1000)
2340 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2342 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2344 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2346 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2347 TG3_CL45_D7_EEERES_STAT, &val);
2349 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2350 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2354 if (!tp->setlpicnt) {
2355 if (current_link_up == 1 &&
2356 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2357 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2358 tg3_phy_toggle_auxctl_smdsp(tp, false);
2361 val = tr32(TG3_CPMU_EEE_MODE);
2362 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2366 static void tg3_phy_eee_enable(struct tg3 *tp)
2370 if (tp->link_config.active_speed == SPEED_1000 &&
2371 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2372 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2373 tg3_flag(tp, 57765_CLASS)) &&
2374 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2375 val = MII_TG3_DSP_TAP26_ALNOKO |
2376 MII_TG3_DSP_TAP26_RMRXSTO;
2377 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2378 tg3_phy_toggle_auxctl_smdsp(tp, false);
2381 val = tr32(TG3_CPMU_EEE_MODE);
2382 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2385 static int tg3_wait_macro_done(struct tg3 *tp)
2392 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2393 if ((tmp32 & 0x1000) == 0)
2403 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2405 static const u32 test_pat[4][6] = {
2406 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2407 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2408 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2409 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2413 for (chan = 0; chan < 4; chan++) {
2416 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2417 (chan * 0x2000) | 0x0200);
2418 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2420 for (i = 0; i < 6; i++)
2421 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2424 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2425 if (tg3_wait_macro_done(tp)) {
2430 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2431 (chan * 0x2000) | 0x0200);
2432 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2433 if (tg3_wait_macro_done(tp)) {
2438 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2439 if (tg3_wait_macro_done(tp)) {
2444 for (i = 0; i < 6; i += 2) {
2447 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2448 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2449 tg3_wait_macro_done(tp)) {
2455 if (low != test_pat[chan][i] ||
2456 high != test_pat[chan][i+1]) {
2457 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2458 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2459 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2469 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2473 for (chan = 0; chan < 4; chan++) {
2476 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2477 (chan * 0x2000) | 0x0200);
2478 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2479 for (i = 0; i < 6; i++)
2480 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2481 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2482 if (tg3_wait_macro_done(tp))
2489 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2491 u32 reg32, phy9_orig;
2492 int retries, do_phy_reset, err;
2498 err = tg3_bmcr_reset(tp);
2504 /* Disable transmitter and interrupt. */
2505 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32))
2509 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2511 /* Set full-duplex, 1000 mbps. */
2512 tg3_writephy(tp, MII_BMCR,
2513 BMCR_FULLDPLX | BMCR_SPEED1000);
2515 /* Set to master mode. */
2516 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2519 tg3_writephy(tp, MII_CTRL1000,
2520 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2522 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2526 /* Block the PHY control access. */
2527 tg3_phydsp_write(tp, 0x8005, 0x0800);
2529 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2532 } while (--retries);
2534 err = tg3_phy_reset_chanpat(tp);
2538 tg3_phydsp_write(tp, 0x8005, 0x0000);
2540 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2541 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2543 tg3_phy_toggle_auxctl_smdsp(tp, false);
2545 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2547 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32)) {
2549 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2556 static void tg3_carrier_off(struct tg3 *tp)
2558 netif_carrier_off(tp->dev);
2559 tp->link_up = false;
2562 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2564 if (tg3_flag(tp, ENABLE_ASF))
2565 netdev_warn(tp->dev,
2566 "Management side-band traffic will be interrupted during phy settings change\n");
2569 /* This will reset the tigon3 PHY if there is no valid
2570 * link unless the FORCE argument is non-zero.
2572 static int tg3_phy_reset(struct tg3 *tp)
2577 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2578 val = tr32(GRC_MISC_CFG);
2579 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2582 err = tg3_readphy(tp, MII_BMSR, &val);
2583 err |= tg3_readphy(tp, MII_BMSR, &val);
2587 if (netif_running(tp->dev) && tp->link_up) {
2588 netif_carrier_off(tp->dev);
2589 tg3_link_report(tp);
2592 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2593 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2594 tg3_asic_rev(tp) == ASIC_REV_5705) {
2595 err = tg3_phy_reset_5703_4_5(tp);
2602 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2603 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2604 cpmuctrl = tr32(TG3_CPMU_CTRL);
2605 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2607 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2610 err = tg3_bmcr_reset(tp);
2614 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2615 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2616 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2618 tw32(TG3_CPMU_CTRL, cpmuctrl);
2621 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2622 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2623 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2624 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2625 CPMU_LSPD_1000MB_MACCLK_12_5) {
2626 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2628 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2632 if (tg3_flag(tp, 5717_PLUS) &&
2633 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2636 tg3_phy_apply_otp(tp);
2638 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2639 tg3_phy_toggle_apd(tp, true);
2641 tg3_phy_toggle_apd(tp, false);
2644 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2645 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2646 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2647 tg3_phydsp_write(tp, 0x000a, 0x0323);
2648 tg3_phy_toggle_auxctl_smdsp(tp, false);
2651 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2652 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2653 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2656 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2657 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2658 tg3_phydsp_write(tp, 0x000a, 0x310b);
2659 tg3_phydsp_write(tp, 0x201f, 0x9506);
2660 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2661 tg3_phy_toggle_auxctl_smdsp(tp, false);
2663 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2664 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2665 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2666 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2667 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2668 tg3_writephy(tp, MII_TG3_TEST1,
2669 MII_TG3_TEST1_TRIM_EN | 0x4);
2671 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2673 tg3_phy_toggle_auxctl_smdsp(tp, false);
2677 /* Set Extended packet length bit (bit 14) on all chips that */
2678 /* support jumbo frames */
2679 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2680 /* Cannot do read-modify-write on 5401 */
2681 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2682 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2683 /* Set bit 14 with read-modify-write to preserve other bits */
2684 err = tg3_phy_auxctl_read(tp,
2685 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2687 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2688 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2691 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2692 * jumbo frames transmission.
2694 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2695 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2696 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2697 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2700 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2701 /* adjust output voltage */
2702 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2705 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2706 tg3_phydsp_write(tp, 0xffb, 0x4000);
2708 tg3_phy_toggle_automdix(tp, 1);
2709 tg3_phy_set_wirespeed(tp);
2713 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2714 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2715 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2716 TG3_GPIO_MSG_NEED_VAUX)
2717 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2718 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2719 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2720 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2721 (TG3_GPIO_MSG_DRVR_PRES << 12))
2723 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2724 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2725 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2726 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2727 (TG3_GPIO_MSG_NEED_VAUX << 12))
2729 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2733 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2734 tg3_asic_rev(tp) == ASIC_REV_5719)
2735 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2737 status = tr32(TG3_CPMU_DRV_STATUS);
2739 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2740 status &= ~(TG3_GPIO_MSG_MASK << shift);
2741 status |= (newstat << shift);
2743 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2744 tg3_asic_rev(tp) == ASIC_REV_5719)
2745 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2747 tw32(TG3_CPMU_DRV_STATUS, status);
2749 return status >> TG3_APE_GPIO_MSG_SHIFT;
2752 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2754 if (!tg3_flag(tp, IS_NIC))
2757 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2758 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2759 tg3_asic_rev(tp) == ASIC_REV_5720) {
2760 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2763 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2765 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2766 TG3_GRC_LCLCTL_PWRSW_DELAY);
2768 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2770 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2771 TG3_GRC_LCLCTL_PWRSW_DELAY);
2777 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2781 if (!tg3_flag(tp, IS_NIC) ||
2782 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2783 tg3_asic_rev(tp) == ASIC_REV_5701)
2786 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2788 tw32_wait_f(GRC_LOCAL_CTRL,
2789 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2790 TG3_GRC_LCLCTL_PWRSW_DELAY);
2792 tw32_wait_f(GRC_LOCAL_CTRL,
2794 TG3_GRC_LCLCTL_PWRSW_DELAY);
2796 tw32_wait_f(GRC_LOCAL_CTRL,
2797 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2798 TG3_GRC_LCLCTL_PWRSW_DELAY);
2801 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2803 if (!tg3_flag(tp, IS_NIC))
2806 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2807 tg3_asic_rev(tp) == ASIC_REV_5701) {
2808 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2809 (GRC_LCLCTRL_GPIO_OE0 |
2810 GRC_LCLCTRL_GPIO_OE1 |
2811 GRC_LCLCTRL_GPIO_OE2 |
2812 GRC_LCLCTRL_GPIO_OUTPUT0 |
2813 GRC_LCLCTRL_GPIO_OUTPUT1),
2814 TG3_GRC_LCLCTL_PWRSW_DELAY);
2815 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2816 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2817 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2818 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2819 GRC_LCLCTRL_GPIO_OE1 |
2820 GRC_LCLCTRL_GPIO_OE2 |
2821 GRC_LCLCTRL_GPIO_OUTPUT0 |
2822 GRC_LCLCTRL_GPIO_OUTPUT1 |
2824 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2825 TG3_GRC_LCLCTL_PWRSW_DELAY);
2827 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2828 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2829 TG3_GRC_LCLCTL_PWRSW_DELAY);
2831 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2832 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2833 TG3_GRC_LCLCTL_PWRSW_DELAY);
2836 u32 grc_local_ctrl = 0;
2838 /* Workaround to prevent overdrawing Amps. */
2839 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2840 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2841 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2843 TG3_GRC_LCLCTL_PWRSW_DELAY);
2846 /* On 5753 and variants, GPIO2 cannot be used. */
2847 no_gpio2 = tp->nic_sram_data_cfg &
2848 NIC_SRAM_DATA_CFG_NO_GPIO2;
2850 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2851 GRC_LCLCTRL_GPIO_OE1 |
2852 GRC_LCLCTRL_GPIO_OE2 |
2853 GRC_LCLCTRL_GPIO_OUTPUT1 |
2854 GRC_LCLCTRL_GPIO_OUTPUT2;
2856 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2857 GRC_LCLCTRL_GPIO_OUTPUT2);
2859 tw32_wait_f(GRC_LOCAL_CTRL,
2860 tp->grc_local_ctrl | grc_local_ctrl,
2861 TG3_GRC_LCLCTL_PWRSW_DELAY);
2863 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2865 tw32_wait_f(GRC_LOCAL_CTRL,
2866 tp->grc_local_ctrl | grc_local_ctrl,
2867 TG3_GRC_LCLCTL_PWRSW_DELAY);
2870 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2871 tw32_wait_f(GRC_LOCAL_CTRL,
2872 tp->grc_local_ctrl | grc_local_ctrl,
2873 TG3_GRC_LCLCTL_PWRSW_DELAY);
2878 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2882 /* Serialize power state transitions */
2883 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2886 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2887 msg = TG3_GPIO_MSG_NEED_VAUX;
2889 msg = tg3_set_function_status(tp, msg);
2891 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2894 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2895 tg3_pwrsrc_switch_to_vaux(tp);
2897 tg3_pwrsrc_die_with_vmain(tp);
2900 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2903 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2905 bool need_vaux = false;
2907 /* The GPIOs do something completely different on 57765. */
2908 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2911 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2912 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2913 tg3_asic_rev(tp) == ASIC_REV_5720) {
2914 tg3_frob_aux_power_5717(tp, include_wol ?
2915 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2919 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2920 struct net_device *dev_peer;
2922 dev_peer = pci_get_drvdata(tp->pdev_peer);
2924 /* remove_one() may have been run on the peer. */
2926 struct tg3 *tp_peer = netdev_priv(dev_peer);
2928 if (tg3_flag(tp_peer, INIT_COMPLETE))
2931 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2932 tg3_flag(tp_peer, ENABLE_ASF))
2937 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2938 tg3_flag(tp, ENABLE_ASF))
2942 tg3_pwrsrc_switch_to_vaux(tp);
2944 tg3_pwrsrc_die_with_vmain(tp);
2947 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2949 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2951 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2952 if (speed != SPEED_10)
2954 } else if (speed == SPEED_10)
2960 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2964 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
2967 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2968 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
2969 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2970 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2973 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2974 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2975 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2980 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2982 val = tr32(GRC_MISC_CFG);
2983 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2986 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2988 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2991 tg3_writephy(tp, MII_ADVERTISE, 0);
2992 tg3_writephy(tp, MII_BMCR,
2993 BMCR_ANENABLE | BMCR_ANRESTART);
2995 tg3_writephy(tp, MII_TG3_FET_TEST,
2996 phytest | MII_TG3_FET_SHADOW_EN);
2997 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2998 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3000 MII_TG3_FET_SHDW_AUXMODE4,
3003 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3006 } else if (do_low_power) {
3007 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3008 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3010 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3011 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3012 MII_TG3_AUXCTL_PCTL_VREG_11V;
3013 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3016 /* The PHY should not be powered down on some chips because
3019 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
3020 tg3_asic_rev(tp) == ASIC_REV_5704 ||
3021 (tg3_asic_rev(tp) == ASIC_REV_5780 &&
3022 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
3023 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
3027 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3028 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3029 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3030 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3031 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3032 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3035 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3038 /* tp->lock is held. */
3039 static int tg3_nvram_lock(struct tg3 *tp)
3041 if (tg3_flag(tp, NVRAM)) {
3044 if (tp->nvram_lock_cnt == 0) {
3045 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3046 for (i = 0; i < 8000; i++) {
3047 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3052 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3056 tp->nvram_lock_cnt++;
3061 /* tp->lock is held. */
3062 static void tg3_nvram_unlock(struct tg3 *tp)
3064 if (tg3_flag(tp, NVRAM)) {
3065 if (tp->nvram_lock_cnt > 0)
3066 tp->nvram_lock_cnt--;
3067 if (tp->nvram_lock_cnt == 0)
3068 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3072 /* tp->lock is held. */
3073 static void tg3_enable_nvram_access(struct tg3 *tp)
3075 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3076 u32 nvaccess = tr32(NVRAM_ACCESS);
3078 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3082 /* tp->lock is held. */
3083 static void tg3_disable_nvram_access(struct tg3 *tp)
3085 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3086 u32 nvaccess = tr32(NVRAM_ACCESS);
3088 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3092 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3093 u32 offset, u32 *val)
3098 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3101 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3102 EEPROM_ADDR_DEVID_MASK |
3104 tw32(GRC_EEPROM_ADDR,
3106 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3107 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3108 EEPROM_ADDR_ADDR_MASK) |
3109 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3111 for (i = 0; i < 1000; i++) {
3112 tmp = tr32(GRC_EEPROM_ADDR);
3114 if (tmp & EEPROM_ADDR_COMPLETE)
3118 if (!(tmp & EEPROM_ADDR_COMPLETE))
3121 tmp = tr32(GRC_EEPROM_DATA);
3124 * The data will always be opposite the native endian
3125 * format. Perform a blind byteswap to compensate.
3132 #define NVRAM_CMD_TIMEOUT 10000
3134 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3138 tw32(NVRAM_CMD, nvram_cmd);
3139 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3141 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3147 if (i == NVRAM_CMD_TIMEOUT)
3153 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3155 if (tg3_flag(tp, NVRAM) &&
3156 tg3_flag(tp, NVRAM_BUFFERED) &&
3157 tg3_flag(tp, FLASH) &&
3158 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3159 (tp->nvram_jedecnum == JEDEC_ATMEL))
3161 addr = ((addr / tp->nvram_pagesize) <<
3162 ATMEL_AT45DB0X1B_PAGE_POS) +
3163 (addr % tp->nvram_pagesize);
3168 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3170 if (tg3_flag(tp, NVRAM) &&
3171 tg3_flag(tp, NVRAM_BUFFERED) &&
3172 tg3_flag(tp, FLASH) &&
3173 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3174 (tp->nvram_jedecnum == JEDEC_ATMEL))
3176 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3177 tp->nvram_pagesize) +
3178 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3183 /* NOTE: Data read in from NVRAM is byteswapped according to
3184 * the byteswapping settings for all other register accesses.
3185 * tg3 devices are BE devices, so on a BE machine, the data
3186 * returned will be exactly as it is seen in NVRAM. On a LE
3187 * machine, the 32-bit value will be byteswapped.
3189 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3193 if (!tg3_flag(tp, NVRAM))
3194 return tg3_nvram_read_using_eeprom(tp, offset, val);
3196 offset = tg3_nvram_phys_addr(tp, offset);
3198 if (offset > NVRAM_ADDR_MSK)
3201 ret = tg3_nvram_lock(tp);
3205 tg3_enable_nvram_access(tp);
3207 tw32(NVRAM_ADDR, offset);
3208 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3209 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3212 *val = tr32(NVRAM_RDDATA);
3214 tg3_disable_nvram_access(tp);
3216 tg3_nvram_unlock(tp);
3221 /* Ensures NVRAM data is in bytestream format. */
3222 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3225 int res = tg3_nvram_read(tp, offset, &v);
3227 *val = cpu_to_be32(v);
3231 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3232 u32 offset, u32 len, u8 *buf)
3237 for (i = 0; i < len; i += 4) {
3243 memcpy(&data, buf + i, 4);
3246 * The SEEPROM interface expects the data to always be opposite
3247 * the native endian format. We accomplish this by reversing
3248 * all the operations that would have been performed on the
3249 * data from a call to tg3_nvram_read_be32().
3251 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3253 val = tr32(GRC_EEPROM_ADDR);
3254 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3256 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3258 tw32(GRC_EEPROM_ADDR, val |
3259 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3260 (addr & EEPROM_ADDR_ADDR_MASK) |
3264 for (j = 0; j < 1000; j++) {
3265 val = tr32(GRC_EEPROM_ADDR);
3267 if (val & EEPROM_ADDR_COMPLETE)
3271 if (!(val & EEPROM_ADDR_COMPLETE)) {
3280 /* offset and length are dword aligned */
3281 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3285 u32 pagesize = tp->nvram_pagesize;
3286 u32 pagemask = pagesize - 1;
3290 tmp = kmalloc(pagesize, GFP_KERNEL);
3296 u32 phy_addr, page_off, size;
3298 phy_addr = offset & ~pagemask;
3300 for (j = 0; j < pagesize; j += 4) {
3301 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3302 (__be32 *) (tmp + j));
3309 page_off = offset & pagemask;
3316 memcpy(tmp + page_off, buf, size);
3318 offset = offset + (pagesize - page_off);
3320 tg3_enable_nvram_access(tp);
3323 * Before we can erase the flash page, we need
3324 * to issue a special "write enable" command.
3326 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3328 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3331 /* Erase the target page */
3332 tw32(NVRAM_ADDR, phy_addr);
3334 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3335 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3337 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3340 /* Issue another write enable to start the write. */
3341 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3343 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3346 for (j = 0; j < pagesize; j += 4) {
3349 data = *((__be32 *) (tmp + j));
3351 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3353 tw32(NVRAM_ADDR, phy_addr + j);
3355 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3359 nvram_cmd |= NVRAM_CMD_FIRST;
3360 else if (j == (pagesize - 4))
3361 nvram_cmd |= NVRAM_CMD_LAST;
3363 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3371 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3372 tg3_nvram_exec_cmd(tp, nvram_cmd);
3379 /* offset and length are dword aligned */
3380 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3385 for (i = 0; i < len; i += 4, offset += 4) {
3386 u32 page_off, phy_addr, nvram_cmd;
3389 memcpy(&data, buf + i, 4);
3390 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3392 page_off = offset % tp->nvram_pagesize;
3394 phy_addr = tg3_nvram_phys_addr(tp, offset);
3396 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3398 if (page_off == 0 || i == 0)
3399 nvram_cmd |= NVRAM_CMD_FIRST;
3400 if (page_off == (tp->nvram_pagesize - 4))
3401 nvram_cmd |= NVRAM_CMD_LAST;
3404 nvram_cmd |= NVRAM_CMD_LAST;
3406 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3407 !tg3_flag(tp, FLASH) ||
3408 !tg3_flag(tp, 57765_PLUS))
3409 tw32(NVRAM_ADDR, phy_addr);
3411 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3412 !tg3_flag(tp, 5755_PLUS) &&
3413 (tp->nvram_jedecnum == JEDEC_ST) &&
3414 (nvram_cmd & NVRAM_CMD_FIRST)) {
3417 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3418 ret = tg3_nvram_exec_cmd(tp, cmd);
3422 if (!tg3_flag(tp, FLASH)) {
3423 /* We always do complete word writes to eeprom. */
3424 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3427 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3434 /* offset and length are dword aligned */
3435 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3439 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3440 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3441 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3445 if (!tg3_flag(tp, NVRAM)) {
3446 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3450 ret = tg3_nvram_lock(tp);
3454 tg3_enable_nvram_access(tp);
3455 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3456 tw32(NVRAM_WRITE1, 0x406);
3458 grc_mode = tr32(GRC_MODE);
3459 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3461 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3462 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3465 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3469 grc_mode = tr32(GRC_MODE);
3470 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3472 tg3_disable_nvram_access(tp);
3473 tg3_nvram_unlock(tp);
3476 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3477 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3484 #define RX_CPU_SCRATCH_BASE 0x30000
3485 #define RX_CPU_SCRATCH_SIZE 0x04000
3486 #define TX_CPU_SCRATCH_BASE 0x34000
3487 #define TX_CPU_SCRATCH_SIZE 0x04000
3489 /* tp->lock is held. */
3490 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3493 const int iters = 10000;
3495 for (i = 0; i < iters; i++) {
3496 tw32(cpu_base + CPU_STATE, 0xffffffff);
3497 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3498 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3502 return (i == iters) ? -EBUSY : 0;
3505 /* tp->lock is held. */
3506 static int tg3_rxcpu_pause(struct tg3 *tp)
3508 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3510 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3511 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3517 /* tp->lock is held. */
3518 static int tg3_txcpu_pause(struct tg3 *tp)
3520 return tg3_pause_cpu(tp, TX_CPU_BASE);
3523 /* tp->lock is held. */
3524 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3526 tw32(cpu_base + CPU_STATE, 0xffffffff);
3527 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3530 /* tp->lock is held. */
3531 static void tg3_rxcpu_resume(struct tg3 *tp)
3533 tg3_resume_cpu(tp, RX_CPU_BASE);
3536 /* tp->lock is held. */
3537 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3541 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3543 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3544 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3546 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3549 if (cpu_base == RX_CPU_BASE) {
3550 rc = tg3_rxcpu_pause(tp);
3553 * There is only an Rx CPU for the 5750 derivative in the
3556 if (tg3_flag(tp, IS_SSB_CORE))
3559 rc = tg3_txcpu_pause(tp);
3563 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3564 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3568 /* Clear firmware's nvram arbitration. */
3569 if (tg3_flag(tp, NVRAM))
3570 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3574 static int tg3_fw_data_len(struct tg3 *tp,
3575 const struct tg3_firmware_hdr *fw_hdr)
3579 /* Non fragmented firmware have one firmware header followed by a
3580 * contiguous chunk of data to be written. The length field in that
3581 * header is not the length of data to be written but the complete
3582 * length of the bss. The data length is determined based on
3583 * tp->fw->size minus headers.
3585 * Fragmented firmware have a main header followed by multiple
3586 * fragments. Each fragment is identical to non fragmented firmware
3587 * with a firmware header followed by a contiguous chunk of data. In
3588 * the main header, the length field is unused and set to 0xffffffff.
3589 * In each fragment header the length is the entire size of that
3590 * fragment i.e. fragment data + header length. Data length is
3591 * therefore length field in the header minus TG3_FW_HDR_LEN.
3593 if (tp->fw_len == 0xffffffff)
3594 fw_len = be32_to_cpu(fw_hdr->len);
3596 fw_len = tp->fw->size;
3598 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3601 /* tp->lock is held. */
3602 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3603 u32 cpu_scratch_base, int cpu_scratch_size,
3604 const struct tg3_firmware_hdr *fw_hdr)
3607 void (*write_op)(struct tg3 *, u32, u32);
3608 int total_len = tp->fw->size;
3610 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3612 "%s: Trying to load TX cpu firmware which is 5705\n",
3617 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3618 write_op = tg3_write_mem;
3620 write_op = tg3_write_indirect_reg32;
3622 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3623 /* It is possible that bootcode is still loading at this point.
3624 * Get the nvram lock first before halting the cpu.
3626 int lock_err = tg3_nvram_lock(tp);
3627 err = tg3_halt_cpu(tp, cpu_base);
3629 tg3_nvram_unlock(tp);
3633 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3634 write_op(tp, cpu_scratch_base + i, 0);
3635 tw32(cpu_base + CPU_STATE, 0xffffffff);
3636 tw32(cpu_base + CPU_MODE,
3637 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3639 /* Subtract additional main header for fragmented firmware and
3640 * advance to the first fragment
3642 total_len -= TG3_FW_HDR_LEN;
3647 u32 *fw_data = (u32 *)(fw_hdr + 1);
3648 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3649 write_op(tp, cpu_scratch_base +
3650 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3652 be32_to_cpu(fw_data[i]));
3654 total_len -= be32_to_cpu(fw_hdr->len);
3656 /* Advance to next fragment */
3657 fw_hdr = (struct tg3_firmware_hdr *)
3658 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3659 } while (total_len > 0);
3667 /* tp->lock is held. */
3668 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3671 const int iters = 5;
3673 tw32(cpu_base + CPU_STATE, 0xffffffff);
3674 tw32_f(cpu_base + CPU_PC, pc);
3676 for (i = 0; i < iters; i++) {
3677 if (tr32(cpu_base + CPU_PC) == pc)
3679 tw32(cpu_base + CPU_STATE, 0xffffffff);
3680 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3681 tw32_f(cpu_base + CPU_PC, pc);
3685 return (i == iters) ? -EBUSY : 0;
3688 /* tp->lock is held. */
3689 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3691 const struct tg3_firmware_hdr *fw_hdr;
3694 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3696 /* Firmware blob starts with version numbers, followed by
3697 start address and length. We are setting complete length.
3698 length = end_address_of_bss - start_address_of_text.
3699 Remainder is the blob to be loaded contiguously
3700 from start address. */
3702 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3703 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3708 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3709 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3714 /* Now startup only the RX cpu. */
3715 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3716 be32_to_cpu(fw_hdr->base_addr));
3718 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3719 "should be %08x\n", __func__,
3720 tr32(RX_CPU_BASE + CPU_PC),
3721 be32_to_cpu(fw_hdr->base_addr));
3725 tg3_rxcpu_resume(tp);
3730 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3732 const int iters = 1000;
3736 /* Wait for boot code to complete initialization and enter service
3737 * loop. It is then safe to download service patches
3739 for (i = 0; i < iters; i++) {
3740 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3747 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3751 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3753 netdev_warn(tp->dev,
3754 "Other patches exist. Not downloading EEE patch\n");
3761 /* tp->lock is held. */
3762 static void tg3_load_57766_firmware(struct tg3 *tp)
3764 struct tg3_firmware_hdr *fw_hdr;
3766 if (!tg3_flag(tp, NO_NVRAM))
3769 if (tg3_validate_rxcpu_state(tp))
3775 /* This firmware blob has a different format than older firmware
3776 * releases as given below. The main difference is we have fragmented
3777 * data to be written to non-contiguous locations.
3779 * In the beginning we have a firmware header identical to other
3780 * firmware which consists of version, base addr and length. The length
3781 * here is unused and set to 0xffffffff.
3783 * This is followed by a series of firmware fragments which are
3784 * individually identical to previous firmware. i.e. they have the
3785 * firmware header and followed by data for that fragment. The version
3786 * field of the individual fragment header is unused.
3789 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3790 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3793 if (tg3_rxcpu_pause(tp))
3796 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3797 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3799 tg3_rxcpu_resume(tp);
3802 /* tp->lock is held. */
3803 static int tg3_load_tso_firmware(struct tg3 *tp)
3805 const struct tg3_firmware_hdr *fw_hdr;
3806 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3809 if (!tg3_flag(tp, FW_TSO))
3812 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3814 /* Firmware blob starts with version numbers, followed by
3815 start address and length. We are setting complete length.
3816 length = end_address_of_bss - start_address_of_text.
3817 Remainder is the blob to be loaded contiguously
3818 from start address. */
3820 cpu_scratch_size = tp->fw_len;
3822 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3823 cpu_base = RX_CPU_BASE;
3824 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3826 cpu_base = TX_CPU_BASE;
3827 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3828 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3831 err = tg3_load_firmware_cpu(tp, cpu_base,
3832 cpu_scratch_base, cpu_scratch_size,
3837 /* Now startup the cpu. */
3838 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3839 be32_to_cpu(fw_hdr->base_addr));
3842 "%s fails to set CPU PC, is %08x should be %08x\n",
3843 __func__, tr32(cpu_base + CPU_PC),
3844 be32_to_cpu(fw_hdr->base_addr));
3848 tg3_resume_cpu(tp, cpu_base);
3853 /* tp->lock is held. */
3854 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3856 u32 addr_high, addr_low;
3859 addr_high = ((tp->dev->dev_addr[0] << 8) |
3860 tp->dev->dev_addr[1]);
3861 addr_low = ((tp->dev->dev_addr[2] << 24) |
3862 (tp->dev->dev_addr[3] << 16) |
3863 (tp->dev->dev_addr[4] << 8) |
3864 (tp->dev->dev_addr[5] << 0));
3865 for (i = 0; i < 4; i++) {
3866 if (i == 1 && skip_mac_1)
3868 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3869 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3872 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3873 tg3_asic_rev(tp) == ASIC_REV_5704) {
3874 for (i = 0; i < 12; i++) {
3875 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3876 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3880 addr_high = (tp->dev->dev_addr[0] +
3881 tp->dev->dev_addr[1] +
3882 tp->dev->dev_addr[2] +
3883 tp->dev->dev_addr[3] +
3884 tp->dev->dev_addr[4] +
3885 tp->dev->dev_addr[5]) &
3886 TX_BACKOFF_SEED_MASK;
3887 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3890 static void tg3_enable_register_access(struct tg3 *tp)
3893 * Make sure register accesses (indirect or otherwise) will function
3896 pci_write_config_dword(tp->pdev,
3897 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3900 static int tg3_power_up(struct tg3 *tp)
3904 tg3_enable_register_access(tp);
3906 err = pci_set_power_state(tp->pdev, PCI_D0);
3908 /* Switch out of Vaux if it is a NIC */
3909 tg3_pwrsrc_switch_to_vmain(tp);
3911 netdev_err(tp->dev, "Transition to D0 failed\n");
3917 static int tg3_setup_phy(struct tg3 *, int);
3919 static int tg3_power_down_prepare(struct tg3 *tp)
3922 bool device_should_wake, do_low_power;
3924 tg3_enable_register_access(tp);
3926 /* Restore the CLKREQ setting. */
3927 if (tg3_flag(tp, CLKREQ_BUG))
3928 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3929 PCI_EXP_LNKCTL_CLKREQ_EN);
3931 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3932 tw32(TG3PCI_MISC_HOST_CTRL,
3933 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3935 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3936 tg3_flag(tp, WOL_ENABLE);
3938 if (tg3_flag(tp, USE_PHYLIB)) {
3939 do_low_power = false;
3940 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3941 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3942 struct phy_device *phydev;
3943 u32 phyid, advertising;
3945 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3947 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3949 tp->link_config.speed = phydev->speed;
3950 tp->link_config.duplex = phydev->duplex;
3951 tp->link_config.autoneg = phydev->autoneg;
3952 tp->link_config.advertising = phydev->advertising;
3954 advertising = ADVERTISED_TP |
3956 ADVERTISED_Autoneg |
3957 ADVERTISED_10baseT_Half;
3959 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3960 if (tg3_flag(tp, WOL_SPEED_100MB))
3962 ADVERTISED_100baseT_Half |
3963 ADVERTISED_100baseT_Full |
3964 ADVERTISED_10baseT_Full;
3966 advertising |= ADVERTISED_10baseT_Full;
3969 phydev->advertising = advertising;
3971 phy_start_aneg(phydev);
3973 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3974 if (phyid != PHY_ID_BCMAC131) {
3975 phyid &= PHY_BCM_OUI_MASK;
3976 if (phyid == PHY_BCM_OUI_1 ||
3977 phyid == PHY_BCM_OUI_2 ||
3978 phyid == PHY_BCM_OUI_3)
3979 do_low_power = true;
3983 do_low_power = true;
3985 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3986 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3988 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3989 tg3_setup_phy(tp, 0);
3992 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3995 val = tr32(GRC_VCPU_EXT_CTRL);
3996 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3997 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4001 for (i = 0; i < 200; i++) {
4002 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4003 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4008 if (tg3_flag(tp, WOL_CAP))
4009 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4010 WOL_DRV_STATE_SHUTDOWN |
4014 if (device_should_wake) {
4017 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4019 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4020 tg3_phy_auxctl_write(tp,
4021 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4022 MII_TG3_AUXCTL_PCTL_WOL_EN |
4023 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4024 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4028 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4029 mac_mode = MAC_MODE_PORT_MODE_GMII;
4030 else if (tp->phy_flags &
4031 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4032 if (tp->link_config.active_speed == SPEED_1000)
4033 mac_mode = MAC_MODE_PORT_MODE_GMII;
4035 mac_mode = MAC_MODE_PORT_MODE_MII;
4037 mac_mode = MAC_MODE_PORT_MODE_MII;
4039 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4040 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4041 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4042 SPEED_100 : SPEED_10;
4043 if (tg3_5700_link_polarity(tp, speed))
4044 mac_mode |= MAC_MODE_LINK_POLARITY;
4046 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4049 mac_mode = MAC_MODE_PORT_MODE_TBI;
4052 if (!tg3_flag(tp, 5750_PLUS))
4053 tw32(MAC_LED_CTRL, tp->led_ctrl);
4055 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4056 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4057 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4058 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4060 if (tg3_flag(tp, ENABLE_APE))
4061 mac_mode |= MAC_MODE_APE_TX_EN |
4062 MAC_MODE_APE_RX_EN |
4063 MAC_MODE_TDE_ENABLE;
4065 tw32_f(MAC_MODE, mac_mode);
4068 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4072 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4073 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4074 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4077 base_val = tp->pci_clock_ctrl;
4078 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4079 CLOCK_CTRL_TXCLK_DISABLE);
4081 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4082 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4083 } else if (tg3_flag(tp, 5780_CLASS) ||
4084 tg3_flag(tp, CPMU_PRESENT) ||
4085 tg3_asic_rev(tp) == ASIC_REV_5906) {
4087 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4088 u32 newbits1, newbits2;
4090 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4091 tg3_asic_rev(tp) == ASIC_REV_5701) {
4092 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4093 CLOCK_CTRL_TXCLK_DISABLE |
4095 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4096 } else if (tg3_flag(tp, 5705_PLUS)) {
4097 newbits1 = CLOCK_CTRL_625_CORE;
4098 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4100 newbits1 = CLOCK_CTRL_ALTCLK;
4101 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4104 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4107 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4110 if (!tg3_flag(tp, 5705_PLUS)) {
4113 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4114 tg3_asic_rev(tp) == ASIC_REV_5701) {
4115 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4116 CLOCK_CTRL_TXCLK_DISABLE |
4117 CLOCK_CTRL_44MHZ_CORE);
4119 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4122 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4123 tp->pci_clock_ctrl | newbits3, 40);
4127 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4128 tg3_power_down_phy(tp, do_low_power);
4130 tg3_frob_aux_power(tp, true);
4132 /* Workaround for unstable PLL clock */
4133 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4134 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4135 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4136 u32 val = tr32(0x7d00);
4138 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4140 if (!tg3_flag(tp, ENABLE_ASF)) {
4143 err = tg3_nvram_lock(tp);
4144 tg3_halt_cpu(tp, RX_CPU_BASE);
4146 tg3_nvram_unlock(tp);
4150 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4155 static void tg3_power_down(struct tg3 *tp)
4157 tg3_power_down_prepare(tp);
4159 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4160 pci_set_power_state(tp->pdev, PCI_D3hot);
4163 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4165 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4166 case MII_TG3_AUX_STAT_10HALF:
4168 *duplex = DUPLEX_HALF;
4171 case MII_TG3_AUX_STAT_10FULL:
4173 *duplex = DUPLEX_FULL;
4176 case MII_TG3_AUX_STAT_100HALF:
4178 *duplex = DUPLEX_HALF;
4181 case MII_TG3_AUX_STAT_100FULL:
4183 *duplex = DUPLEX_FULL;
4186 case MII_TG3_AUX_STAT_1000HALF:
4187 *speed = SPEED_1000;
4188 *duplex = DUPLEX_HALF;
4191 case MII_TG3_AUX_STAT_1000FULL:
4192 *speed = SPEED_1000;
4193 *duplex = DUPLEX_FULL;
4197 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4198 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4200 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4204 *speed = SPEED_UNKNOWN;
4205 *duplex = DUPLEX_UNKNOWN;
4210 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4215 new_adv = ADVERTISE_CSMA;
4216 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4217 new_adv |= mii_advertise_flowctrl(flowctrl);
4219 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4223 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4224 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4226 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4227 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4228 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4230 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4235 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4238 tw32(TG3_CPMU_EEE_MODE,
4239 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4241 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4246 /* Advertise 100-BaseTX EEE ability */
4247 if (advertise & ADVERTISED_100baseT_Full)
4248 val |= MDIO_AN_EEE_ADV_100TX;
4249 /* Advertise 1000-BaseT EEE ability */
4250 if (advertise & ADVERTISED_1000baseT_Full)
4251 val |= MDIO_AN_EEE_ADV_1000T;
4252 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4256 switch (tg3_asic_rev(tp)) {
4258 case ASIC_REV_57765:
4259 case ASIC_REV_57766:
4261 /* If we advertised any eee advertisements above... */
4263 val = MII_TG3_DSP_TAP26_ALNOKO |
4264 MII_TG3_DSP_TAP26_RMRXSTO |
4265 MII_TG3_DSP_TAP26_OPCSINPT;
4266 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4270 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4271 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4272 MII_TG3_DSP_CH34TP2_HIBW01);
4275 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4284 static void tg3_phy_copper_begin(struct tg3 *tp)
4286 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4287 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4290 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4291 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4292 adv = ADVERTISED_10baseT_Half |
4293 ADVERTISED_10baseT_Full;
4294 if (tg3_flag(tp, WOL_SPEED_100MB))
4295 adv |= ADVERTISED_100baseT_Half |
4296 ADVERTISED_100baseT_Full;
4297 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
4298 adv |= ADVERTISED_1000baseT_Half |
4299 ADVERTISED_1000baseT_Full;
4301 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4303 adv = tp->link_config.advertising;
4304 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4305 adv &= ~(ADVERTISED_1000baseT_Half |
4306 ADVERTISED_1000baseT_Full);
4308 fc = tp->link_config.flowctrl;
4311 tg3_phy_autoneg_cfg(tp, adv, fc);
4313 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4314 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4315 /* Normally during power down we want to autonegotiate
4316 * the lowest possible speed for WOL. However, to avoid
4317 * link flap, we leave it untouched.
4322 tg3_writephy(tp, MII_BMCR,
4323 BMCR_ANENABLE | BMCR_ANRESTART);
4326 u32 bmcr, orig_bmcr;
4328 tp->link_config.active_speed = tp->link_config.speed;
4329 tp->link_config.active_duplex = tp->link_config.duplex;
4331 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4332 /* With autoneg disabled, 5715 only links up when the
4333 * advertisement register has the configured speed
4336 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4340 switch (tp->link_config.speed) {
4346 bmcr |= BMCR_SPEED100;
4350 bmcr |= BMCR_SPEED1000;
4354 if (tp->link_config.duplex == DUPLEX_FULL)
4355 bmcr |= BMCR_FULLDPLX;
4357 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4358 (bmcr != orig_bmcr)) {
4359 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4360 for (i = 0; i < 1500; i++) {
4364 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4365 tg3_readphy(tp, MII_BMSR, &tmp))
4367 if (!(tmp & BMSR_LSTATUS)) {
4372 tg3_writephy(tp, MII_BMCR, bmcr);
4378 static int tg3_phy_pull_config(struct tg3 *tp)
4383 err = tg3_readphy(tp, MII_BMCR, &val);
4387 if (!(val & BMCR_ANENABLE)) {
4388 tp->link_config.autoneg = AUTONEG_DISABLE;
4389 tp->link_config.advertising = 0;
4390 tg3_flag_clear(tp, PAUSE_AUTONEG);
4394 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4396 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4399 tp->link_config.speed = SPEED_10;
4402 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4405 tp->link_config.speed = SPEED_100;
4407 case BMCR_SPEED1000:
4408 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4409 tp->link_config.speed = SPEED_1000;
4417 if (val & BMCR_FULLDPLX)
4418 tp->link_config.duplex = DUPLEX_FULL;
4420 tp->link_config.duplex = DUPLEX_HALF;
4422 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4428 tp->link_config.autoneg = AUTONEG_ENABLE;
4429 tp->link_config.advertising = ADVERTISED_Autoneg;
4430 tg3_flag_set(tp, PAUSE_AUTONEG);
4432 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4435 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4439 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4440 tp->link_config.advertising |= adv | ADVERTISED_TP;
4442 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4444 tp->link_config.advertising |= ADVERTISED_FIBRE;
4447 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4450 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4451 err = tg3_readphy(tp, MII_CTRL1000, &val);
4455 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4457 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4461 adv = tg3_decode_flowctrl_1000X(val);
4462 tp->link_config.flowctrl = adv;
4464 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4465 adv = mii_adv_to_ethtool_adv_x(val);
4468 tp->link_config.advertising |= adv;
4475 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4479 /* Turn off tap power management. */
4480 /* Set Extended packet length bit */
4481 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4483 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4484 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4485 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4486 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4487 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4494 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4496 u32 advmsk, tgtadv, advertising;
4498 advertising = tp->link_config.advertising;
4499 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4501 advmsk = ADVERTISE_ALL;
4502 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4503 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4504 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4507 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4510 if ((*lcladv & advmsk) != tgtadv)
4513 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4516 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4518 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4522 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4523 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4524 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4525 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4526 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4528 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4531 if (tg3_ctrl != tgtadv)
4538 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4542 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4545 if (tg3_readphy(tp, MII_STAT1000, &val))
4548 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4551 if (tg3_readphy(tp, MII_LPA, rmtadv))
4554 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4555 tp->link_config.rmt_adv = lpeth;
4560 static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
4562 if (curr_link_up != tp->link_up) {
4564 netif_carrier_on(tp->dev);
4566 netif_carrier_off(tp->dev);
4567 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4568 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4571 tg3_link_report(tp);
4578 static void tg3_clear_mac_status(struct tg3 *tp)
4583 MAC_STATUS_SYNC_CHANGED |
4584 MAC_STATUS_CFG_CHANGED |
4585 MAC_STATUS_MI_COMPLETION |
4586 MAC_STATUS_LNKSTATE_CHANGED);
4590 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4592 int current_link_up;
4594 u32 lcl_adv, rmt_adv;
4599 tg3_clear_mac_status(tp);
4601 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4603 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4607 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4609 /* Some third-party PHYs need to be reset on link going
4612 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4613 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4614 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4616 tg3_readphy(tp, MII_BMSR, &bmsr);
4617 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4618 !(bmsr & BMSR_LSTATUS))
4624 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4625 tg3_readphy(tp, MII_BMSR, &bmsr);
4626 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4627 !tg3_flag(tp, INIT_COMPLETE))
4630 if (!(bmsr & BMSR_LSTATUS)) {
4631 err = tg3_init_5401phy_dsp(tp);
4635 tg3_readphy(tp, MII_BMSR, &bmsr);
4636 for (i = 0; i < 1000; i++) {
4638 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4639 (bmsr & BMSR_LSTATUS)) {
4645 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4646 TG3_PHY_REV_BCM5401_B0 &&
4647 !(bmsr & BMSR_LSTATUS) &&
4648 tp->link_config.active_speed == SPEED_1000) {
4649 err = tg3_phy_reset(tp);
4651 err = tg3_init_5401phy_dsp(tp);
4656 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4657 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4658 /* 5701 {A0,B0} CRC bug workaround */
4659 tg3_writephy(tp, 0x15, 0x0a75);
4660 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4661 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4662 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4665 /* Clear pending interrupts... */
4666 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4667 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4669 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4670 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4671 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4672 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4674 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4675 tg3_asic_rev(tp) == ASIC_REV_5701) {
4676 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4677 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4678 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4680 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4683 current_link_up = 0;
4684 current_speed = SPEED_UNKNOWN;
4685 current_duplex = DUPLEX_UNKNOWN;
4686 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4687 tp->link_config.rmt_adv = 0;
4689 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4690 err = tg3_phy_auxctl_read(tp,
4691 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4693 if (!err && !(val & (1 << 10))) {
4694 tg3_phy_auxctl_write(tp,
4695 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4702 for (i = 0; i < 100; i++) {
4703 tg3_readphy(tp, MII_BMSR, &bmsr);
4704 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4705 (bmsr & BMSR_LSTATUS))
4710 if (bmsr & BMSR_LSTATUS) {
4713 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4714 for (i = 0; i < 2000; i++) {
4716 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4721 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4726 for (i = 0; i < 200; i++) {
4727 tg3_readphy(tp, MII_BMCR, &bmcr);
4728 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4730 if (bmcr && bmcr != 0x7fff)
4738 tp->link_config.active_speed = current_speed;
4739 tp->link_config.active_duplex = current_duplex;
4741 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4742 if ((bmcr & BMCR_ANENABLE) &&
4743 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4744 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4745 current_link_up = 1;
4747 if (!(bmcr & BMCR_ANENABLE) &&
4748 tp->link_config.speed == current_speed &&
4749 tp->link_config.duplex == current_duplex) {
4750 current_link_up = 1;
4754 if (current_link_up == 1 &&
4755 tp->link_config.active_duplex == DUPLEX_FULL) {
4758 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4759 reg = MII_TG3_FET_GEN_STAT;
4760 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4762 reg = MII_TG3_EXT_STAT;
4763 bit = MII_TG3_EXT_STAT_MDIX;
4766 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4767 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4769 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4774 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4775 tg3_phy_copper_begin(tp);
4777 if (tg3_flag(tp, ROBOSWITCH)) {
4778 current_link_up = 1;
4779 /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4780 current_speed = SPEED_1000;
4781 current_duplex = DUPLEX_FULL;
4782 tp->link_config.active_speed = current_speed;
4783 tp->link_config.active_duplex = current_duplex;
4786 tg3_readphy(tp, MII_BMSR, &bmsr);
4787 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4788 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4789 current_link_up = 1;
4792 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4793 if (current_link_up == 1) {
4794 if (tp->link_config.active_speed == SPEED_100 ||
4795 tp->link_config.active_speed == SPEED_10)
4796 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4798 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4799 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4800 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4802 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4804 /* In order for the 5750 core in BCM4785 chip to work properly
4805 * in RGMII mode, the Led Control Register must be set up.
4807 if (tg3_flag(tp, RGMII_MODE)) {
4808 u32 led_ctrl = tr32(MAC_LED_CTRL);
4809 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4811 if (tp->link_config.active_speed == SPEED_10)
4812 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4813 else if (tp->link_config.active_speed == SPEED_100)
4814 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4815 LED_CTRL_100MBPS_ON);
4816 else if (tp->link_config.active_speed == SPEED_1000)
4817 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4818 LED_CTRL_1000MBPS_ON);
4820 tw32(MAC_LED_CTRL, led_ctrl);
4824 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4825 if (tp->link_config.active_duplex == DUPLEX_HALF)
4826 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4828 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4829 if (current_link_up == 1 &&
4830 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4831 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4833 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4836 /* ??? Without this setting Netgear GA302T PHY does not
4837 * ??? send/receive packets...
4839 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4840 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
4841 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4842 tw32_f(MAC_MI_MODE, tp->mi_mode);
4846 tw32_f(MAC_MODE, tp->mac_mode);
4849 tg3_phy_eee_adjust(tp, current_link_up);
4851 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4852 /* Polled via timer. */
4853 tw32_f(MAC_EVENT, 0);
4855 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4859 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
4860 current_link_up == 1 &&
4861 tp->link_config.active_speed == SPEED_1000 &&
4862 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4865 (MAC_STATUS_SYNC_CHANGED |
4866 MAC_STATUS_CFG_CHANGED));
4869 NIC_SRAM_FIRMWARE_MBOX,
4870 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4873 /* Prevent send BD corruption. */
4874 if (tg3_flag(tp, CLKREQ_BUG)) {
4875 if (tp->link_config.active_speed == SPEED_100 ||
4876 tp->link_config.active_speed == SPEED_10)
4877 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4878 PCI_EXP_LNKCTL_CLKREQ_EN);
4880 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4881 PCI_EXP_LNKCTL_CLKREQ_EN);
4884 tg3_test_and_report_link_chg(tp, current_link_up);
4889 struct tg3_fiber_aneginfo {
4891 #define ANEG_STATE_UNKNOWN 0
4892 #define ANEG_STATE_AN_ENABLE 1
4893 #define ANEG_STATE_RESTART_INIT 2
4894 #define ANEG_STATE_RESTART 3
4895 #define ANEG_STATE_DISABLE_LINK_OK 4
4896 #define ANEG_STATE_ABILITY_DETECT_INIT 5
4897 #define ANEG_STATE_ABILITY_DETECT 6
4898 #define ANEG_STATE_ACK_DETECT_INIT 7
4899 #define ANEG_STATE_ACK_DETECT 8
4900 #define ANEG_STATE_COMPLETE_ACK_INIT 9
4901 #define ANEG_STATE_COMPLETE_ACK 10
4902 #define ANEG_STATE_IDLE_DETECT_INIT 11
4903 #define ANEG_STATE_IDLE_DETECT 12
4904 #define ANEG_STATE_LINK_OK 13
4905 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
4906 #define ANEG_STATE_NEXT_PAGE_WAIT 15
4909 #define MR_AN_ENABLE 0x00000001
4910 #define MR_RESTART_AN 0x00000002
4911 #define MR_AN_COMPLETE 0x00000004
4912 #define MR_PAGE_RX 0x00000008
4913 #define MR_NP_LOADED 0x00000010
4914 #define MR_TOGGLE_TX 0x00000020
4915 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
4916 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
4917 #define MR_LP_ADV_SYM_PAUSE 0x00000100
4918 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
4919 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4920 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4921 #define MR_LP_ADV_NEXT_PAGE 0x00001000
4922 #define MR_TOGGLE_RX 0x00002000
4923 #define MR_NP_RX 0x00004000
4925 #define MR_LINK_OK 0x80000000
4927 unsigned long link_time, cur_time;
4929 u32 ability_match_cfg;
4930 int ability_match_count;
4932 char ability_match, idle_match, ack_match;
4934 u32 txconfig, rxconfig;
4935 #define ANEG_CFG_NP 0x00000080
4936 #define ANEG_CFG_ACK 0x00000040
4937 #define ANEG_CFG_RF2 0x00000020
4938 #define ANEG_CFG_RF1 0x00000010
4939 #define ANEG_CFG_PS2 0x00000001
4940 #define ANEG_CFG_PS1 0x00008000
4941 #define ANEG_CFG_HD 0x00004000
4942 #define ANEG_CFG_FD 0x00002000
4943 #define ANEG_CFG_INVAL 0x00001f06
4948 #define ANEG_TIMER_ENAB 2
4949 #define ANEG_FAILED -1
4951 #define ANEG_STATE_SETTLE_TIME 10000
4953 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4954 struct tg3_fiber_aneginfo *ap)
4957 unsigned long delta;
4961 if (ap->state == ANEG_STATE_UNKNOWN) {
4965 ap->ability_match_cfg = 0;
4966 ap->ability_match_count = 0;
4967 ap->ability_match = 0;
4973 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4974 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4976 if (rx_cfg_reg != ap->ability_match_cfg) {
4977 ap->ability_match_cfg = rx_cfg_reg;
4978 ap->ability_match = 0;
4979 ap->ability_match_count = 0;
4981 if (++ap->ability_match_count > 1) {
4982 ap->ability_match = 1;
4983 ap->ability_match_cfg = rx_cfg_reg;
4986 if (rx_cfg_reg & ANEG_CFG_ACK)
4994 ap->ability_match_cfg = 0;
4995 ap->ability_match_count = 0;
4996 ap->ability_match = 0;
5002 ap->rxconfig = rx_cfg_reg;
5005 switch (ap->state) {
5006 case ANEG_STATE_UNKNOWN:
5007 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5008 ap->state = ANEG_STATE_AN_ENABLE;
5011 case ANEG_STATE_AN_ENABLE:
5012 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5013 if (ap->flags & MR_AN_ENABLE) {
5016 ap->ability_match_cfg = 0;
5017 ap->ability_match_count = 0;
5018 ap->ability_match = 0;
5022 ap->state = ANEG_STATE_RESTART_INIT;
5024 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5028 case ANEG_STATE_RESTART_INIT:
5029 ap->link_time = ap->cur_time;
5030 ap->flags &= ~(MR_NP_LOADED);
5032 tw32(MAC_TX_AUTO_NEG, 0);
5033 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5034 tw32_f(MAC_MODE, tp->mac_mode);
5037 ret = ANEG_TIMER_ENAB;
5038 ap->state = ANEG_STATE_RESTART;
5041 case ANEG_STATE_RESTART:
5042 delta = ap->cur_time - ap->link_time;
5043 if (delta > ANEG_STATE_SETTLE_TIME)
5044 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5046 ret = ANEG_TIMER_ENAB;
5049 case ANEG_STATE_DISABLE_LINK_OK:
5053 case ANEG_STATE_ABILITY_DETECT_INIT:
5054 ap->flags &= ~(MR_TOGGLE_TX);
5055 ap->txconfig = ANEG_CFG_FD;
5056 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5057 if (flowctrl & ADVERTISE_1000XPAUSE)
5058 ap->txconfig |= ANEG_CFG_PS1;
5059 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5060 ap->txconfig |= ANEG_CFG_PS2;
5061 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5062 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5063 tw32_f(MAC_MODE, tp->mac_mode);
5066 ap->state = ANEG_STATE_ABILITY_DETECT;
5069 case ANEG_STATE_ABILITY_DETECT:
5070 if (ap->ability_match != 0 && ap->rxconfig != 0)
5071 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5074 case ANEG_STATE_ACK_DETECT_INIT:
5075 ap->txconfig |= ANEG_CFG_ACK;
5076 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5077 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5078 tw32_f(MAC_MODE, tp->mac_mode);
5081 ap->state = ANEG_STATE_ACK_DETECT;
5084 case ANEG_STATE_ACK_DETECT:
5085 if (ap->ack_match != 0) {
5086 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5087 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5088 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5090 ap->state = ANEG_STATE_AN_ENABLE;
5092 } else if (ap->ability_match != 0 &&
5093 ap->rxconfig == 0) {
5094 ap->state = ANEG_STATE_AN_ENABLE;
5098 case ANEG_STATE_COMPLETE_ACK_INIT:
5099 if (ap->rxconfig & ANEG_CFG_INVAL) {
5103 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5104 MR_LP_ADV_HALF_DUPLEX |
5105 MR_LP_ADV_SYM_PAUSE |
5106 MR_LP_ADV_ASYM_PAUSE |
5107 MR_LP_ADV_REMOTE_FAULT1 |
5108 MR_LP_ADV_REMOTE_FAULT2 |
5109 MR_LP_ADV_NEXT_PAGE |
5112 if (ap->rxconfig & ANEG_CFG_FD)
5113 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5114 if (ap->rxconfig & ANEG_CFG_HD)
5115 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5116 if (ap->rxconfig & ANEG_CFG_PS1)
5117 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5118 if (ap->rxconfig & ANEG_CFG_PS2)
5119 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5120 if (ap->rxconfig & ANEG_CFG_RF1)
5121 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5122 if (ap->rxconfig & ANEG_CFG_RF2)
5123 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5124 if (ap->rxconfig & ANEG_CFG_NP)
5125 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5127 ap->link_time = ap->cur_time;
5129 ap->flags ^= (MR_TOGGLE_TX);
5130 if (ap->rxconfig & 0x0008)
5131 ap->flags |= MR_TOGGLE_RX;
5132 if (ap->rxconfig & ANEG_CFG_NP)
5133 ap->flags |= MR_NP_RX;
5134 ap->flags |= MR_PAGE_RX;
5136 ap->state = ANEG_STATE_COMPLETE_ACK;
5137 ret = ANEG_TIMER_ENAB;
5140 case ANEG_STATE_COMPLETE_ACK:
5141 if (ap->ability_match != 0 &&
5142 ap->rxconfig == 0) {
5143 ap->state = ANEG_STATE_AN_ENABLE;
5146 delta = ap->cur_time - ap->link_time;
5147 if (delta > ANEG_STATE_SETTLE_TIME) {
5148 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5149 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5151 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5152 !(ap->flags & MR_NP_RX)) {
5153 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5161 case ANEG_STATE_IDLE_DETECT_INIT:
5162 ap->link_time = ap->cur_time;
5163 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5164 tw32_f(MAC_MODE, tp->mac_mode);
5167 ap->state = ANEG_STATE_IDLE_DETECT;
5168 ret = ANEG_TIMER_ENAB;
5171 case ANEG_STATE_IDLE_DETECT:
5172 if (ap->ability_match != 0 &&
5173 ap->rxconfig == 0) {
5174 ap->state = ANEG_STATE_AN_ENABLE;
5177 delta = ap->cur_time - ap->link_time;
5178 if (delta > ANEG_STATE_SETTLE_TIME) {
5179 /* XXX another gem from the Broadcom driver :( */
5180 ap->state = ANEG_STATE_LINK_OK;
5184 case ANEG_STATE_LINK_OK:
5185 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5189 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5190 /* ??? unimplemented */
5193 case ANEG_STATE_NEXT_PAGE_WAIT:
5194 /* ??? unimplemented */
5205 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5208 struct tg3_fiber_aneginfo aninfo;
5209 int status = ANEG_FAILED;
5213 tw32_f(MAC_TX_AUTO_NEG, 0);
5215 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5216 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5219 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5222 memset(&aninfo, 0, sizeof(aninfo));
5223 aninfo.flags |= MR_AN_ENABLE;
5224 aninfo.state = ANEG_STATE_UNKNOWN;
5225 aninfo.cur_time = 0;
5227 while (++tick < 195000) {
5228 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5229 if (status == ANEG_DONE || status == ANEG_FAILED)
5235 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5236 tw32_f(MAC_MODE, tp->mac_mode);
5239 *txflags = aninfo.txconfig;
5240 *rxflags = aninfo.flags;
5242 if (status == ANEG_DONE &&
5243 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5244 MR_LP_ADV_FULL_DUPLEX)))
5250 static void tg3_init_bcm8002(struct tg3 *tp)
5252 u32 mac_status = tr32(MAC_STATUS);
5255 /* Reset when initting first time or we have a link. */
5256 if (tg3_flag(tp, INIT_COMPLETE) &&
5257 !(mac_status & MAC_STATUS_PCS_SYNCED))
5260 /* Set PLL lock range. */
5261 tg3_writephy(tp, 0x16, 0x8007);
5264 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5266 /* Wait for reset to complete. */
5267 /* XXX schedule_timeout() ... */
5268 for (i = 0; i < 500; i++)
5271 /* Config mode; select PMA/Ch 1 regs. */
5272 tg3_writephy(tp, 0x10, 0x8411);
5274 /* Enable auto-lock and comdet, select txclk for tx. */
5275 tg3_writephy(tp, 0x11, 0x0a10);
5277 tg3_writephy(tp, 0x18, 0x00a0);
5278 tg3_writephy(tp, 0x16, 0x41ff);
5280 /* Assert and deassert POR. */
5281 tg3_writephy(tp, 0x13, 0x0400);
5283 tg3_writephy(tp, 0x13, 0x0000);
5285 tg3_writephy(tp, 0x11, 0x0a50);
5287 tg3_writephy(tp, 0x11, 0x0a10);
5289 /* Wait for signal to stabilize */
5290 /* XXX schedule_timeout() ... */
5291 for (i = 0; i < 15000; i++)
5294 /* Deselect the channel register so we can read the PHYID
5297 tg3_writephy(tp, 0x10, 0x8011);
5300 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5303 u32 sg_dig_ctrl, sg_dig_status;
5304 u32 serdes_cfg, expected_sg_dig_ctrl;
5305 int workaround, port_a;
5306 int current_link_up;
5309 expected_sg_dig_ctrl = 0;
5312 current_link_up = 0;
5314 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5315 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5317 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5320 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5321 /* preserve bits 20-23 for voltage regulator */
5322 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5325 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5327 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5328 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5330 u32 val = serdes_cfg;
5336 tw32_f(MAC_SERDES_CFG, val);
5339 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5341 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5342 tg3_setup_flow_control(tp, 0, 0);
5343 current_link_up = 1;
5348 /* Want auto-negotiation. */
5349 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5351 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5352 if (flowctrl & ADVERTISE_1000XPAUSE)
5353 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5354 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5355 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5357 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5358 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5359 tp->serdes_counter &&
5360 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5361 MAC_STATUS_RCVD_CFG)) ==
5362 MAC_STATUS_PCS_SYNCED)) {
5363 tp->serdes_counter--;
5364 current_link_up = 1;
5369 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5370 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5372 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5374 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5375 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5376 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5377 MAC_STATUS_SIGNAL_DET)) {
5378 sg_dig_status = tr32(SG_DIG_STATUS);
5379 mac_status = tr32(MAC_STATUS);
5381 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5382 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5383 u32 local_adv = 0, remote_adv = 0;
5385 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5386 local_adv |= ADVERTISE_1000XPAUSE;
5387 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5388 local_adv |= ADVERTISE_1000XPSE_ASYM;
5390 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5391 remote_adv |= LPA_1000XPAUSE;
5392 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5393 remote_adv |= LPA_1000XPAUSE_ASYM;
5395 tp->link_config.rmt_adv =
5396 mii_adv_to_ethtool_adv_x(remote_adv);
5398 tg3_setup_flow_control(tp, local_adv, remote_adv);
5399 current_link_up = 1;
5400 tp->serdes_counter = 0;
5401 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5402 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5403 if (tp->serdes_counter)
5404 tp->serdes_counter--;
5407 u32 val = serdes_cfg;
5414 tw32_f(MAC_SERDES_CFG, val);
5417 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5420 /* Link parallel detection - link is up */
5421 /* only if we have PCS_SYNC and not */
5422 /* receiving config code words */
5423 mac_status = tr32(MAC_STATUS);
5424 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5425 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5426 tg3_setup_flow_control(tp, 0, 0);
5427 current_link_up = 1;
5429 TG3_PHYFLG_PARALLEL_DETECT;
5430 tp->serdes_counter =
5431 SERDES_PARALLEL_DET_TIMEOUT;
5433 goto restart_autoneg;
5437 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5438 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5442 return current_link_up;
5445 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5447 int current_link_up = 0;
5449 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5452 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5453 u32 txflags, rxflags;
5456 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5457 u32 local_adv = 0, remote_adv = 0;
5459 if (txflags & ANEG_CFG_PS1)
5460 local_adv |= ADVERTISE_1000XPAUSE;
5461 if (txflags & ANEG_CFG_PS2)
5462 local_adv |= ADVERTISE_1000XPSE_ASYM;
5464 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5465 remote_adv |= LPA_1000XPAUSE;
5466 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5467 remote_adv |= LPA_1000XPAUSE_ASYM;
5469 tp->link_config.rmt_adv =
5470 mii_adv_to_ethtool_adv_x(remote_adv);
5472 tg3_setup_flow_control(tp, local_adv, remote_adv);
5474 current_link_up = 1;
5476 for (i = 0; i < 30; i++) {
5479 (MAC_STATUS_SYNC_CHANGED |
5480 MAC_STATUS_CFG_CHANGED));
5482 if ((tr32(MAC_STATUS) &
5483 (MAC_STATUS_SYNC_CHANGED |
5484 MAC_STATUS_CFG_CHANGED)) == 0)
5488 mac_status = tr32(MAC_STATUS);
5489 if (current_link_up == 0 &&
5490 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5491 !(mac_status & MAC_STATUS_RCVD_CFG))
5492 current_link_up = 1;
5494 tg3_setup_flow_control(tp, 0, 0);
5496 /* Forcing 1000FD link up. */
5497 current_link_up = 1;
5499 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5502 tw32_f(MAC_MODE, tp->mac_mode);
5507 return current_link_up;
5510 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5513 u16 orig_active_speed;
5514 u8 orig_active_duplex;
5516 int current_link_up;
5519 orig_pause_cfg = tp->link_config.active_flowctrl;
5520 orig_active_speed = tp->link_config.active_speed;
5521 orig_active_duplex = tp->link_config.active_duplex;
5523 if (!tg3_flag(tp, HW_AUTONEG) &&
5525 tg3_flag(tp, INIT_COMPLETE)) {
5526 mac_status = tr32(MAC_STATUS);
5527 mac_status &= (MAC_STATUS_PCS_SYNCED |
5528 MAC_STATUS_SIGNAL_DET |
5529 MAC_STATUS_CFG_CHANGED |
5530 MAC_STATUS_RCVD_CFG);
5531 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5532 MAC_STATUS_SIGNAL_DET)) {
5533 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5534 MAC_STATUS_CFG_CHANGED));
5539 tw32_f(MAC_TX_AUTO_NEG, 0);
5541 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5542 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5543 tw32_f(MAC_MODE, tp->mac_mode);
5546 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5547 tg3_init_bcm8002(tp);
5549 /* Enable link change event even when serdes polling. */
5550 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5553 current_link_up = 0;
5554 tp->link_config.rmt_adv = 0;
5555 mac_status = tr32(MAC_STATUS);
5557 if (tg3_flag(tp, HW_AUTONEG))
5558 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5560 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5562 tp->napi[0].hw_status->status =
5563 (SD_STATUS_UPDATED |
5564 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5566 for (i = 0; i < 100; i++) {
5567 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5568 MAC_STATUS_CFG_CHANGED));
5570 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5571 MAC_STATUS_CFG_CHANGED |
5572 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5576 mac_status = tr32(MAC_STATUS);
5577 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5578 current_link_up = 0;
5579 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5580 tp->serdes_counter == 0) {
5581 tw32_f(MAC_MODE, (tp->mac_mode |
5582 MAC_MODE_SEND_CONFIGS));
5584 tw32_f(MAC_MODE, tp->mac_mode);
5588 if (current_link_up == 1) {
5589 tp->link_config.active_speed = SPEED_1000;
5590 tp->link_config.active_duplex = DUPLEX_FULL;
5591 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5592 LED_CTRL_LNKLED_OVERRIDE |
5593 LED_CTRL_1000MBPS_ON));
5595 tp->link_config.active_speed = SPEED_UNKNOWN;
5596 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5597 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5598 LED_CTRL_LNKLED_OVERRIDE |
5599 LED_CTRL_TRAFFIC_OVERRIDE));
5602 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5603 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5604 if (orig_pause_cfg != now_pause_cfg ||
5605 orig_active_speed != tp->link_config.active_speed ||
5606 orig_active_duplex != tp->link_config.active_duplex)
5607 tg3_link_report(tp);
5613 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5615 int current_link_up = 0, err = 0;
5617 u16 current_speed = SPEED_UNKNOWN;
5618 u8 current_duplex = DUPLEX_UNKNOWN;
5619 u32 local_adv, remote_adv, sgsr;
5621 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5622 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5623 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5624 (sgsr & SERDES_TG3_SGMII_MODE)) {
5629 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5631 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5632 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5634 current_link_up = 1;
5635 if (sgsr & SERDES_TG3_SPEED_1000) {
5636 current_speed = SPEED_1000;
5637 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5638 } else if (sgsr & SERDES_TG3_SPEED_100) {
5639 current_speed = SPEED_100;
5640 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5642 current_speed = SPEED_10;
5643 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5646 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5647 current_duplex = DUPLEX_FULL;
5649 current_duplex = DUPLEX_HALF;
5652 tw32_f(MAC_MODE, tp->mac_mode);
5655 tg3_clear_mac_status(tp);
5657 goto fiber_setup_done;
5660 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5661 tw32_f(MAC_MODE, tp->mac_mode);
5664 tg3_clear_mac_status(tp);
5669 tp->link_config.rmt_adv = 0;
5671 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5672 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5673 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5674 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5675 bmsr |= BMSR_LSTATUS;
5677 bmsr &= ~BMSR_LSTATUS;
5680 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5682 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5683 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5684 /* do nothing, just check for link up at the end */
5685 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5688 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5689 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5690 ADVERTISE_1000XPAUSE |
5691 ADVERTISE_1000XPSE_ASYM |
5694 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5695 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5697 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5698 tg3_writephy(tp, MII_ADVERTISE, newadv);
5699 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5700 tg3_writephy(tp, MII_BMCR, bmcr);
5702 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5703 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5704 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5711 bmcr &= ~BMCR_SPEED1000;
5712 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5714 if (tp->link_config.duplex == DUPLEX_FULL)
5715 new_bmcr |= BMCR_FULLDPLX;
5717 if (new_bmcr != bmcr) {
5718 /* BMCR_SPEED1000 is a reserved bit that needs
5719 * to be set on write.
5721 new_bmcr |= BMCR_SPEED1000;
5723 /* Force a linkdown */
5727 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5728 adv &= ~(ADVERTISE_1000XFULL |
5729 ADVERTISE_1000XHALF |
5731 tg3_writephy(tp, MII_ADVERTISE, adv);
5732 tg3_writephy(tp, MII_BMCR, bmcr |
5736 tg3_carrier_off(tp);
5738 tg3_writephy(tp, MII_BMCR, new_bmcr);
5740 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5741 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5742 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5743 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5744 bmsr |= BMSR_LSTATUS;
5746 bmsr &= ~BMSR_LSTATUS;
5748 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5752 if (bmsr & BMSR_LSTATUS) {
5753 current_speed = SPEED_1000;
5754 current_link_up = 1;
5755 if (bmcr & BMCR_FULLDPLX)
5756 current_duplex = DUPLEX_FULL;
5758 current_duplex = DUPLEX_HALF;
5763 if (bmcr & BMCR_ANENABLE) {
5766 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5767 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5768 common = local_adv & remote_adv;
5769 if (common & (ADVERTISE_1000XHALF |
5770 ADVERTISE_1000XFULL)) {
5771 if (common & ADVERTISE_1000XFULL)
5772 current_duplex = DUPLEX_FULL;
5774 current_duplex = DUPLEX_HALF;
5776 tp->link_config.rmt_adv =
5777 mii_adv_to_ethtool_adv_x(remote_adv);
5778 } else if (!tg3_flag(tp, 5780_CLASS)) {
5779 /* Link is up via parallel detect */
5781 current_link_up = 0;
5787 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5788 tg3_setup_flow_control(tp, local_adv, remote_adv);
5790 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5791 if (tp->link_config.active_duplex == DUPLEX_HALF)
5792 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5794 tw32_f(MAC_MODE, tp->mac_mode);
5797 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5799 tp->link_config.active_speed = current_speed;
5800 tp->link_config.active_duplex = current_duplex;
5802 tg3_test_and_report_link_chg(tp, current_link_up);
5806 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5808 if (tp->serdes_counter) {
5809 /* Give autoneg time to complete. */
5810 tp->serdes_counter--;
5815 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5818 tg3_readphy(tp, MII_BMCR, &bmcr);
5819 if (bmcr & BMCR_ANENABLE) {
5822 /* Select shadow register 0x1f */
5823 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5824 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5826 /* Select expansion interrupt status register */
5827 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5828 MII_TG3_DSP_EXP1_INT_STAT);
5829 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5830 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5832 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5833 /* We have signal detect and not receiving
5834 * config code words, link is up by parallel
5838 bmcr &= ~BMCR_ANENABLE;
5839 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5840 tg3_writephy(tp, MII_BMCR, bmcr);
5841 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5844 } else if (tp->link_up &&
5845 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5846 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5849 /* Select expansion interrupt status register */
5850 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5851 MII_TG3_DSP_EXP1_INT_STAT);
5852 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5856 /* Config code words received, turn on autoneg. */
5857 tg3_readphy(tp, MII_BMCR, &bmcr);
5858 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5860 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5866 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5871 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5872 err = tg3_setup_fiber_phy(tp, force_reset);
5873 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5874 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5876 err = tg3_setup_copper_phy(tp, force_reset);
5878 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
5881 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5882 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5884 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5889 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5890 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5891 tw32(GRC_MISC_CFG, val);
5894 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5895 (6 << TX_LENGTHS_IPG_SHIFT);
5896 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
5897 tg3_asic_rev(tp) == ASIC_REV_5762)
5898 val |= tr32(MAC_TX_LENGTHS) &
5899 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5900 TX_LENGTHS_CNT_DWN_VAL_MSK);
5902 if (tp->link_config.active_speed == SPEED_1000 &&
5903 tp->link_config.active_duplex == DUPLEX_HALF)
5904 tw32(MAC_TX_LENGTHS, val |
5905 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5907 tw32(MAC_TX_LENGTHS, val |
5908 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5910 if (!tg3_flag(tp, 5705_PLUS)) {
5912 tw32(HOSTCC_STAT_COAL_TICKS,
5913 tp->coal.stats_block_coalesce_usecs);
5915 tw32(HOSTCC_STAT_COAL_TICKS, 0);
5919 if (tg3_flag(tp, ASPM_WORKAROUND)) {
5920 val = tr32(PCIE_PWR_MGMT_THRESH);
5922 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5925 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5926 tw32(PCIE_PWR_MGMT_THRESH, val);
5932 /* tp->lock must be held */
5933 static u64 tg3_refclk_read(struct tg3 *tp)
5935 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
5936 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
5939 /* tp->lock must be held */
5940 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
5942 tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
5943 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
5944 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
5945 tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
5948 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
5949 static inline void tg3_full_unlock(struct tg3 *tp);
5950 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
5952 struct tg3 *tp = netdev_priv(dev);
5954 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5955 SOF_TIMESTAMPING_RX_SOFTWARE |
5956 SOF_TIMESTAMPING_SOFTWARE |
5957 SOF_TIMESTAMPING_TX_HARDWARE |
5958 SOF_TIMESTAMPING_RX_HARDWARE |
5959 SOF_TIMESTAMPING_RAW_HARDWARE;
5962 info->phc_index = ptp_clock_index(tp->ptp_clock);
5964 info->phc_index = -1;
5966 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
5968 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
5969 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
5970 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
5971 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
5975 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
5977 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5978 bool neg_adj = false;
5986 /* Frequency adjustment is performed using hardware with a 24 bit
5987 * accumulator and a programmable correction value. On each clk, the
5988 * correction value gets added to the accumulator and when it
5989 * overflows, the time counter is incremented/decremented.
5991 * So conversion from ppb to correction value is
5992 * ppb * (1 << 24) / 1000000000
5994 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
5995 TG3_EAV_REF_CLK_CORRECT_MASK;
5997 tg3_full_lock(tp, 0);
6000 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6001 TG3_EAV_REF_CLK_CORRECT_EN |
6002 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6004 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6006 tg3_full_unlock(tp);
6011 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6013 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6015 tg3_full_lock(tp, 0);
6016 tp->ptp_adjust += delta;
6017 tg3_full_unlock(tp);
6022 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
6026 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6028 tg3_full_lock(tp, 0);
6029 ns = tg3_refclk_read(tp);
6030 ns += tp->ptp_adjust;
6031 tg3_full_unlock(tp);
6033 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
6034 ts->tv_nsec = remainder;
6039 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6040 const struct timespec *ts)
6043 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6045 ns = timespec_to_ns(ts);
6047 tg3_full_lock(tp, 0);
6048 tg3_refclk_write(tp, ns);
6050 tg3_full_unlock(tp);
6055 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6056 struct ptp_clock_request *rq, int on)
6061 static const struct ptp_clock_info tg3_ptp_caps = {
6062 .owner = THIS_MODULE,
6063 .name = "tg3 clock",
6064 .max_adj = 250000000,
6069 .adjfreq = tg3_ptp_adjfreq,
6070 .adjtime = tg3_ptp_adjtime,
6071 .gettime = tg3_ptp_gettime,
6072 .settime = tg3_ptp_settime,
6073 .enable = tg3_ptp_enable,
6076 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6077 struct skb_shared_hwtstamps *timestamp)
6079 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6080 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6084 /* tp->lock must be held */
6085 static void tg3_ptp_init(struct tg3 *tp)
6087 if (!tg3_flag(tp, PTP_CAPABLE))
6090 /* Initialize the hardware clock to the system time. */
6091 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6093 tp->ptp_info = tg3_ptp_caps;
6096 /* tp->lock must be held */
6097 static void tg3_ptp_resume(struct tg3 *tp)
6099 if (!tg3_flag(tp, PTP_CAPABLE))
6102 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6106 static void tg3_ptp_fini(struct tg3 *tp)
6108 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6111 ptp_clock_unregister(tp->ptp_clock);
6112 tp->ptp_clock = NULL;
6116 static inline int tg3_irq_sync(struct tg3 *tp)
6118 return tp->irq_sync;
6121 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6125 dst = (u32 *)((u8 *)dst + off);
6126 for (i = 0; i < len; i += sizeof(u32))
6127 *dst++ = tr32(off + i);
6130 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6132 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6133 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6134 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6135 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6136 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6137 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6138 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6139 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6140 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6141 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6142 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6143 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6144 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6145 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6146 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6147 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6148 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6149 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6150 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6152 if (tg3_flag(tp, SUPPORT_MSIX))
6153 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6155 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6156 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6157 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6158 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6159 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6160 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6161 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6162 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6164 if (!tg3_flag(tp, 5705_PLUS)) {
6165 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6166 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6167 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6170 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6171 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6172 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6173 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6174 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6176 if (tg3_flag(tp, NVRAM))
6177 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6180 static void tg3_dump_state(struct tg3 *tp)
6185 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6189 if (tg3_flag(tp, PCI_EXPRESS)) {
6190 /* Read up to but not including private PCI registers */
6191 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6192 regs[i / sizeof(u32)] = tr32(i);
6194 tg3_dump_legacy_regs(tp, regs);
6196 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6197 if (!regs[i + 0] && !regs[i + 1] &&
6198 !regs[i + 2] && !regs[i + 3])
6201 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6203 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6208 for (i = 0; i < tp->irq_cnt; i++) {
6209 struct tg3_napi *tnapi = &tp->napi[i];
6211 /* SW status block */
6213 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6215 tnapi->hw_status->status,
6216 tnapi->hw_status->status_tag,
6217 tnapi->hw_status->rx_jumbo_consumer,
6218 tnapi->hw_status->rx_consumer,
6219 tnapi->hw_status->rx_mini_consumer,
6220 tnapi->hw_status->idx[0].rx_producer,
6221 tnapi->hw_status->idx[0].tx_consumer);
6224 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6226 tnapi->last_tag, tnapi->last_irq_tag,
6227 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6229 tnapi->prodring.rx_std_prod_idx,
6230 tnapi->prodring.rx_std_cons_idx,
6231 tnapi->prodring.rx_jmb_prod_idx,
6232 tnapi->prodring.rx_jmb_cons_idx);
6236 /* This is called whenever we suspect that the system chipset is re-
6237 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6238 * is bogus tx completions. We try to recover by setting the
6239 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6242 static void tg3_tx_recover(struct tg3 *tp)
6244 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6245 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6247 netdev_warn(tp->dev,
6248 "The system may be re-ordering memory-mapped I/O "
6249 "cycles to the network device, attempting to recover. "
6250 "Please report the problem to the driver maintainer "
6251 "and include system chipset information.\n");
6253 spin_lock(&tp->lock);
6254 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6255 spin_unlock(&tp->lock);
6258 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6260 /* Tell compiler to fetch tx indices from memory. */
6262 return tnapi->tx_pending -
6263 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6266 /* Tigon3 never reports partial packet sends. So we do not
6267 * need special logic to handle SKBs that have not had all
6268 * of their frags sent yet, like SunGEM does.
6270 static void tg3_tx(struct tg3_napi *tnapi)
6272 struct tg3 *tp = tnapi->tp;
6273 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6274 u32 sw_idx = tnapi->tx_cons;
6275 struct netdev_queue *txq;
6276 int index = tnapi - tp->napi;
6277 unsigned int pkts_compl = 0, bytes_compl = 0;
6279 if (tg3_flag(tp, ENABLE_TSS))
6282 txq = netdev_get_tx_queue(tp->dev, index);
6284 while (sw_idx != hw_idx) {
6285 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6286 struct sk_buff *skb = ri->skb;
6289 if (unlikely(skb == NULL)) {
6294 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6295 struct skb_shared_hwtstamps timestamp;
6296 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6297 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6299 tg3_hwclock_to_timestamp(tp, hwclock, ×tamp);
6301 skb_tstamp_tx(skb, ×tamp);
6304 pci_unmap_single(tp->pdev,
6305 dma_unmap_addr(ri, mapping),
6311 while (ri->fragmented) {
6312 ri->fragmented = false;
6313 sw_idx = NEXT_TX(sw_idx);
6314 ri = &tnapi->tx_buffers[sw_idx];
6317 sw_idx = NEXT_TX(sw_idx);
6319 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6320 ri = &tnapi->tx_buffers[sw_idx];
6321 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6324 pci_unmap_page(tp->pdev,
6325 dma_unmap_addr(ri, mapping),
6326 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6329 while (ri->fragmented) {
6330 ri->fragmented = false;
6331 sw_idx = NEXT_TX(sw_idx);
6332 ri = &tnapi->tx_buffers[sw_idx];
6335 sw_idx = NEXT_TX(sw_idx);
6339 bytes_compl += skb->len;
6343 if (unlikely(tx_bug)) {
6349 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6351 tnapi->tx_cons = sw_idx;
6353 /* Need to make the tx_cons update visible to tg3_start_xmit()
6354 * before checking for netif_queue_stopped(). Without the
6355 * memory barrier, there is a small possibility that tg3_start_xmit()
6356 * will miss it and cause the queue to be stopped forever.
6360 if (unlikely(netif_tx_queue_stopped(txq) &&
6361 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6362 __netif_tx_lock(txq, smp_processor_id());
6363 if (netif_tx_queue_stopped(txq) &&
6364 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6365 netif_tx_wake_queue(txq);
6366 __netif_tx_unlock(txq);
6370 static void tg3_frag_free(bool is_frag, void *data)
6373 put_page(virt_to_head_page(data));
6378 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6380 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6381 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6386 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6387 map_sz, PCI_DMA_FROMDEVICE);
6388 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6393 /* Returns size of skb allocated or < 0 on error.
6395 * We only need to fill in the address because the other members
6396 * of the RX descriptor are invariant, see tg3_init_rings.
6398 * Note the purposeful assymetry of cpu vs. chip accesses. For
6399 * posting buffers we only dirty the first cache line of the RX
6400 * descriptor (containing the address). Whereas for the RX status
6401 * buffers the cpu only reads the last cacheline of the RX descriptor
6402 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6404 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6405 u32 opaque_key, u32 dest_idx_unmasked,
6406 unsigned int *frag_size)
6408 struct tg3_rx_buffer_desc *desc;
6409 struct ring_info *map;
6412 int skb_size, data_size, dest_idx;
6414 switch (opaque_key) {
6415 case RXD_OPAQUE_RING_STD:
6416 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6417 desc = &tpr->rx_std[dest_idx];
6418 map = &tpr->rx_std_buffers[dest_idx];
6419 data_size = tp->rx_pkt_map_sz;
6422 case RXD_OPAQUE_RING_JUMBO:
6423 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6424 desc = &tpr->rx_jmb[dest_idx].std;
6425 map = &tpr->rx_jmb_buffers[dest_idx];
6426 data_size = TG3_RX_JMB_MAP_SZ;
6433 /* Do not overwrite any of the map or rp information
6434 * until we are sure we can commit to a new buffer.
6436 * Callers depend upon this behavior and assume that
6437 * we leave everything unchanged if we fail.
6439 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6440 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6441 if (skb_size <= PAGE_SIZE) {
6442 data = netdev_alloc_frag(skb_size);
6443 *frag_size = skb_size;
6445 data = kmalloc(skb_size, GFP_ATOMIC);
6451 mapping = pci_map_single(tp->pdev,
6452 data + TG3_RX_OFFSET(tp),
6454 PCI_DMA_FROMDEVICE);
6455 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6456 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6461 dma_unmap_addr_set(map, mapping, mapping);
6463 desc->addr_hi = ((u64)mapping >> 32);
6464 desc->addr_lo = ((u64)mapping & 0xffffffff);
6469 /* We only need to move over in the address because the other
6470 * members of the RX descriptor are invariant. See notes above
6471 * tg3_alloc_rx_data for full details.
6473 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6474 struct tg3_rx_prodring_set *dpr,
6475 u32 opaque_key, int src_idx,
6476 u32 dest_idx_unmasked)
6478 struct tg3 *tp = tnapi->tp;
6479 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6480 struct ring_info *src_map, *dest_map;
6481 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6484 switch (opaque_key) {
6485 case RXD_OPAQUE_RING_STD:
6486 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6487 dest_desc = &dpr->rx_std[dest_idx];
6488 dest_map = &dpr->rx_std_buffers[dest_idx];
6489 src_desc = &spr->rx_std[src_idx];
6490 src_map = &spr->rx_std_buffers[src_idx];
6493 case RXD_OPAQUE_RING_JUMBO:
6494 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6495 dest_desc = &dpr->rx_jmb[dest_idx].std;
6496 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6497 src_desc = &spr->rx_jmb[src_idx].std;
6498 src_map = &spr->rx_jmb_buffers[src_idx];
6505 dest_map->data = src_map->data;
6506 dma_unmap_addr_set(dest_map, mapping,
6507 dma_unmap_addr(src_map, mapping));
6508 dest_desc->addr_hi = src_desc->addr_hi;
6509 dest_desc->addr_lo = src_desc->addr_lo;
6511 /* Ensure that the update to the skb happens after the physical
6512 * addresses have been transferred to the new BD location.
6516 src_map->data = NULL;
6519 /* The RX ring scheme is composed of multiple rings which post fresh
6520 * buffers to the chip, and one special ring the chip uses to report
6521 * status back to the host.
6523 * The special ring reports the status of received packets to the
6524 * host. The chip does not write into the original descriptor the
6525 * RX buffer was obtained from. The chip simply takes the original
6526 * descriptor as provided by the host, updates the status and length
6527 * field, then writes this into the next status ring entry.
6529 * Each ring the host uses to post buffers to the chip is described
6530 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
6531 * it is first placed into the on-chip ram. When the packet's length
6532 * is known, it walks down the TG3_BDINFO entries to select the ring.
6533 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6534 * which is within the range of the new packet's length is chosen.
6536 * The "separate ring for rx status" scheme may sound queer, but it makes
6537 * sense from a cache coherency perspective. If only the host writes
6538 * to the buffer post rings, and only the chip writes to the rx status
6539 * rings, then cache lines never move beyond shared-modified state.
6540 * If both the host and chip were to write into the same ring, cache line
6541 * eviction could occur since both entities want it in an exclusive state.
6543 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6545 struct tg3 *tp = tnapi->tp;
6546 u32 work_mask, rx_std_posted = 0;
6547 u32 std_prod_idx, jmb_prod_idx;
6548 u32 sw_idx = tnapi->rx_rcb_ptr;
6551 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6553 hw_idx = *(tnapi->rx_rcb_prod_idx);
6555 * We need to order the read of hw_idx and the read of
6556 * the opaque cookie.
6561 std_prod_idx = tpr->rx_std_prod_idx;
6562 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6563 while (sw_idx != hw_idx && budget > 0) {
6564 struct ring_info *ri;
6565 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6567 struct sk_buff *skb;
6568 dma_addr_t dma_addr;
6569 u32 opaque_key, desc_idx, *post_ptr;
6573 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6574 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6575 if (opaque_key == RXD_OPAQUE_RING_STD) {
6576 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6577 dma_addr = dma_unmap_addr(ri, mapping);
6579 post_ptr = &std_prod_idx;
6581 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6582 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6583 dma_addr = dma_unmap_addr(ri, mapping);
6585 post_ptr = &jmb_prod_idx;
6587 goto next_pkt_nopost;
6589 work_mask |= opaque_key;
6591 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6592 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6594 tg3_recycle_rx(tnapi, tpr, opaque_key,
6595 desc_idx, *post_ptr);
6597 /* Other statistics kept track of by card. */
6602 prefetch(data + TG3_RX_OFFSET(tp));
6603 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6606 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6607 RXD_FLAG_PTPSTAT_PTPV1 ||
6608 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6609 RXD_FLAG_PTPSTAT_PTPV2) {
6610 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6611 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6614 if (len > TG3_RX_COPY_THRESH(tp)) {
6616 unsigned int frag_size;
6618 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6619 *post_ptr, &frag_size);
6623 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6624 PCI_DMA_FROMDEVICE);
6626 skb = build_skb(data, frag_size);
6628 tg3_frag_free(frag_size != 0, data);
6629 goto drop_it_no_recycle;
6631 skb_reserve(skb, TG3_RX_OFFSET(tp));
6632 /* Ensure that the update to the data happens
6633 * after the usage of the old DMA mapping.
6640 tg3_recycle_rx(tnapi, tpr, opaque_key,
6641 desc_idx, *post_ptr);
6643 skb = netdev_alloc_skb(tp->dev,
6644 len + TG3_RAW_IP_ALIGN);
6646 goto drop_it_no_recycle;
6648 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6649 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6651 data + TG3_RX_OFFSET(tp),
6653 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6658 tg3_hwclock_to_timestamp(tp, tstamp,
6659 skb_hwtstamps(skb));
6661 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6662 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6663 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6664 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6665 skb->ip_summed = CHECKSUM_UNNECESSARY;
6667 skb_checksum_none_assert(skb);
6669 skb->protocol = eth_type_trans(skb, tp->dev);
6671 if (len > (tp->dev->mtu + ETH_HLEN) &&
6672 skb->protocol != htons(ETH_P_8021Q)) {
6674 goto drop_it_no_recycle;
6677 if (desc->type_flags & RXD_FLAG_VLAN &&
6678 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6679 __vlan_hwaccel_put_tag(skb,
6680 desc->err_vlan & RXD_VLAN_MASK);
6682 napi_gro_receive(&tnapi->napi, skb);
6690 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6691 tpr->rx_std_prod_idx = std_prod_idx &
6692 tp->rx_std_ring_mask;
6693 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6694 tpr->rx_std_prod_idx);
6695 work_mask &= ~RXD_OPAQUE_RING_STD;
6700 sw_idx &= tp->rx_ret_ring_mask;
6702 /* Refresh hw_idx to see if there is new work */
6703 if (sw_idx == hw_idx) {
6704 hw_idx = *(tnapi->rx_rcb_prod_idx);
6709 /* ACK the status ring. */
6710 tnapi->rx_rcb_ptr = sw_idx;
6711 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6713 /* Refill RX ring(s). */
6714 if (!tg3_flag(tp, ENABLE_RSS)) {
6715 /* Sync BD data before updating mailbox */
6718 if (work_mask & RXD_OPAQUE_RING_STD) {
6719 tpr->rx_std_prod_idx = std_prod_idx &
6720 tp->rx_std_ring_mask;
6721 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6722 tpr->rx_std_prod_idx);
6724 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6725 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6726 tp->rx_jmb_ring_mask;
6727 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6728 tpr->rx_jmb_prod_idx);
6731 } else if (work_mask) {
6732 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6733 * updated before the producer indices can be updated.
6737 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6738 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6740 if (tnapi != &tp->napi[1]) {
6741 tp->rx_refill = true;
6742 napi_schedule(&tp->napi[1].napi);
6749 static void tg3_poll_link(struct tg3 *tp)
6751 /* handle link change and other phy events */
6752 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6753 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6755 if (sblk->status & SD_STATUS_LINK_CHG) {
6756 sblk->status = SD_STATUS_UPDATED |
6757 (sblk->status & ~SD_STATUS_LINK_CHG);
6758 spin_lock(&tp->lock);
6759 if (tg3_flag(tp, USE_PHYLIB)) {
6761 (MAC_STATUS_SYNC_CHANGED |
6762 MAC_STATUS_CFG_CHANGED |
6763 MAC_STATUS_MI_COMPLETION |
6764 MAC_STATUS_LNKSTATE_CHANGED));
6767 tg3_setup_phy(tp, 0);
6768 spin_unlock(&tp->lock);
6773 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6774 struct tg3_rx_prodring_set *dpr,
6775 struct tg3_rx_prodring_set *spr)
6777 u32 si, di, cpycnt, src_prod_idx;
6781 src_prod_idx = spr->rx_std_prod_idx;
6783 /* Make sure updates to the rx_std_buffers[] entries and the
6784 * standard producer index are seen in the correct order.
6788 if (spr->rx_std_cons_idx == src_prod_idx)
6791 if (spr->rx_std_cons_idx < src_prod_idx)
6792 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6794 cpycnt = tp->rx_std_ring_mask + 1 -
6795 spr->rx_std_cons_idx;
6797 cpycnt = min(cpycnt,
6798 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6800 si = spr->rx_std_cons_idx;
6801 di = dpr->rx_std_prod_idx;
6803 for (i = di; i < di + cpycnt; i++) {
6804 if (dpr->rx_std_buffers[i].data) {
6814 /* Ensure that updates to the rx_std_buffers ring and the
6815 * shadowed hardware producer ring from tg3_recycle_skb() are
6816 * ordered correctly WRT the skb check above.
6820 memcpy(&dpr->rx_std_buffers[di],
6821 &spr->rx_std_buffers[si],
6822 cpycnt * sizeof(struct ring_info));
6824 for (i = 0; i < cpycnt; i++, di++, si++) {
6825 struct tg3_rx_buffer_desc *sbd, *dbd;
6826 sbd = &spr->rx_std[si];
6827 dbd = &dpr->rx_std[di];
6828 dbd->addr_hi = sbd->addr_hi;
6829 dbd->addr_lo = sbd->addr_lo;
6832 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6833 tp->rx_std_ring_mask;
6834 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6835 tp->rx_std_ring_mask;
6839 src_prod_idx = spr->rx_jmb_prod_idx;
6841 /* Make sure updates to the rx_jmb_buffers[] entries and
6842 * the jumbo producer index are seen in the correct order.
6846 if (spr->rx_jmb_cons_idx == src_prod_idx)
6849 if (spr->rx_jmb_cons_idx < src_prod_idx)
6850 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6852 cpycnt = tp->rx_jmb_ring_mask + 1 -
6853 spr->rx_jmb_cons_idx;
6855 cpycnt = min(cpycnt,
6856 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6858 si = spr->rx_jmb_cons_idx;
6859 di = dpr->rx_jmb_prod_idx;
6861 for (i = di; i < di + cpycnt; i++) {
6862 if (dpr->rx_jmb_buffers[i].data) {
6872 /* Ensure that updates to the rx_jmb_buffers ring and the
6873 * shadowed hardware producer ring from tg3_recycle_skb() are
6874 * ordered correctly WRT the skb check above.
6878 memcpy(&dpr->rx_jmb_buffers[di],
6879 &spr->rx_jmb_buffers[si],
6880 cpycnt * sizeof(struct ring_info));
6882 for (i = 0; i < cpycnt; i++, di++, si++) {
6883 struct tg3_rx_buffer_desc *sbd, *dbd;
6884 sbd = &spr->rx_jmb[si].std;
6885 dbd = &dpr->rx_jmb[di].std;
6886 dbd->addr_hi = sbd->addr_hi;
6887 dbd->addr_lo = sbd->addr_lo;
6890 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6891 tp->rx_jmb_ring_mask;
6892 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6893 tp->rx_jmb_ring_mask;
6899 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6901 struct tg3 *tp = tnapi->tp;
6903 /* run TX completion thread */
6904 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6906 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6910 if (!tnapi->rx_rcb_prod_idx)
6913 /* run RX thread, within the bounds set by NAPI.
6914 * All RX "locking" is done by ensuring outside
6915 * code synchronizes with tg3->napi.poll()
6917 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6918 work_done += tg3_rx(tnapi, budget - work_done);
6920 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6921 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6923 u32 std_prod_idx = dpr->rx_std_prod_idx;
6924 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6926 tp->rx_refill = false;
6927 for (i = 1; i <= tp->rxq_cnt; i++)
6928 err |= tg3_rx_prodring_xfer(tp, dpr,
6929 &tp->napi[i].prodring);
6933 if (std_prod_idx != dpr->rx_std_prod_idx)
6934 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6935 dpr->rx_std_prod_idx);
6937 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6938 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6939 dpr->rx_jmb_prod_idx);
6944 tw32_f(HOSTCC_MODE, tp->coal_now);
6950 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6952 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6953 schedule_work(&tp->reset_task);
6956 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6958 cancel_work_sync(&tp->reset_task);
6959 tg3_flag_clear(tp, RESET_TASK_PENDING);
6960 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6963 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6965 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6966 struct tg3 *tp = tnapi->tp;
6968 struct tg3_hw_status *sblk = tnapi->hw_status;
6971 work_done = tg3_poll_work(tnapi, work_done, budget);
6973 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6976 if (unlikely(work_done >= budget))
6979 /* tp->last_tag is used in tg3_int_reenable() below
6980 * to tell the hw how much work has been processed,
6981 * so we must read it before checking for more work.
6983 tnapi->last_tag = sblk->status_tag;
6984 tnapi->last_irq_tag = tnapi->last_tag;
6987 /* check for RX/TX work to do */
6988 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6989 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6991 /* This test here is not race free, but will reduce
6992 * the number of interrupts by looping again.
6994 if (tnapi == &tp->napi[1] && tp->rx_refill)
6997 napi_complete(napi);
6998 /* Reenable interrupts. */
6999 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7001 /* This test here is synchronized by napi_schedule()
7002 * and napi_complete() to close the race condition.
7004 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7005 tw32(HOSTCC_MODE, tp->coalesce_mode |
7006 HOSTCC_MODE_ENABLE |
7017 /* work_done is guaranteed to be less than budget. */
7018 napi_complete(napi);
7019 tg3_reset_task_schedule(tp);
7023 static void tg3_process_error(struct tg3 *tp)
7026 bool real_error = false;
7028 if (tg3_flag(tp, ERROR_PROCESSED))
7031 /* Check Flow Attention register */
7032 val = tr32(HOSTCC_FLOW_ATTN);
7033 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7034 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7038 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7039 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7043 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7044 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7053 tg3_flag_set(tp, ERROR_PROCESSED);
7054 tg3_reset_task_schedule(tp);
7057 static int tg3_poll(struct napi_struct *napi, int budget)
7059 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7060 struct tg3 *tp = tnapi->tp;
7062 struct tg3_hw_status *sblk = tnapi->hw_status;
7065 if (sblk->status & SD_STATUS_ERROR)
7066 tg3_process_error(tp);
7070 work_done = tg3_poll_work(tnapi, work_done, budget);
7072 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7075 if (unlikely(work_done >= budget))
7078 if (tg3_flag(tp, TAGGED_STATUS)) {
7079 /* tp->last_tag is used in tg3_int_reenable() below
7080 * to tell the hw how much work has been processed,
7081 * so we must read it before checking for more work.
7083 tnapi->last_tag = sblk->status_tag;
7084 tnapi->last_irq_tag = tnapi->last_tag;
7087 sblk->status &= ~SD_STATUS_UPDATED;
7089 if (likely(!tg3_has_work(tnapi))) {
7090 napi_complete(napi);
7091 tg3_int_reenable(tnapi);
7099 /* work_done is guaranteed to be less than budget. */
7100 napi_complete(napi);
7101 tg3_reset_task_schedule(tp);
7105 static void tg3_napi_disable(struct tg3 *tp)
7109 for (i = tp->irq_cnt - 1; i >= 0; i--)
7110 napi_disable(&tp->napi[i].napi);
7113 static void tg3_napi_enable(struct tg3 *tp)
7117 for (i = 0; i < tp->irq_cnt; i++)
7118 napi_enable(&tp->napi[i].napi);
7121 static void tg3_napi_init(struct tg3 *tp)
7125 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7126 for (i = 1; i < tp->irq_cnt; i++)
7127 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7130 static void tg3_napi_fini(struct tg3 *tp)
7134 for (i = 0; i < tp->irq_cnt; i++)
7135 netif_napi_del(&tp->napi[i].napi);
7138 static inline void tg3_netif_stop(struct tg3 *tp)
7140 tp->dev->trans_start = jiffies; /* prevent tx timeout */
7141 tg3_napi_disable(tp);
7142 netif_carrier_off(tp->dev);
7143 netif_tx_disable(tp->dev);
7146 /* tp->lock must be held */
7147 static inline void tg3_netif_start(struct tg3 *tp)
7151 /* NOTE: unconditional netif_tx_wake_all_queues is only
7152 * appropriate so long as all callers are assured to
7153 * have free tx slots (such as after tg3_init_hw)
7155 netif_tx_wake_all_queues(tp->dev);
7158 netif_carrier_on(tp->dev);
7160 tg3_napi_enable(tp);
7161 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7162 tg3_enable_ints(tp);
7165 static void tg3_irq_quiesce(struct tg3 *tp)
7169 BUG_ON(tp->irq_sync);
7174 for (i = 0; i < tp->irq_cnt; i++)
7175 synchronize_irq(tp->napi[i].irq_vec);
7178 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7179 * If irq_sync is non-zero, then the IRQ handler must be synchronized
7180 * with as well. Most of the time, this is not necessary except when
7181 * shutting down the device.
7183 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7185 spin_lock_bh(&tp->lock);
7187 tg3_irq_quiesce(tp);
7190 static inline void tg3_full_unlock(struct tg3 *tp)
7192 spin_unlock_bh(&tp->lock);
7195 /* One-shot MSI handler - Chip automatically disables interrupt
7196 * after sending MSI so driver doesn't have to do it.
7198 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7200 struct tg3_napi *tnapi = dev_id;
7201 struct tg3 *tp = tnapi->tp;
7203 prefetch(tnapi->hw_status);
7205 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7207 if (likely(!tg3_irq_sync(tp)))
7208 napi_schedule(&tnapi->napi);
7213 /* MSI ISR - No need to check for interrupt sharing and no need to
7214 * flush status block and interrupt mailbox. PCI ordering rules
7215 * guarantee that MSI will arrive after the status block.
7217 static irqreturn_t tg3_msi(int irq, void *dev_id)
7219 struct tg3_napi *tnapi = dev_id;
7220 struct tg3 *tp = tnapi->tp;
7222 prefetch(tnapi->hw_status);
7224 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7226 * Writing any value to intr-mbox-0 clears PCI INTA# and
7227 * chip-internal interrupt pending events.
7228 * Writing non-zero to intr-mbox-0 additional tells the
7229 * NIC to stop sending us irqs, engaging "in-intr-handler"
7232 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7233 if (likely(!tg3_irq_sync(tp)))
7234 napi_schedule(&tnapi->napi);
7236 return IRQ_RETVAL(1);
7239 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7241 struct tg3_napi *tnapi = dev_id;
7242 struct tg3 *tp = tnapi->tp;
7243 struct tg3_hw_status *sblk = tnapi->hw_status;
7244 unsigned int handled = 1;
7246 /* In INTx mode, it is possible for the interrupt to arrive at
7247 * the CPU before the status block posted prior to the interrupt.
7248 * Reading the PCI State register will confirm whether the
7249 * interrupt is ours and will flush the status block.
7251 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7252 if (tg3_flag(tp, CHIP_RESETTING) ||
7253 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7260 * Writing any value to intr-mbox-0 clears PCI INTA# and
7261 * chip-internal interrupt pending events.
7262 * Writing non-zero to intr-mbox-0 additional tells the
7263 * NIC to stop sending us irqs, engaging "in-intr-handler"
7266 * Flush the mailbox to de-assert the IRQ immediately to prevent
7267 * spurious interrupts. The flush impacts performance but
7268 * excessive spurious interrupts can be worse in some cases.
7270 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7271 if (tg3_irq_sync(tp))
7273 sblk->status &= ~SD_STATUS_UPDATED;
7274 if (likely(tg3_has_work(tnapi))) {
7275 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7276 napi_schedule(&tnapi->napi);
7278 /* No work, shared interrupt perhaps? re-enable
7279 * interrupts, and flush that PCI write
7281 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7285 return IRQ_RETVAL(handled);
7288 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7290 struct tg3_napi *tnapi = dev_id;
7291 struct tg3 *tp = tnapi->tp;
7292 struct tg3_hw_status *sblk = tnapi->hw_status;
7293 unsigned int handled = 1;
7295 /* In INTx mode, it is possible for the interrupt to arrive at
7296 * the CPU before the status block posted prior to the interrupt.
7297 * Reading the PCI State register will confirm whether the
7298 * interrupt is ours and will flush the status block.
7300 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7301 if (tg3_flag(tp, CHIP_RESETTING) ||
7302 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7309 * writing any value to intr-mbox-0 clears PCI INTA# and
7310 * chip-internal interrupt pending events.
7311 * writing non-zero to intr-mbox-0 additional tells the
7312 * NIC to stop sending us irqs, engaging "in-intr-handler"
7315 * Flush the mailbox to de-assert the IRQ immediately to prevent
7316 * spurious interrupts. The flush impacts performance but
7317 * excessive spurious interrupts can be worse in some cases.
7319 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7322 * In a shared interrupt configuration, sometimes other devices'
7323 * interrupts will scream. We record the current status tag here
7324 * so that the above check can report that the screaming interrupts
7325 * are unhandled. Eventually they will be silenced.
7327 tnapi->last_irq_tag = sblk->status_tag;
7329 if (tg3_irq_sync(tp))
7332 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7334 napi_schedule(&tnapi->napi);
7337 return IRQ_RETVAL(handled);
7340 /* ISR for interrupt test */
7341 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7343 struct tg3_napi *tnapi = dev_id;
7344 struct tg3 *tp = tnapi->tp;
7345 struct tg3_hw_status *sblk = tnapi->hw_status;
7347 if ((sblk->status & SD_STATUS_UPDATED) ||
7348 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7349 tg3_disable_ints(tp);
7350 return IRQ_RETVAL(1);
7352 return IRQ_RETVAL(0);
7355 #ifdef CONFIG_NET_POLL_CONTROLLER
7356 static void tg3_poll_controller(struct net_device *dev)
7359 struct tg3 *tp = netdev_priv(dev);
7361 if (tg3_irq_sync(tp))
7364 for (i = 0; i < tp->irq_cnt; i++)
7365 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7369 static void tg3_tx_timeout(struct net_device *dev)
7371 struct tg3 *tp = netdev_priv(dev);
7373 if (netif_msg_tx_err(tp)) {
7374 netdev_err(dev, "transmit timed out, resetting\n");
7378 tg3_reset_task_schedule(tp);
7381 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7382 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7384 u32 base = (u32) mapping & 0xffffffff;
7386 return (base > 0xffffdcc0) && (base + len + 8 < base);
7389 /* Test for DMA addresses > 40-bit */
7390 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7393 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7394 if (tg3_flag(tp, 40BIT_DMA_BUG))
7395 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7402 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7403 dma_addr_t mapping, u32 len, u32 flags,
7406 txbd->addr_hi = ((u64) mapping >> 32);
7407 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7408 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7409 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7412 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7413 dma_addr_t map, u32 len, u32 flags,
7416 struct tg3 *tp = tnapi->tp;
7419 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7422 if (tg3_4g_overflow_test(map, len))
7425 if (tg3_40bit_overflow_test(tp, map, len))
7428 if (tp->dma_limit) {
7429 u32 prvidx = *entry;
7430 u32 tmp_flag = flags & ~TXD_FLAG_END;
7431 while (len > tp->dma_limit && *budget) {
7432 u32 frag_len = tp->dma_limit;
7433 len -= tp->dma_limit;
7435 /* Avoid the 8byte DMA problem */
7437 len += tp->dma_limit / 2;
7438 frag_len = tp->dma_limit / 2;
7441 tnapi->tx_buffers[*entry].fragmented = true;
7443 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7444 frag_len, tmp_flag, mss, vlan);
7447 *entry = NEXT_TX(*entry);
7454 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7455 len, flags, mss, vlan);
7457 *entry = NEXT_TX(*entry);
7460 tnapi->tx_buffers[prvidx].fragmented = false;
7464 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7465 len, flags, mss, vlan);
7466 *entry = NEXT_TX(*entry);
7472 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7475 struct sk_buff *skb;
7476 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7481 pci_unmap_single(tnapi->tp->pdev,
7482 dma_unmap_addr(txb, mapping),
7486 while (txb->fragmented) {
7487 txb->fragmented = false;
7488 entry = NEXT_TX(entry);
7489 txb = &tnapi->tx_buffers[entry];
7492 for (i = 0; i <= last; i++) {
7493 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7495 entry = NEXT_TX(entry);
7496 txb = &tnapi->tx_buffers[entry];
7498 pci_unmap_page(tnapi->tp->pdev,
7499 dma_unmap_addr(txb, mapping),
7500 skb_frag_size(frag), PCI_DMA_TODEVICE);
7502 while (txb->fragmented) {
7503 txb->fragmented = false;
7504 entry = NEXT_TX(entry);
7505 txb = &tnapi->tx_buffers[entry];
7510 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7511 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7512 struct sk_buff **pskb,
7513 u32 *entry, u32 *budget,
7514 u32 base_flags, u32 mss, u32 vlan)
7516 struct tg3 *tp = tnapi->tp;
7517 struct sk_buff *new_skb, *skb = *pskb;
7518 dma_addr_t new_addr = 0;
7521 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7522 new_skb = skb_copy(skb, GFP_ATOMIC);
7524 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7526 new_skb = skb_copy_expand(skb,
7527 skb_headroom(skb) + more_headroom,
7528 skb_tailroom(skb), GFP_ATOMIC);
7534 /* New SKB is guaranteed to be linear. */
7535 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7537 /* Make sure the mapping succeeded */
7538 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7539 dev_kfree_skb(new_skb);
7542 u32 save_entry = *entry;
7544 base_flags |= TXD_FLAG_END;
7546 tnapi->tx_buffers[*entry].skb = new_skb;
7547 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7550 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7551 new_skb->len, base_flags,
7553 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7554 dev_kfree_skb(new_skb);
7565 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7567 /* Use GSO to workaround a rare TSO bug that may be triggered when the
7568 * TSO header is greater than 80 bytes.
7570 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7572 struct sk_buff *segs, *nskb;
7573 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7575 /* Estimate the number of fragments in the worst case */
7576 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7577 netif_stop_queue(tp->dev);
7579 /* netif_tx_stop_queue() must be done before checking
7580 * checking tx index in tg3_tx_avail() below, because in
7581 * tg3_tx(), we update tx index before checking for
7582 * netif_tx_queue_stopped().
7585 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7586 return NETDEV_TX_BUSY;
7588 netif_wake_queue(tp->dev);
7591 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7593 goto tg3_tso_bug_end;
7599 tg3_start_xmit(nskb, tp->dev);
7605 return NETDEV_TX_OK;
7608 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7609 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7611 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7613 struct tg3 *tp = netdev_priv(dev);
7614 u32 len, entry, base_flags, mss, vlan = 0;
7616 int i = -1, would_hit_hwbug;
7618 struct tg3_napi *tnapi;
7619 struct netdev_queue *txq;
7622 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7623 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7624 if (tg3_flag(tp, ENABLE_TSS))
7627 budget = tg3_tx_avail(tnapi);
7629 /* We are running in BH disabled context with netif_tx_lock
7630 * and TX reclaim runs via tp->napi.poll inside of a software
7631 * interrupt. Furthermore, IRQ processing runs lockless so we have
7632 * no IRQ context deadlocks to worry about either. Rejoice!
7634 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7635 if (!netif_tx_queue_stopped(txq)) {
7636 netif_tx_stop_queue(txq);
7638 /* This is a hard error, log it. */
7640 "BUG! Tx Ring full when queue awake!\n");
7642 return NETDEV_TX_BUSY;
7645 entry = tnapi->tx_prod;
7647 if (skb->ip_summed == CHECKSUM_PARTIAL)
7648 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7650 mss = skb_shinfo(skb)->gso_size;
7653 u32 tcp_opt_len, hdr_len;
7655 if (skb_header_cloned(skb) &&
7656 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7660 tcp_opt_len = tcp_optlen(skb);
7662 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7664 if (!skb_is_gso_v6(skb)) {
7666 iph->tot_len = htons(mss + hdr_len);
7669 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7670 tg3_flag(tp, TSO_BUG))
7671 return tg3_tso_bug(tp, skb);
7673 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7674 TXD_FLAG_CPU_POST_DMA);
7676 if (tg3_flag(tp, HW_TSO_1) ||
7677 tg3_flag(tp, HW_TSO_2) ||
7678 tg3_flag(tp, HW_TSO_3)) {
7679 tcp_hdr(skb)->check = 0;
7680 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7682 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7687 if (tg3_flag(tp, HW_TSO_3)) {
7688 mss |= (hdr_len & 0xc) << 12;
7690 base_flags |= 0x00000010;
7691 base_flags |= (hdr_len & 0x3e0) << 5;
7692 } else if (tg3_flag(tp, HW_TSO_2))
7693 mss |= hdr_len << 9;
7694 else if (tg3_flag(tp, HW_TSO_1) ||
7695 tg3_asic_rev(tp) == ASIC_REV_5705) {
7696 if (tcp_opt_len || iph->ihl > 5) {
7699 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7700 mss |= (tsflags << 11);
7703 if (tcp_opt_len || iph->ihl > 5) {
7706 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7707 base_flags |= tsflags << 12;
7712 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7713 !mss && skb->len > VLAN_ETH_FRAME_LEN)
7714 base_flags |= TXD_FLAG_JMB_PKT;
7716 if (vlan_tx_tag_present(skb)) {
7717 base_flags |= TXD_FLAG_VLAN;
7718 vlan = vlan_tx_tag_get(skb);
7721 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7722 tg3_flag(tp, TX_TSTAMP_EN)) {
7723 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7724 base_flags |= TXD_FLAG_HWTSTAMP;
7727 len = skb_headlen(skb);
7729 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7730 if (pci_dma_mapping_error(tp->pdev, mapping))
7734 tnapi->tx_buffers[entry].skb = skb;
7735 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7737 would_hit_hwbug = 0;
7739 if (tg3_flag(tp, 5701_DMA_BUG))
7740 would_hit_hwbug = 1;
7742 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7743 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7745 would_hit_hwbug = 1;
7746 } else if (skb_shinfo(skb)->nr_frags > 0) {
7749 if (!tg3_flag(tp, HW_TSO_1) &&
7750 !tg3_flag(tp, HW_TSO_2) &&
7751 !tg3_flag(tp, HW_TSO_3))
7754 /* Now loop through additional data
7755 * fragments, and queue them.
7757 last = skb_shinfo(skb)->nr_frags - 1;
7758 for (i = 0; i <= last; i++) {
7759 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7761 len = skb_frag_size(frag);
7762 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7763 len, DMA_TO_DEVICE);
7765 tnapi->tx_buffers[entry].skb = NULL;
7766 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7768 if (dma_mapping_error(&tp->pdev->dev, mapping))
7772 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7774 ((i == last) ? TXD_FLAG_END : 0),
7776 would_hit_hwbug = 1;
7782 if (would_hit_hwbug) {
7783 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7785 /* If the workaround fails due to memory/mapping
7786 * failure, silently drop this packet.
7788 entry = tnapi->tx_prod;
7789 budget = tg3_tx_avail(tnapi);
7790 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7791 base_flags, mss, vlan))
7795 skb_tx_timestamp(skb);
7796 netdev_tx_sent_queue(txq, skb->len);
7798 /* Sync BD data before updating mailbox */
7801 /* Packets are ready, update Tx producer idx local and on card. */
7802 tw32_tx_mbox(tnapi->prodmbox, entry);
7804 tnapi->tx_prod = entry;
7805 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7806 netif_tx_stop_queue(txq);
7808 /* netif_tx_stop_queue() must be done before checking
7809 * checking tx index in tg3_tx_avail() below, because in
7810 * tg3_tx(), we update tx index before checking for
7811 * netif_tx_queue_stopped().
7814 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7815 netif_tx_wake_queue(txq);
7819 return NETDEV_TX_OK;
7822 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7823 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7828 return NETDEV_TX_OK;
7831 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7834 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7835 MAC_MODE_PORT_MODE_MASK);
7837 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7839 if (!tg3_flag(tp, 5705_PLUS))
7840 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7842 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7843 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7845 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7847 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7849 if (tg3_flag(tp, 5705_PLUS) ||
7850 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7851 tg3_asic_rev(tp) == ASIC_REV_5700)
7852 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7855 tw32(MAC_MODE, tp->mac_mode);
7859 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7861 u32 val, bmcr, mac_mode, ptest = 0;
7863 tg3_phy_toggle_apd(tp, false);
7864 tg3_phy_toggle_automdix(tp, 0);
7866 if (extlpbk && tg3_phy_set_extloopbk(tp))
7869 bmcr = BMCR_FULLDPLX;
7874 bmcr |= BMCR_SPEED100;
7878 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7880 bmcr |= BMCR_SPEED100;
7883 bmcr |= BMCR_SPEED1000;
7888 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7889 tg3_readphy(tp, MII_CTRL1000, &val);
7890 val |= CTL1000_AS_MASTER |
7891 CTL1000_ENABLE_MASTER;
7892 tg3_writephy(tp, MII_CTRL1000, val);
7894 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7895 MII_TG3_FET_PTEST_TRIM_2;
7896 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7899 bmcr |= BMCR_LOOPBACK;
7901 tg3_writephy(tp, MII_BMCR, bmcr);
7903 /* The write needs to be flushed for the FETs */
7904 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7905 tg3_readphy(tp, MII_BMCR, &bmcr);
7909 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7910 tg3_asic_rev(tp) == ASIC_REV_5785) {
7911 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7912 MII_TG3_FET_PTEST_FRC_TX_LINK |
7913 MII_TG3_FET_PTEST_FRC_TX_LOCK);
7915 /* The write needs to be flushed for the AC131 */
7916 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7919 /* Reset to prevent losing 1st rx packet intermittently */
7920 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7921 tg3_flag(tp, 5780_CLASS)) {
7922 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7924 tw32_f(MAC_RX_MODE, tp->rx_mode);
7927 mac_mode = tp->mac_mode &
7928 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7929 if (speed == SPEED_1000)
7930 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7932 mac_mode |= MAC_MODE_PORT_MODE_MII;
7934 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
7935 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7937 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7938 mac_mode &= ~MAC_MODE_LINK_POLARITY;
7939 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7940 mac_mode |= MAC_MODE_LINK_POLARITY;
7942 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7943 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7946 tw32(MAC_MODE, mac_mode);
7952 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7954 struct tg3 *tp = netdev_priv(dev);
7956 if (features & NETIF_F_LOOPBACK) {
7957 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7960 spin_lock_bh(&tp->lock);
7961 tg3_mac_loopback(tp, true);
7962 netif_carrier_on(tp->dev);
7963 spin_unlock_bh(&tp->lock);
7964 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7966 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7969 spin_lock_bh(&tp->lock);
7970 tg3_mac_loopback(tp, false);
7971 /* Force link status check */
7972 tg3_setup_phy(tp, 1);
7973 spin_unlock_bh(&tp->lock);
7974 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7978 static netdev_features_t tg3_fix_features(struct net_device *dev,
7979 netdev_features_t features)
7981 struct tg3 *tp = netdev_priv(dev);
7983 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7984 features &= ~NETIF_F_ALL_TSO;
7989 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7991 netdev_features_t changed = dev->features ^ features;
7993 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7994 tg3_set_loopback(dev, features);
7999 static void tg3_rx_prodring_free(struct tg3 *tp,
8000 struct tg3_rx_prodring_set *tpr)
8004 if (tpr != &tp->napi[0].prodring) {
8005 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8006 i = (i + 1) & tp->rx_std_ring_mask)
8007 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8010 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8011 for (i = tpr->rx_jmb_cons_idx;
8012 i != tpr->rx_jmb_prod_idx;
8013 i = (i + 1) & tp->rx_jmb_ring_mask) {
8014 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8022 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8023 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8026 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8027 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8028 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8033 /* Initialize rx rings for packet processing.
8035 * The chip has been shut down and the driver detached from
8036 * the networking, so no interrupts or new tx packets will
8037 * end up in the driver. tp->{tx,}lock are held and thus
8040 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8041 struct tg3_rx_prodring_set *tpr)
8043 u32 i, rx_pkt_dma_sz;
8045 tpr->rx_std_cons_idx = 0;
8046 tpr->rx_std_prod_idx = 0;
8047 tpr->rx_jmb_cons_idx = 0;
8048 tpr->rx_jmb_prod_idx = 0;
8050 if (tpr != &tp->napi[0].prodring) {
8051 memset(&tpr->rx_std_buffers[0], 0,
8052 TG3_RX_STD_BUFF_RING_SIZE(tp));
8053 if (tpr->rx_jmb_buffers)
8054 memset(&tpr->rx_jmb_buffers[0], 0,
8055 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8059 /* Zero out all descriptors. */
8060 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8062 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8063 if (tg3_flag(tp, 5780_CLASS) &&
8064 tp->dev->mtu > ETH_DATA_LEN)
8065 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8066 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8068 /* Initialize invariants of the rings, we only set this
8069 * stuff once. This works because the card does not
8070 * write into the rx buffer posting rings.
8072 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8073 struct tg3_rx_buffer_desc *rxd;
8075 rxd = &tpr->rx_std[i];
8076 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8077 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8078 rxd->opaque = (RXD_OPAQUE_RING_STD |
8079 (i << RXD_OPAQUE_INDEX_SHIFT));
8082 /* Now allocate fresh SKBs for each rx ring. */
8083 for (i = 0; i < tp->rx_pending; i++) {
8084 unsigned int frag_size;
8086 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8088 netdev_warn(tp->dev,
8089 "Using a smaller RX standard ring. Only "
8090 "%d out of %d buffers were allocated "
8091 "successfully\n", i, tp->rx_pending);
8099 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8102 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8104 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8107 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8108 struct tg3_rx_buffer_desc *rxd;
8110 rxd = &tpr->rx_jmb[i].std;
8111 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8112 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8114 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8115 (i << RXD_OPAQUE_INDEX_SHIFT));
8118 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8119 unsigned int frag_size;
8121 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8123 netdev_warn(tp->dev,
8124 "Using a smaller RX jumbo ring. Only %d "
8125 "out of %d buffers were allocated "
8126 "successfully\n", i, tp->rx_jumbo_pending);
8129 tp->rx_jumbo_pending = i;
8138 tg3_rx_prodring_free(tp, tpr);
8142 static void tg3_rx_prodring_fini(struct tg3 *tp,
8143 struct tg3_rx_prodring_set *tpr)
8145 kfree(tpr->rx_std_buffers);
8146 tpr->rx_std_buffers = NULL;
8147 kfree(tpr->rx_jmb_buffers);
8148 tpr->rx_jmb_buffers = NULL;
8150 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8151 tpr->rx_std, tpr->rx_std_mapping);
8155 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8156 tpr->rx_jmb, tpr->rx_jmb_mapping);
8161 static int tg3_rx_prodring_init(struct tg3 *tp,
8162 struct tg3_rx_prodring_set *tpr)
8164 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8166 if (!tpr->rx_std_buffers)
8169 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8170 TG3_RX_STD_RING_BYTES(tp),
8171 &tpr->rx_std_mapping,
8176 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8177 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8179 if (!tpr->rx_jmb_buffers)
8182 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8183 TG3_RX_JMB_RING_BYTES(tp),
8184 &tpr->rx_jmb_mapping,
8193 tg3_rx_prodring_fini(tp, tpr);
8197 /* Free up pending packets in all rx/tx rings.
8199 * The chip has been shut down and the driver detached from
8200 * the networking, so no interrupts or new tx packets will
8201 * end up in the driver. tp->{tx,}lock is not held and we are not
8202 * in an interrupt context and thus may sleep.
8204 static void tg3_free_rings(struct tg3 *tp)
8208 for (j = 0; j < tp->irq_cnt; j++) {
8209 struct tg3_napi *tnapi = &tp->napi[j];
8211 tg3_rx_prodring_free(tp, &tnapi->prodring);
8213 if (!tnapi->tx_buffers)
8216 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8217 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8222 tg3_tx_skb_unmap(tnapi, i,
8223 skb_shinfo(skb)->nr_frags - 1);
8225 dev_kfree_skb_any(skb);
8227 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8231 /* Initialize tx/rx rings for packet processing.
8233 * The chip has been shut down and the driver detached from
8234 * the networking, so no interrupts or new tx packets will
8235 * end up in the driver. tp->{tx,}lock are held and thus
8238 static int tg3_init_rings(struct tg3 *tp)
8242 /* Free up all the SKBs. */
8245 for (i = 0; i < tp->irq_cnt; i++) {
8246 struct tg3_napi *tnapi = &tp->napi[i];
8248 tnapi->last_tag = 0;
8249 tnapi->last_irq_tag = 0;
8250 tnapi->hw_status->status = 0;
8251 tnapi->hw_status->status_tag = 0;
8252 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8257 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8259 tnapi->rx_rcb_ptr = 0;
8261 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8263 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8272 static void tg3_mem_tx_release(struct tg3 *tp)
8276 for (i = 0; i < tp->irq_max; i++) {
8277 struct tg3_napi *tnapi = &tp->napi[i];
8279 if (tnapi->tx_ring) {
8280 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8281 tnapi->tx_ring, tnapi->tx_desc_mapping);
8282 tnapi->tx_ring = NULL;
8285 kfree(tnapi->tx_buffers);
8286 tnapi->tx_buffers = NULL;
8290 static int tg3_mem_tx_acquire(struct tg3 *tp)
8293 struct tg3_napi *tnapi = &tp->napi[0];
8295 /* If multivector TSS is enabled, vector 0 does not handle
8296 * tx interrupts. Don't allocate any resources for it.
8298 if (tg3_flag(tp, ENABLE_TSS))
8301 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8302 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8303 TG3_TX_RING_SIZE, GFP_KERNEL);
8304 if (!tnapi->tx_buffers)
8307 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8309 &tnapi->tx_desc_mapping,
8311 if (!tnapi->tx_ring)
8318 tg3_mem_tx_release(tp);
8322 static void tg3_mem_rx_release(struct tg3 *tp)
8326 for (i = 0; i < tp->irq_max; i++) {
8327 struct tg3_napi *tnapi = &tp->napi[i];
8329 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8334 dma_free_coherent(&tp->pdev->dev,
8335 TG3_RX_RCB_RING_BYTES(tp),
8337 tnapi->rx_rcb_mapping);
8338 tnapi->rx_rcb = NULL;
8342 static int tg3_mem_rx_acquire(struct tg3 *tp)
8344 unsigned int i, limit;
8346 limit = tp->rxq_cnt;
8348 /* If RSS is enabled, we need a (dummy) producer ring
8349 * set on vector zero. This is the true hw prodring.
8351 if (tg3_flag(tp, ENABLE_RSS))
8354 for (i = 0; i < limit; i++) {
8355 struct tg3_napi *tnapi = &tp->napi[i];
8357 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8360 /* If multivector RSS is enabled, vector 0
8361 * does not handle rx or tx interrupts.
8362 * Don't allocate any resources for it.
8364 if (!i && tg3_flag(tp, ENABLE_RSS))
8367 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8368 TG3_RX_RCB_RING_BYTES(tp),
8369 &tnapi->rx_rcb_mapping,
8370 GFP_KERNEL | __GFP_ZERO);
8378 tg3_mem_rx_release(tp);
8383 * Must not be invoked with interrupt sources disabled and
8384 * the hardware shutdown down.
8386 static void tg3_free_consistent(struct tg3 *tp)
8390 for (i = 0; i < tp->irq_cnt; i++) {
8391 struct tg3_napi *tnapi = &tp->napi[i];
8393 if (tnapi->hw_status) {
8394 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8396 tnapi->status_mapping);
8397 tnapi->hw_status = NULL;
8401 tg3_mem_rx_release(tp);
8402 tg3_mem_tx_release(tp);
8405 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8406 tp->hw_stats, tp->stats_mapping);
8407 tp->hw_stats = NULL;
8412 * Must not be invoked with interrupt sources disabled and
8413 * the hardware shutdown down. Can sleep.
8415 static int tg3_alloc_consistent(struct tg3 *tp)
8419 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8420 sizeof(struct tg3_hw_stats),
8422 GFP_KERNEL | __GFP_ZERO);
8426 for (i = 0; i < tp->irq_cnt; i++) {
8427 struct tg3_napi *tnapi = &tp->napi[i];
8428 struct tg3_hw_status *sblk;
8430 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8432 &tnapi->status_mapping,
8433 GFP_KERNEL | __GFP_ZERO);
8434 if (!tnapi->hw_status)
8437 sblk = tnapi->hw_status;
8439 if (tg3_flag(tp, ENABLE_RSS)) {
8440 u16 *prodptr = NULL;
8443 * When RSS is enabled, the status block format changes
8444 * slightly. The "rx_jumbo_consumer", "reserved",
8445 * and "rx_mini_consumer" members get mapped to the
8446 * other three rx return ring producer indexes.
8450 prodptr = &sblk->idx[0].rx_producer;
8453 prodptr = &sblk->rx_jumbo_consumer;
8456 prodptr = &sblk->reserved;
8459 prodptr = &sblk->rx_mini_consumer;
8462 tnapi->rx_rcb_prod_idx = prodptr;
8464 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8468 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8474 tg3_free_consistent(tp);
8478 #define MAX_WAIT_CNT 1000
8480 /* To stop a block, clear the enable bit and poll till it
8481 * clears. tp->lock is held.
8483 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
8488 if (tg3_flag(tp, 5705_PLUS)) {
8495 /* We can't enable/disable these bits of the
8496 * 5705/5750, just say success.
8509 for (i = 0; i < MAX_WAIT_CNT; i++) {
8512 if ((val & enable_bit) == 0)
8516 if (i == MAX_WAIT_CNT && !silent) {
8517 dev_err(&tp->pdev->dev,
8518 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8526 /* tp->lock is held. */
8527 static int tg3_abort_hw(struct tg3 *tp, int silent)
8531 tg3_disable_ints(tp);
8533 tp->rx_mode &= ~RX_MODE_ENABLE;
8534 tw32_f(MAC_RX_MODE, tp->rx_mode);
8537 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8538 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8539 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8540 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8541 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8542 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8544 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8545 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8546 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8547 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8548 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8549 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8550 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8552 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8553 tw32_f(MAC_MODE, tp->mac_mode);
8556 tp->tx_mode &= ~TX_MODE_ENABLE;
8557 tw32_f(MAC_TX_MODE, tp->tx_mode);
8559 for (i = 0; i < MAX_WAIT_CNT; i++) {
8561 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8564 if (i >= MAX_WAIT_CNT) {
8565 dev_err(&tp->pdev->dev,
8566 "%s timed out, TX_MODE_ENABLE will not clear "
8567 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8571 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8572 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8573 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8575 tw32(FTQ_RESET, 0xffffffff);
8576 tw32(FTQ_RESET, 0x00000000);
8578 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8579 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8581 for (i = 0; i < tp->irq_cnt; i++) {
8582 struct tg3_napi *tnapi = &tp->napi[i];
8583 if (tnapi->hw_status)
8584 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8590 /* Save PCI command register before chip reset */
8591 static void tg3_save_pci_state(struct tg3 *tp)
8593 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8596 /* Restore PCI state after chip reset */
8597 static void tg3_restore_pci_state(struct tg3 *tp)
8601 /* Re-enable indirect register accesses. */
8602 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8603 tp->misc_host_ctrl);
8605 /* Set MAX PCI retry to zero. */
8606 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8607 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8608 tg3_flag(tp, PCIX_MODE))
8609 val |= PCISTATE_RETRY_SAME_DMA;
8610 /* Allow reads and writes to the APE register and memory space. */
8611 if (tg3_flag(tp, ENABLE_APE))
8612 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8613 PCISTATE_ALLOW_APE_SHMEM_WR |
8614 PCISTATE_ALLOW_APE_PSPACE_WR;
8615 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8617 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8619 if (!tg3_flag(tp, PCI_EXPRESS)) {
8620 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8621 tp->pci_cacheline_sz);
8622 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8626 /* Make sure PCI-X relaxed ordering bit is clear. */
8627 if (tg3_flag(tp, PCIX_MODE)) {
8630 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8632 pcix_cmd &= ~PCI_X_CMD_ERO;
8633 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8637 if (tg3_flag(tp, 5780_CLASS)) {
8639 /* Chip reset on 5780 will reset MSI enable bit,
8640 * so need to restore it.
8642 if (tg3_flag(tp, USING_MSI)) {
8645 pci_read_config_word(tp->pdev,
8646 tp->msi_cap + PCI_MSI_FLAGS,
8648 pci_write_config_word(tp->pdev,
8649 tp->msi_cap + PCI_MSI_FLAGS,
8650 ctrl | PCI_MSI_FLAGS_ENABLE);
8651 val = tr32(MSGINT_MODE);
8652 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8657 /* tp->lock is held. */
8658 static int tg3_chip_reset(struct tg3 *tp)
8661 void (*write_op)(struct tg3 *, u32, u32);
8666 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8668 /* No matching tg3_nvram_unlock() after this because
8669 * chip reset below will undo the nvram lock.
8671 tp->nvram_lock_cnt = 0;
8673 /* GRC_MISC_CFG core clock reset will clear the memory
8674 * enable bit in PCI register 4 and the MSI enable bit
8675 * on some chips, so we save relevant registers here.
8677 tg3_save_pci_state(tp);
8679 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8680 tg3_flag(tp, 5755_PLUS))
8681 tw32(GRC_FASTBOOT_PC, 0);
8684 * We must avoid the readl() that normally takes place.
8685 * It locks machines, causes machine checks, and other
8686 * fun things. So, temporarily disable the 5701
8687 * hardware workaround, while we do the reset.
8689 write_op = tp->write32;
8690 if (write_op == tg3_write_flush_reg32)
8691 tp->write32 = tg3_write32;
8693 /* Prevent the irq handler from reading or writing PCI registers
8694 * during chip reset when the memory enable bit in the PCI command
8695 * register may be cleared. The chip does not generate interrupt
8696 * at this time, but the irq handler may still be called due to irq
8697 * sharing or irqpoll.
8699 tg3_flag_set(tp, CHIP_RESETTING);
8700 for (i = 0; i < tp->irq_cnt; i++) {
8701 struct tg3_napi *tnapi = &tp->napi[i];
8702 if (tnapi->hw_status) {
8703 tnapi->hw_status->status = 0;
8704 tnapi->hw_status->status_tag = 0;
8706 tnapi->last_tag = 0;
8707 tnapi->last_irq_tag = 0;
8711 for (i = 0; i < tp->irq_cnt; i++)
8712 synchronize_irq(tp->napi[i].irq_vec);
8714 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8715 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8716 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8720 val = GRC_MISC_CFG_CORECLK_RESET;
8722 if (tg3_flag(tp, PCI_EXPRESS)) {
8723 /* Force PCIe 1.0a mode */
8724 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8725 !tg3_flag(tp, 57765_PLUS) &&
8726 tr32(TG3_PCIE_PHY_TSTCTL) ==
8727 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8728 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8730 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
8731 tw32(GRC_MISC_CFG, (1 << 29));
8736 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
8737 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8738 tw32(GRC_VCPU_EXT_CTRL,
8739 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8742 /* Manage gphy power for all CPMU absent PCIe devices. */
8743 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8744 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8746 tw32(GRC_MISC_CFG, val);
8748 /* restore 5701 hardware bug workaround write method */
8749 tp->write32 = write_op;
8751 /* Unfortunately, we have to delay before the PCI read back.
8752 * Some 575X chips even will not respond to a PCI cfg access
8753 * when the reset command is given to the chip.
8755 * How do these hardware designers expect things to work
8756 * properly if the PCI write is posted for a long period
8757 * of time? It is always necessary to have some method by
8758 * which a register read back can occur to push the write
8759 * out which does the reset.
8761 * For most tg3 variants the trick below was working.
8766 /* Flush PCI posted writes. The normal MMIO registers
8767 * are inaccessible at this time so this is the only
8768 * way to make this reliably (actually, this is no longer
8769 * the case, see above). I tried to use indirect
8770 * register read/write but this upset some 5701 variants.
8772 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8776 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8779 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
8783 /* Wait for link training to complete. */
8784 for (j = 0; j < 5000; j++)
8787 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8788 pci_write_config_dword(tp->pdev, 0xc4,
8789 cfg_val | (1 << 15));
8792 /* Clear the "no snoop" and "relaxed ordering" bits. */
8793 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8795 * Older PCIe devices only support the 128 byte
8796 * MPS setting. Enforce the restriction.
8798 if (!tg3_flag(tp, CPMU_PRESENT))
8799 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8800 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8802 /* Clear error status */
8803 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8804 PCI_EXP_DEVSTA_CED |
8805 PCI_EXP_DEVSTA_NFED |
8806 PCI_EXP_DEVSTA_FED |
8807 PCI_EXP_DEVSTA_URD);
8810 tg3_restore_pci_state(tp);
8812 tg3_flag_clear(tp, CHIP_RESETTING);
8813 tg3_flag_clear(tp, ERROR_PROCESSED);
8816 if (tg3_flag(tp, 5780_CLASS))
8817 val = tr32(MEMARB_MODE);
8818 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8820 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
8822 tw32(0x5000, 0x400);
8825 if (tg3_flag(tp, IS_SSB_CORE)) {
8827 * BCM4785: In order to avoid repercussions from using
8828 * potentially defective internal ROM, stop the Rx RISC CPU,
8829 * which is not required.
8832 tg3_halt_cpu(tp, RX_CPU_BASE);
8835 tw32(GRC_MODE, tp->grc_mode);
8837 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
8840 tw32(0xc4, val | (1 << 15));
8843 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8844 tg3_asic_rev(tp) == ASIC_REV_5705) {
8845 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8846 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
8847 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8848 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8851 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8852 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8854 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8855 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8860 tw32_f(MAC_MODE, val);
8863 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8865 err = tg3_poll_fw(tp);
8871 if (tg3_flag(tp, PCI_EXPRESS) &&
8872 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
8873 tg3_asic_rev(tp) != ASIC_REV_5785 &&
8874 !tg3_flag(tp, 57765_PLUS)) {
8877 tw32(0x7c00, val | (1 << 25));
8880 if (tg3_asic_rev(tp) == ASIC_REV_5720) {
8881 val = tr32(TG3_CPMU_CLCK_ORIDE);
8882 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8885 /* Reprobe ASF enable state. */
8886 tg3_flag_clear(tp, ENABLE_ASF);
8887 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
8888 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
8890 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8891 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8892 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8895 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8896 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8897 tg3_flag_set(tp, ENABLE_ASF);
8898 tp->last_event_jiffies = jiffies;
8899 if (tg3_flag(tp, 5750_PLUS))
8900 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8902 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
8903 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
8904 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
8905 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
8906 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
8913 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8914 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8916 /* tp->lock is held. */
8917 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8923 tg3_write_sig_pre_reset(tp, kind);
8925 tg3_abort_hw(tp, silent);
8926 err = tg3_chip_reset(tp);
8928 __tg3_set_mac_addr(tp, 0);
8930 tg3_write_sig_legacy(tp, kind);
8931 tg3_write_sig_post_reset(tp, kind);
8934 /* Save the stats across chip resets... */
8935 tg3_get_nstats(tp, &tp->net_stats_prev);
8936 tg3_get_estats(tp, &tp->estats_prev);
8938 /* And make sure the next sample is new data */
8939 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8948 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8950 struct tg3 *tp = netdev_priv(dev);
8951 struct sockaddr *addr = p;
8952 int err = 0, skip_mac_1 = 0;
8954 if (!is_valid_ether_addr(addr->sa_data))
8955 return -EADDRNOTAVAIL;
8957 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8959 if (!netif_running(dev))
8962 if (tg3_flag(tp, ENABLE_ASF)) {
8963 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8965 addr0_high = tr32(MAC_ADDR_0_HIGH);
8966 addr0_low = tr32(MAC_ADDR_0_LOW);
8967 addr1_high = tr32(MAC_ADDR_1_HIGH);
8968 addr1_low = tr32(MAC_ADDR_1_LOW);
8970 /* Skip MAC addr 1 if ASF is using it. */
8971 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8972 !(addr1_high == 0 && addr1_low == 0))
8975 spin_lock_bh(&tp->lock);
8976 __tg3_set_mac_addr(tp, skip_mac_1);
8977 spin_unlock_bh(&tp->lock);
8982 /* tp->lock is held. */
8983 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8984 dma_addr_t mapping, u32 maxlen_flags,
8988 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8989 ((u64) mapping >> 32));
8991 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8992 ((u64) mapping & 0xffffffff));
8994 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8997 if (!tg3_flag(tp, 5705_PLUS))
8999 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9004 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9008 if (!tg3_flag(tp, ENABLE_TSS)) {
9009 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9010 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9011 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9013 tw32(HOSTCC_TXCOL_TICKS, 0);
9014 tw32(HOSTCC_TXMAX_FRAMES, 0);
9015 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9017 for (; i < tp->txq_cnt; i++) {
9020 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9021 tw32(reg, ec->tx_coalesce_usecs);
9022 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9023 tw32(reg, ec->tx_max_coalesced_frames);
9024 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9025 tw32(reg, ec->tx_max_coalesced_frames_irq);
9029 for (; i < tp->irq_max - 1; i++) {
9030 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9031 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9032 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9036 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9039 u32 limit = tp->rxq_cnt;
9041 if (!tg3_flag(tp, ENABLE_RSS)) {
9042 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9043 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9044 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9047 tw32(HOSTCC_RXCOL_TICKS, 0);
9048 tw32(HOSTCC_RXMAX_FRAMES, 0);
9049 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9052 for (; i < limit; i++) {
9055 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9056 tw32(reg, ec->rx_coalesce_usecs);
9057 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9058 tw32(reg, ec->rx_max_coalesced_frames);
9059 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9060 tw32(reg, ec->rx_max_coalesced_frames_irq);
9063 for (; i < tp->irq_max - 1; i++) {
9064 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9065 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9066 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9070 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9072 tg3_coal_tx_init(tp, ec);
9073 tg3_coal_rx_init(tp, ec);
9075 if (!tg3_flag(tp, 5705_PLUS)) {
9076 u32 val = ec->stats_block_coalesce_usecs;
9078 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9079 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9084 tw32(HOSTCC_STAT_COAL_TICKS, val);
9088 /* tp->lock is held. */
9089 static void tg3_rings_reset(struct tg3 *tp)
9092 u32 stblk, txrcb, rxrcb, limit;
9093 struct tg3_napi *tnapi = &tp->napi[0];
9095 /* Disable all transmit rings but the first. */
9096 if (!tg3_flag(tp, 5705_PLUS))
9097 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9098 else if (tg3_flag(tp, 5717_PLUS))
9099 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9100 else if (tg3_flag(tp, 57765_CLASS) ||
9101 tg3_asic_rev(tp) == ASIC_REV_5762)
9102 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9104 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9106 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9107 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9108 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9109 BDINFO_FLAGS_DISABLED);
9112 /* Disable all receive return rings but the first. */
9113 if (tg3_flag(tp, 5717_PLUS))
9114 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9115 else if (!tg3_flag(tp, 5705_PLUS))
9116 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9117 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9118 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9119 tg3_flag(tp, 57765_CLASS))
9120 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9122 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9124 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9125 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9126 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9127 BDINFO_FLAGS_DISABLED);
9129 /* Disable interrupts */
9130 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9131 tp->napi[0].chk_msi_cnt = 0;
9132 tp->napi[0].last_rx_cons = 0;
9133 tp->napi[0].last_tx_cons = 0;
9135 /* Zero mailbox registers. */
9136 if (tg3_flag(tp, SUPPORT_MSIX)) {
9137 for (i = 1; i < tp->irq_max; i++) {
9138 tp->napi[i].tx_prod = 0;
9139 tp->napi[i].tx_cons = 0;
9140 if (tg3_flag(tp, ENABLE_TSS))
9141 tw32_mailbox(tp->napi[i].prodmbox, 0);
9142 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9143 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9144 tp->napi[i].chk_msi_cnt = 0;
9145 tp->napi[i].last_rx_cons = 0;
9146 tp->napi[i].last_tx_cons = 0;
9148 if (!tg3_flag(tp, ENABLE_TSS))
9149 tw32_mailbox(tp->napi[0].prodmbox, 0);
9151 tp->napi[0].tx_prod = 0;
9152 tp->napi[0].tx_cons = 0;
9153 tw32_mailbox(tp->napi[0].prodmbox, 0);
9154 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9157 /* Make sure the NIC-based send BD rings are disabled. */
9158 if (!tg3_flag(tp, 5705_PLUS)) {
9159 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9160 for (i = 0; i < 16; i++)
9161 tw32_tx_mbox(mbox + i * 8, 0);
9164 txrcb = NIC_SRAM_SEND_RCB;
9165 rxrcb = NIC_SRAM_RCV_RET_RCB;
9167 /* Clear status block in ram. */
9168 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9170 /* Set status block DMA address */
9171 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9172 ((u64) tnapi->status_mapping >> 32));
9173 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9174 ((u64) tnapi->status_mapping & 0xffffffff));
9176 if (tnapi->tx_ring) {
9177 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9178 (TG3_TX_RING_SIZE <<
9179 BDINFO_FLAGS_MAXLEN_SHIFT),
9180 NIC_SRAM_TX_BUFFER_DESC);
9181 txrcb += TG3_BDINFO_SIZE;
9184 if (tnapi->rx_rcb) {
9185 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9186 (tp->rx_ret_ring_mask + 1) <<
9187 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9188 rxrcb += TG3_BDINFO_SIZE;
9191 stblk = HOSTCC_STATBLCK_RING1;
9193 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9194 u64 mapping = (u64)tnapi->status_mapping;
9195 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9196 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9198 /* Clear status block in ram. */
9199 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9201 if (tnapi->tx_ring) {
9202 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9203 (TG3_TX_RING_SIZE <<
9204 BDINFO_FLAGS_MAXLEN_SHIFT),
9205 NIC_SRAM_TX_BUFFER_DESC);
9206 txrcb += TG3_BDINFO_SIZE;
9209 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9210 ((tp->rx_ret_ring_mask + 1) <<
9211 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
9214 rxrcb += TG3_BDINFO_SIZE;
9218 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9220 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9222 if (!tg3_flag(tp, 5750_PLUS) ||
9223 tg3_flag(tp, 5780_CLASS) ||
9224 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9225 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9226 tg3_flag(tp, 57765_PLUS))
9227 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9228 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9229 tg3_asic_rev(tp) == ASIC_REV_5787)
9230 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9232 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9234 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9235 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9237 val = min(nic_rep_thresh, host_rep_thresh);
9238 tw32(RCVBDI_STD_THRESH, val);
9240 if (tg3_flag(tp, 57765_PLUS))
9241 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9243 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9246 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9248 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9250 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9251 tw32(RCVBDI_JUMBO_THRESH, val);
9253 if (tg3_flag(tp, 57765_PLUS))
9254 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9257 static inline u32 calc_crc(unsigned char *buf, int len)
9265 for (j = 0; j < len; j++) {
9268 for (k = 0; k < 8; k++) {
9281 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9283 /* accept or reject all multicast frames */
9284 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9285 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9286 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9287 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9290 static void __tg3_set_rx_mode(struct net_device *dev)
9292 struct tg3 *tp = netdev_priv(dev);
9295 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9296 RX_MODE_KEEP_VLAN_TAG);
9298 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9299 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9302 if (!tg3_flag(tp, ENABLE_ASF))
9303 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9306 if (dev->flags & IFF_PROMISC) {
9307 /* Promiscuous mode. */
9308 rx_mode |= RX_MODE_PROMISC;
9309 } else if (dev->flags & IFF_ALLMULTI) {
9310 /* Accept all multicast. */
9311 tg3_set_multi(tp, 1);
9312 } else if (netdev_mc_empty(dev)) {
9313 /* Reject all multicast. */
9314 tg3_set_multi(tp, 0);
9316 /* Accept one or more multicast(s). */
9317 struct netdev_hw_addr *ha;
9318 u32 mc_filter[4] = { 0, };
9323 netdev_for_each_mc_addr(ha, dev) {
9324 crc = calc_crc(ha->addr, ETH_ALEN);
9326 regidx = (bit & 0x60) >> 5;
9328 mc_filter[regidx] |= (1 << bit);
9331 tw32(MAC_HASH_REG_0, mc_filter[0]);
9332 tw32(MAC_HASH_REG_1, mc_filter[1]);
9333 tw32(MAC_HASH_REG_2, mc_filter[2]);
9334 tw32(MAC_HASH_REG_3, mc_filter[3]);
9337 if (rx_mode != tp->rx_mode) {
9338 tp->rx_mode = rx_mode;
9339 tw32_f(MAC_RX_MODE, rx_mode);
9344 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9348 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9349 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9352 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9356 if (!tg3_flag(tp, SUPPORT_MSIX))
9359 if (tp->rxq_cnt == 1) {
9360 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9364 /* Validate table against current IRQ count */
9365 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9366 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9370 if (i != TG3_RSS_INDIR_TBL_SIZE)
9371 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9374 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9377 u32 reg = MAC_RSS_INDIR_TBL_0;
9379 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9380 u32 val = tp->rss_ind_tbl[i];
9382 for (; i % 8; i++) {
9384 val |= tp->rss_ind_tbl[i];
9391 /* tp->lock is held. */
9392 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
9394 u32 val, rdmac_mode;
9396 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9398 tg3_disable_ints(tp);
9402 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9404 if (tg3_flag(tp, INIT_COMPLETE))
9405 tg3_abort_hw(tp, 1);
9407 /* Enable MAC control of LPI */
9408 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
9409 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
9410 TG3_CPMU_EEE_LNKIDL_UART_IDL;
9411 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9412 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
9414 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
9416 tw32_f(TG3_CPMU_EEE_CTRL,
9417 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9419 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9420 TG3_CPMU_EEEMD_LPI_IN_TX |
9421 TG3_CPMU_EEEMD_LPI_IN_RX |
9422 TG3_CPMU_EEEMD_EEE_ENABLE;
9424 if (tg3_asic_rev(tp) != ASIC_REV_5717)
9425 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9427 if (tg3_flag(tp, ENABLE_APE))
9428 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9430 tw32_f(TG3_CPMU_EEE_MODE, val);
9432 tw32_f(TG3_CPMU_EEE_DBTMR1,
9433 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9434 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9436 tw32_f(TG3_CPMU_EEE_DBTMR2,
9437 TG3_CPMU_DBTMR2_APE_TX_2047US |
9438 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9441 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9442 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9443 tg3_phy_pull_config(tp);
9444 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9450 err = tg3_chip_reset(tp);
9454 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9456 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9457 val = tr32(TG3_CPMU_CTRL);
9458 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9459 tw32(TG3_CPMU_CTRL, val);
9461 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9462 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9463 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9464 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9466 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9467 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9468 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9469 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9471 val = tr32(TG3_CPMU_HST_ACC);
9472 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9473 val |= CPMU_HST_ACC_MACCLK_6_25;
9474 tw32(TG3_CPMU_HST_ACC, val);
9477 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9478 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9479 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9480 PCIE_PWR_MGMT_L1_THRESH_4MS;
9481 tw32(PCIE_PWR_MGMT_THRESH, val);
9483 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9484 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9486 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9488 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9489 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9492 if (tg3_flag(tp, L1PLLPD_EN)) {
9493 u32 grc_mode = tr32(GRC_MODE);
9495 /* Access the lower 1K of PL PCIE block registers. */
9496 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9497 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9499 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9500 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9501 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9503 tw32(GRC_MODE, grc_mode);
9506 if (tg3_flag(tp, 57765_CLASS)) {
9507 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9508 u32 grc_mode = tr32(GRC_MODE);
9510 /* Access the lower 1K of PL PCIE block registers. */
9511 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9512 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9514 val = tr32(TG3_PCIE_TLDLPL_PORT +
9515 TG3_PCIE_PL_LO_PHYCTL5);
9516 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9517 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9519 tw32(GRC_MODE, grc_mode);
9522 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9525 /* Fix transmit hangs */
9526 val = tr32(TG3_CPMU_PADRNG_CTL);
9527 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9528 tw32(TG3_CPMU_PADRNG_CTL, val);
9530 grc_mode = tr32(GRC_MODE);
9532 /* Access the lower 1K of DL PCIE block registers. */
9533 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9534 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9536 val = tr32(TG3_PCIE_TLDLPL_PORT +
9537 TG3_PCIE_DL_LO_FTSMAX);
9538 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9539 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9540 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9542 tw32(GRC_MODE, grc_mode);
9545 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9546 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9547 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9548 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9551 /* This works around an issue with Athlon chipsets on
9552 * B3 tigon3 silicon. This bit has no effect on any
9553 * other revision. But do not set this on PCI Express
9554 * chips and don't even touch the clocks if the CPMU is present.
9556 if (!tg3_flag(tp, CPMU_PRESENT)) {
9557 if (!tg3_flag(tp, PCI_EXPRESS))
9558 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9559 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9562 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9563 tg3_flag(tp, PCIX_MODE)) {
9564 val = tr32(TG3PCI_PCISTATE);
9565 val |= PCISTATE_RETRY_SAME_DMA;
9566 tw32(TG3PCI_PCISTATE, val);
9569 if (tg3_flag(tp, ENABLE_APE)) {
9570 /* Allow reads and writes to the
9571 * APE register and memory space.
9573 val = tr32(TG3PCI_PCISTATE);
9574 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9575 PCISTATE_ALLOW_APE_SHMEM_WR |
9576 PCISTATE_ALLOW_APE_PSPACE_WR;
9577 tw32(TG3PCI_PCISTATE, val);
9580 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9581 /* Enable some hw fixes. */
9582 val = tr32(TG3PCI_MSI_DATA);
9583 val |= (1 << 26) | (1 << 28) | (1 << 29);
9584 tw32(TG3PCI_MSI_DATA, val);
9587 /* Descriptor ring init may make accesses to the
9588 * NIC SRAM area to setup the TX descriptors, so we
9589 * can only do this after the hardware has been
9590 * successfully reset.
9592 err = tg3_init_rings(tp);
9596 if (tg3_flag(tp, 57765_PLUS)) {
9597 val = tr32(TG3PCI_DMA_RW_CTRL) &
9598 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9599 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9600 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9601 if (!tg3_flag(tp, 57765_CLASS) &&
9602 tg3_asic_rev(tp) != ASIC_REV_5717 &&
9603 tg3_asic_rev(tp) != ASIC_REV_5762)
9604 val |= DMA_RWCTRL_TAGGED_STAT_WA;
9605 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9606 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9607 tg3_asic_rev(tp) != ASIC_REV_5761) {
9608 /* This value is determined during the probe time DMA
9609 * engine test, tg3_test_dma.
9611 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9614 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9615 GRC_MODE_4X_NIC_SEND_RINGS |
9616 GRC_MODE_NO_TX_PHDR_CSUM |
9617 GRC_MODE_NO_RX_PHDR_CSUM);
9618 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9620 /* Pseudo-header checksum is done by hardware logic and not
9621 * the offload processers, so make the chip do the pseudo-
9622 * header checksums on receive. For transmit it is more
9623 * convenient to do the pseudo-header checksum in software
9624 * as Linux does that on transmit for us in all cases.
9626 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9628 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9630 tw32(TG3_RX_PTP_CTL,
9631 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9633 if (tg3_flag(tp, PTP_CAPABLE))
9634 val |= GRC_MODE_TIME_SYNC_ENABLE;
9636 tw32(GRC_MODE, tp->grc_mode | val);
9638 /* Setup the timer prescalar register. Clock is always 66Mhz. */
9639 val = tr32(GRC_MISC_CFG);
9641 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9642 tw32(GRC_MISC_CFG, val);
9644 /* Initialize MBUF/DESC pool. */
9645 if (tg3_flag(tp, 5750_PLUS)) {
9647 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9648 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9649 if (tg3_asic_rev(tp) == ASIC_REV_5704)
9650 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9652 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9653 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9654 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9655 } else if (tg3_flag(tp, TSO_CAPABLE)) {
9658 fw_len = tp->fw_len;
9659 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9660 tw32(BUFMGR_MB_POOL_ADDR,
9661 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9662 tw32(BUFMGR_MB_POOL_SIZE,
9663 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9666 if (tp->dev->mtu <= ETH_DATA_LEN) {
9667 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9668 tp->bufmgr_config.mbuf_read_dma_low_water);
9669 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9670 tp->bufmgr_config.mbuf_mac_rx_low_water);
9671 tw32(BUFMGR_MB_HIGH_WATER,
9672 tp->bufmgr_config.mbuf_high_water);
9674 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9675 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9676 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9677 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9678 tw32(BUFMGR_MB_HIGH_WATER,
9679 tp->bufmgr_config.mbuf_high_water_jumbo);
9681 tw32(BUFMGR_DMA_LOW_WATER,
9682 tp->bufmgr_config.dma_low_water);
9683 tw32(BUFMGR_DMA_HIGH_WATER,
9684 tp->bufmgr_config.dma_high_water);
9686 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9687 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9688 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9689 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9690 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9691 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9692 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9693 tw32(BUFMGR_MODE, val);
9694 for (i = 0; i < 2000; i++) {
9695 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9700 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9704 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9705 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9707 tg3_setup_rxbd_thresholds(tp);
9709 /* Initialize TG3_BDINFO's at:
9710 * RCVDBDI_STD_BD: standard eth size rx ring
9711 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
9712 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
9715 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
9716 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
9717 * ring attribute flags
9718 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
9720 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9721 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9723 * The size of each ring is fixed in the firmware, but the location is
9726 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9727 ((u64) tpr->rx_std_mapping >> 32));
9728 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9729 ((u64) tpr->rx_std_mapping & 0xffffffff));
9730 if (!tg3_flag(tp, 5717_PLUS))
9731 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9732 NIC_SRAM_RX_BUFFER_DESC);
9734 /* Disable the mini ring */
9735 if (!tg3_flag(tp, 5705_PLUS))
9736 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9737 BDINFO_FLAGS_DISABLED);
9739 /* Program the jumbo buffer descriptor ring control
9740 * blocks on those devices that have them.
9742 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9743 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9745 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9746 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9747 ((u64) tpr->rx_jmb_mapping >> 32));
9748 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9749 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9750 val = TG3_RX_JMB_RING_SIZE(tp) <<
9751 BDINFO_FLAGS_MAXLEN_SHIFT;
9752 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9753 val | BDINFO_FLAGS_USE_EXT_RECV);
9754 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9755 tg3_flag(tp, 57765_CLASS) ||
9756 tg3_asic_rev(tp) == ASIC_REV_5762)
9757 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9758 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9760 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9761 BDINFO_FLAGS_DISABLED);
9764 if (tg3_flag(tp, 57765_PLUS)) {
9765 val = TG3_RX_STD_RING_SIZE(tp);
9766 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9767 val |= (TG3_RX_STD_DMA_SZ << 2);
9769 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9771 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9773 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9775 tpr->rx_std_prod_idx = tp->rx_pending;
9776 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9778 tpr->rx_jmb_prod_idx =
9779 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9780 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9782 tg3_rings_reset(tp);
9784 /* Initialize MAC address and backoff seed. */
9785 __tg3_set_mac_addr(tp, 0);
9787 /* MTU + ethernet header + FCS + optional VLAN tag */
9788 tw32(MAC_RX_MTU_SIZE,
9789 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9791 /* The slot time is changed by tg3_setup_phy if we
9792 * run at gigabit with half duplex.
9794 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9795 (6 << TX_LENGTHS_IPG_SHIFT) |
9796 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9798 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9799 tg3_asic_rev(tp) == ASIC_REV_5762)
9800 val |= tr32(MAC_TX_LENGTHS) &
9801 (TX_LENGTHS_JMB_FRM_LEN_MSK |
9802 TX_LENGTHS_CNT_DWN_VAL_MSK);
9804 tw32(MAC_TX_LENGTHS, val);
9806 /* Receive rules. */
9807 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9808 tw32(RCVLPC_CONFIG, 0x0181);
9810 /* Calculate RDMAC_MODE setting early, we need it to determine
9811 * the RCVLPC_STATE_ENABLE mask.
9813 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9814 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9815 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9816 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9817 RDMAC_MODE_LNGREAD_ENAB);
9819 if (tg3_asic_rev(tp) == ASIC_REV_5717)
9820 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9822 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
9823 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9824 tg3_asic_rev(tp) == ASIC_REV_57780)
9825 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9826 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9827 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9829 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9830 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9831 if (tg3_flag(tp, TSO_CAPABLE) &&
9832 tg3_asic_rev(tp) == ASIC_REV_5705) {
9833 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9834 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9835 !tg3_flag(tp, IS_5788)) {
9836 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9840 if (tg3_flag(tp, PCI_EXPRESS))
9841 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9843 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9845 if (tp->dev->mtu <= ETH_DATA_LEN) {
9846 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
9847 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
9851 if (tg3_flag(tp, HW_TSO_1) ||
9852 tg3_flag(tp, HW_TSO_2) ||
9853 tg3_flag(tp, HW_TSO_3))
9854 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9856 if (tg3_flag(tp, 57765_PLUS) ||
9857 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9858 tg3_asic_rev(tp) == ASIC_REV_57780)
9859 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9861 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9862 tg3_asic_rev(tp) == ASIC_REV_5762)
9863 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9865 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
9866 tg3_asic_rev(tp) == ASIC_REV_5784 ||
9867 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9868 tg3_asic_rev(tp) == ASIC_REV_57780 ||
9869 tg3_flag(tp, 57765_PLUS)) {
9872 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9873 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
9875 tgtreg = TG3_RDMA_RSRVCTRL_REG;
9878 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9879 tg3_asic_rev(tp) == ASIC_REV_5762) {
9880 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9881 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9882 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9883 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9884 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9885 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9887 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9890 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
9891 tg3_asic_rev(tp) == ASIC_REV_5720 ||
9892 tg3_asic_rev(tp) == ASIC_REV_5762) {
9895 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9896 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
9898 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
9902 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9903 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9906 /* Receive/send statistics. */
9907 if (tg3_flag(tp, 5750_PLUS)) {
9908 val = tr32(RCVLPC_STATS_ENABLE);
9909 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9910 tw32(RCVLPC_STATS_ENABLE, val);
9911 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9912 tg3_flag(tp, TSO_CAPABLE)) {
9913 val = tr32(RCVLPC_STATS_ENABLE);
9914 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9915 tw32(RCVLPC_STATS_ENABLE, val);
9917 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9919 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9920 tw32(SNDDATAI_STATSENAB, 0xffffff);
9921 tw32(SNDDATAI_STATSCTRL,
9922 (SNDDATAI_SCTRL_ENABLE |
9923 SNDDATAI_SCTRL_FASTUPD));
9925 /* Setup host coalescing engine. */
9926 tw32(HOSTCC_MODE, 0);
9927 for (i = 0; i < 2000; i++) {
9928 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9933 __tg3_set_coalesce(tp, &tp->coal);
9935 if (!tg3_flag(tp, 5705_PLUS)) {
9936 /* Status/statistics block address. See tg3_timer,
9937 * the tg3_periodic_fetch_stats call there, and
9938 * tg3_get_stats to see how this works for 5705/5750 chips.
9940 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9941 ((u64) tp->stats_mapping >> 32));
9942 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9943 ((u64) tp->stats_mapping & 0xffffffff));
9944 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9946 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9948 /* Clear statistics and status block memory areas */
9949 for (i = NIC_SRAM_STATS_BLK;
9950 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9952 tg3_write_mem(tp, i, 0);
9957 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9959 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9960 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9961 if (!tg3_flag(tp, 5705_PLUS))
9962 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9964 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9965 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9966 /* reset to prevent losing 1st rx packet intermittently */
9967 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9971 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9972 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9973 MAC_MODE_FHDE_ENABLE;
9974 if (tg3_flag(tp, ENABLE_APE))
9975 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9976 if (!tg3_flag(tp, 5705_PLUS) &&
9977 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9978 tg3_asic_rev(tp) != ASIC_REV_5700)
9979 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9980 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9983 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9984 * If TG3_FLAG_IS_NIC is zero, we should read the
9985 * register to preserve the GPIO settings for LOMs. The GPIOs,
9986 * whether used as inputs or outputs, are set by boot code after
9989 if (!tg3_flag(tp, IS_NIC)) {
9992 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9993 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9994 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9996 if (tg3_asic_rev(tp) == ASIC_REV_5752)
9997 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9998 GRC_LCLCTRL_GPIO_OUTPUT3;
10000 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10001 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10003 tp->grc_local_ctrl &= ~gpio_mask;
10004 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10006 /* GPIO1 must be driven high for eeprom write protect */
10007 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10008 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10009 GRC_LCLCTRL_GPIO_OUTPUT1);
10011 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10014 if (tg3_flag(tp, USING_MSIX)) {
10015 val = tr32(MSGINT_MODE);
10016 val |= MSGINT_MODE_ENABLE;
10017 if (tp->irq_cnt > 1)
10018 val |= MSGINT_MODE_MULTIVEC_EN;
10019 if (!tg3_flag(tp, 1SHOT_MSI))
10020 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10021 tw32(MSGINT_MODE, val);
10024 if (!tg3_flag(tp, 5705_PLUS)) {
10025 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10029 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10030 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10031 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10032 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10033 WDMAC_MODE_LNGREAD_ENAB);
10035 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10036 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10037 if (tg3_flag(tp, TSO_CAPABLE) &&
10038 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10039 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10041 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10042 !tg3_flag(tp, IS_5788)) {
10043 val |= WDMAC_MODE_RX_ACCEL;
10047 /* Enable host coalescing bug fix */
10048 if (tg3_flag(tp, 5755_PLUS))
10049 val |= WDMAC_MODE_STATUS_TAG_FIX;
10051 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10052 val |= WDMAC_MODE_BURST_ALL_DATA;
10054 tw32_f(WDMAC_MODE, val);
10057 if (tg3_flag(tp, PCIX_MODE)) {
10060 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10062 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10063 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10064 pcix_cmd |= PCI_X_CMD_READ_2K;
10065 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10066 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10067 pcix_cmd |= PCI_X_CMD_READ_2K;
10069 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10073 tw32_f(RDMAC_MODE, rdmac_mode);
10076 if (tg3_asic_rev(tp) == ASIC_REV_5719) {
10077 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10078 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10081 if (i < TG3_NUM_RDMA_CHANNELS) {
10082 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10083 val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
10084 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10085 tg3_flag_set(tp, 5719_RDMA_BUG);
10089 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10090 if (!tg3_flag(tp, 5705_PLUS))
10091 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10093 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10094 tw32(SNDDATAC_MODE,
10095 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10097 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10099 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10100 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10101 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10102 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10103 val |= RCVDBDI_MODE_LRG_RING_SZ;
10104 tw32(RCVDBDI_MODE, val);
10105 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10106 if (tg3_flag(tp, HW_TSO_1) ||
10107 tg3_flag(tp, HW_TSO_2) ||
10108 tg3_flag(tp, HW_TSO_3))
10109 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10110 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10111 if (tg3_flag(tp, ENABLE_TSS))
10112 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10113 tw32(SNDBDI_MODE, val);
10114 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10116 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10117 err = tg3_load_5701_a0_firmware_fix(tp);
10122 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10123 /* Ignore any errors for the firmware download. If download
10124 * fails, the device will operate with EEE disabled
10126 tg3_load_57766_firmware(tp);
10129 if (tg3_flag(tp, TSO_CAPABLE)) {
10130 err = tg3_load_tso_firmware(tp);
10135 tp->tx_mode = TX_MODE_ENABLE;
10137 if (tg3_flag(tp, 5755_PLUS) ||
10138 tg3_asic_rev(tp) == ASIC_REV_5906)
10139 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10141 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10142 tg3_asic_rev(tp) == ASIC_REV_5762) {
10143 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10144 tp->tx_mode &= ~val;
10145 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10148 tw32_f(MAC_TX_MODE, tp->tx_mode);
10151 if (tg3_flag(tp, ENABLE_RSS)) {
10152 tg3_rss_write_indir_tbl(tp);
10154 /* Setup the "secret" hash key. */
10155 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
10156 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
10157 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
10158 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
10159 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
10160 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
10161 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
10162 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
10163 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
10164 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
10167 tp->rx_mode = RX_MODE_ENABLE;
10168 if (tg3_flag(tp, 5755_PLUS))
10169 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10171 if (tg3_flag(tp, ENABLE_RSS))
10172 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10173 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10174 RX_MODE_RSS_IPV6_HASH_EN |
10175 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10176 RX_MODE_RSS_IPV4_HASH_EN |
10177 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10179 tw32_f(MAC_RX_MODE, tp->rx_mode);
10182 tw32(MAC_LED_CTRL, tp->led_ctrl);
10184 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10185 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10186 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10189 tw32_f(MAC_RX_MODE, tp->rx_mode);
10192 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10193 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10194 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10195 /* Set drive transmission level to 1.2V */
10196 /* only if the signal pre-emphasis bit is not set */
10197 val = tr32(MAC_SERDES_CFG);
10200 tw32(MAC_SERDES_CFG, val);
10202 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10203 tw32(MAC_SERDES_CFG, 0x616000);
10206 /* Prevent chip from dropping frames when flow control
10209 if (tg3_flag(tp, 57765_CLASS))
10213 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10215 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10216 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10217 /* Use hardware link auto-negotiation */
10218 tg3_flag_set(tp, HW_AUTONEG);
10221 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10222 tg3_asic_rev(tp) == ASIC_REV_5714) {
10225 tmp = tr32(SERDES_RX_CTRL);
10226 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10227 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10228 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10229 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10232 if (!tg3_flag(tp, USE_PHYLIB)) {
10233 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10234 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10236 err = tg3_setup_phy(tp, 0);
10240 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10241 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10244 /* Clear CRC stats. */
10245 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10246 tg3_writephy(tp, MII_TG3_TEST1,
10247 tmp | MII_TG3_TEST1_CRC_EN);
10248 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10253 __tg3_set_rx_mode(tp->dev);
10255 /* Initialize receive rules. */
10256 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10257 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10258 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10259 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10261 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10265 if (tg3_flag(tp, ENABLE_ASF))
10269 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10271 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10273 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10275 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10277 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10279 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10281 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10283 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10285 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10287 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10289 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10291 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10293 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10295 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10303 if (tg3_flag(tp, ENABLE_APE))
10304 /* Write our heartbeat update interval to APE. */
10305 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10306 APE_HOST_HEARTBEAT_INT_DISABLE);
10308 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10313 /* Called at device open time to get the chip ready for
10314 * packet processing. Invoked with tp->lock held.
10316 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
10318 tg3_switch_clocks(tp);
10320 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10322 return tg3_reset_hw(tp, reset_phy);
10325 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10329 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10330 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10332 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10335 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10336 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10337 memset(ocir, 0, TG3_OCIR_LEN);
10341 /* sysfs attributes for hwmon */
10342 static ssize_t tg3_show_temp(struct device *dev,
10343 struct device_attribute *devattr, char *buf)
10345 struct pci_dev *pdev = to_pci_dev(dev);
10346 struct net_device *netdev = pci_get_drvdata(pdev);
10347 struct tg3 *tp = netdev_priv(netdev);
10348 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10351 spin_lock_bh(&tp->lock);
10352 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10353 sizeof(temperature));
10354 spin_unlock_bh(&tp->lock);
10355 return sprintf(buf, "%u\n", temperature);
10359 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10360 TG3_TEMP_SENSOR_OFFSET);
10361 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10362 TG3_TEMP_CAUTION_OFFSET);
10363 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10364 TG3_TEMP_MAX_OFFSET);
10366 static struct attribute *tg3_attributes[] = {
10367 &sensor_dev_attr_temp1_input.dev_attr.attr,
10368 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10369 &sensor_dev_attr_temp1_max.dev_attr.attr,
10373 static const struct attribute_group tg3_group = {
10374 .attrs = tg3_attributes,
10377 static void tg3_hwmon_close(struct tg3 *tp)
10379 if (tp->hwmon_dev) {
10380 hwmon_device_unregister(tp->hwmon_dev);
10381 tp->hwmon_dev = NULL;
10382 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10386 static void tg3_hwmon_open(struct tg3 *tp)
10390 struct pci_dev *pdev = tp->pdev;
10391 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10393 tg3_sd_scan_scratchpad(tp, ocirs);
10395 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10396 if (!ocirs[i].src_data_length)
10399 size += ocirs[i].src_hdr_length;
10400 size += ocirs[i].src_data_length;
10406 /* Register hwmon sysfs hooks */
10407 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10409 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10413 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10414 if (IS_ERR(tp->hwmon_dev)) {
10415 tp->hwmon_dev = NULL;
10416 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10417 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10422 #define TG3_STAT_ADD32(PSTAT, REG) \
10423 do { u32 __val = tr32(REG); \
10424 (PSTAT)->low += __val; \
10425 if ((PSTAT)->low < __val) \
10426 (PSTAT)->high += 1; \
10429 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10431 struct tg3_hw_stats *sp = tp->hw_stats;
10436 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10437 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10438 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10439 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10440 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10441 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10442 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10443 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10444 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10445 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10446 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10447 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10448 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10449 if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
10450 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10451 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10454 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10455 val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
10456 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10457 tg3_flag_clear(tp, 5719_RDMA_BUG);
10460 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10461 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10462 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10463 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10464 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10465 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10466 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10467 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10468 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10469 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10470 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10471 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10472 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10473 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10475 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10476 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10477 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10478 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10479 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10481 u32 val = tr32(HOSTCC_FLOW_ATTN);
10482 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10484 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10485 sp->rx_discards.low += val;
10486 if (sp->rx_discards.low < val)
10487 sp->rx_discards.high += 1;
10489 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10491 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10494 static void tg3_chk_missed_msi(struct tg3 *tp)
10498 for (i = 0; i < tp->irq_cnt; i++) {
10499 struct tg3_napi *tnapi = &tp->napi[i];
10501 if (tg3_has_work(tnapi)) {
10502 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10503 tnapi->last_tx_cons == tnapi->tx_cons) {
10504 if (tnapi->chk_msi_cnt < 1) {
10505 tnapi->chk_msi_cnt++;
10511 tnapi->chk_msi_cnt = 0;
10512 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10513 tnapi->last_tx_cons = tnapi->tx_cons;
10517 static void tg3_timer(unsigned long __opaque)
10519 struct tg3 *tp = (struct tg3 *) __opaque;
10521 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10522 goto restart_timer;
10524 spin_lock(&tp->lock);
10526 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10527 tg3_flag(tp, 57765_CLASS))
10528 tg3_chk_missed_msi(tp);
10530 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10531 /* BCM4785: Flush posted writes from GbE to host memory. */
10535 if (!tg3_flag(tp, TAGGED_STATUS)) {
10536 /* All of this garbage is because when using non-tagged
10537 * IRQ status the mailbox/status_block protocol the chip
10538 * uses with the cpu is race prone.
10540 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10541 tw32(GRC_LOCAL_CTRL,
10542 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10544 tw32(HOSTCC_MODE, tp->coalesce_mode |
10545 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10548 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10549 spin_unlock(&tp->lock);
10550 tg3_reset_task_schedule(tp);
10551 goto restart_timer;
10555 /* This part only runs once per second. */
10556 if (!--tp->timer_counter) {
10557 if (tg3_flag(tp, 5705_PLUS))
10558 tg3_periodic_fetch_stats(tp);
10560 if (tp->setlpicnt && !--tp->setlpicnt)
10561 tg3_phy_eee_enable(tp);
10563 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10567 mac_stat = tr32(MAC_STATUS);
10570 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10571 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10573 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10577 tg3_setup_phy(tp, 0);
10578 } else if (tg3_flag(tp, POLL_SERDES)) {
10579 u32 mac_stat = tr32(MAC_STATUS);
10580 int need_setup = 0;
10583 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10586 if (!tp->link_up &&
10587 (mac_stat & (MAC_STATUS_PCS_SYNCED |
10588 MAC_STATUS_SIGNAL_DET))) {
10592 if (!tp->serdes_counter) {
10595 ~MAC_MODE_PORT_MODE_MASK));
10597 tw32_f(MAC_MODE, tp->mac_mode);
10600 tg3_setup_phy(tp, 0);
10602 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10603 tg3_flag(tp, 5780_CLASS)) {
10604 tg3_serdes_parallel_detect(tp);
10607 tp->timer_counter = tp->timer_multiplier;
10610 /* Heartbeat is only sent once every 2 seconds.
10612 * The heartbeat is to tell the ASF firmware that the host
10613 * driver is still alive. In the event that the OS crashes,
10614 * ASF needs to reset the hardware to free up the FIFO space
10615 * that may be filled with rx packets destined for the host.
10616 * If the FIFO is full, ASF will no longer function properly.
10618 * Unintended resets have been reported on real time kernels
10619 * where the timer doesn't run on time. Netpoll will also have
10622 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10623 * to check the ring condition when the heartbeat is expiring
10624 * before doing the reset. This will prevent most unintended
10627 if (!--tp->asf_counter) {
10628 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10629 tg3_wait_for_event_ack(tp);
10631 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10632 FWCMD_NICDRV_ALIVE3);
10633 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10634 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10635 TG3_FW_UPDATE_TIMEOUT_SEC);
10637 tg3_generate_fw_event(tp);
10639 tp->asf_counter = tp->asf_multiplier;
10642 spin_unlock(&tp->lock);
10645 tp->timer.expires = jiffies + tp->timer_offset;
10646 add_timer(&tp->timer);
10649 static void tg3_timer_init(struct tg3 *tp)
10651 if (tg3_flag(tp, TAGGED_STATUS) &&
10652 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10653 !tg3_flag(tp, 57765_CLASS))
10654 tp->timer_offset = HZ;
10656 tp->timer_offset = HZ / 10;
10658 BUG_ON(tp->timer_offset > HZ);
10660 tp->timer_multiplier = (HZ / tp->timer_offset);
10661 tp->asf_multiplier = (HZ / tp->timer_offset) *
10662 TG3_FW_UPDATE_FREQ_SEC;
10664 init_timer(&tp->timer);
10665 tp->timer.data = (unsigned long) tp;
10666 tp->timer.function = tg3_timer;
10669 static void tg3_timer_start(struct tg3 *tp)
10671 tp->asf_counter = tp->asf_multiplier;
10672 tp->timer_counter = tp->timer_multiplier;
10674 tp->timer.expires = jiffies + tp->timer_offset;
10675 add_timer(&tp->timer);
10678 static void tg3_timer_stop(struct tg3 *tp)
10680 del_timer_sync(&tp->timer);
10683 /* Restart hardware after configuration changes, self-test, etc.
10684 * Invoked with tp->lock held.
10686 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
10687 __releases(tp->lock)
10688 __acquires(tp->lock)
10692 err = tg3_init_hw(tp, reset_phy);
10694 netdev_err(tp->dev,
10695 "Failed to re-initialize device, aborting\n");
10696 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10697 tg3_full_unlock(tp);
10698 tg3_timer_stop(tp);
10700 tg3_napi_enable(tp);
10701 dev_close(tp->dev);
10702 tg3_full_lock(tp, 0);
10707 static void tg3_reset_task(struct work_struct *work)
10709 struct tg3 *tp = container_of(work, struct tg3, reset_task);
10712 tg3_full_lock(tp, 0);
10714 if (!netif_running(tp->dev)) {
10715 tg3_flag_clear(tp, RESET_TASK_PENDING);
10716 tg3_full_unlock(tp);
10720 tg3_full_unlock(tp);
10724 tg3_netif_stop(tp);
10726 tg3_full_lock(tp, 1);
10728 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10729 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10730 tp->write32_rx_mbox = tg3_write_flush_reg32;
10731 tg3_flag_set(tp, MBOX_WRITE_REORDER);
10732 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10735 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10736 err = tg3_init_hw(tp, 1);
10740 tg3_netif_start(tp);
10743 tg3_full_unlock(tp);
10748 tg3_flag_clear(tp, RESET_TASK_PENDING);
10751 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10754 unsigned long flags;
10756 struct tg3_napi *tnapi = &tp->napi[irq_num];
10758 if (tp->irq_cnt == 1)
10759 name = tp->dev->name;
10761 name = &tnapi->irq_lbl[0];
10762 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10763 name[IFNAMSIZ-1] = 0;
10766 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10768 if (tg3_flag(tp, 1SHOT_MSI))
10769 fn = tg3_msi_1shot;
10772 fn = tg3_interrupt;
10773 if (tg3_flag(tp, TAGGED_STATUS))
10774 fn = tg3_interrupt_tagged;
10775 flags = IRQF_SHARED;
10778 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10781 static int tg3_test_interrupt(struct tg3 *tp)
10783 struct tg3_napi *tnapi = &tp->napi[0];
10784 struct net_device *dev = tp->dev;
10785 int err, i, intr_ok = 0;
10788 if (!netif_running(dev))
10791 tg3_disable_ints(tp);
10793 free_irq(tnapi->irq_vec, tnapi);
10796 * Turn off MSI one shot mode. Otherwise this test has no
10797 * observable way to know whether the interrupt was delivered.
10799 if (tg3_flag(tp, 57765_PLUS)) {
10800 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10801 tw32(MSGINT_MODE, val);
10804 err = request_irq(tnapi->irq_vec, tg3_test_isr,
10805 IRQF_SHARED, dev->name, tnapi);
10809 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10810 tg3_enable_ints(tp);
10812 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10815 for (i = 0; i < 5; i++) {
10816 u32 int_mbox, misc_host_ctrl;
10818 int_mbox = tr32_mailbox(tnapi->int_mbox);
10819 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10821 if ((int_mbox != 0) ||
10822 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10827 if (tg3_flag(tp, 57765_PLUS) &&
10828 tnapi->hw_status->status_tag != tnapi->last_tag)
10829 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10834 tg3_disable_ints(tp);
10836 free_irq(tnapi->irq_vec, tnapi);
10838 err = tg3_request_irq(tp, 0);
10844 /* Reenable MSI one shot mode. */
10845 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10846 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10847 tw32(MSGINT_MODE, val);
10855 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
10856 * successfully restored
10858 static int tg3_test_msi(struct tg3 *tp)
10863 if (!tg3_flag(tp, USING_MSI))
10866 /* Turn off SERR reporting in case MSI terminates with Master
10869 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10870 pci_write_config_word(tp->pdev, PCI_COMMAND,
10871 pci_cmd & ~PCI_COMMAND_SERR);
10873 err = tg3_test_interrupt(tp);
10875 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10880 /* other failures */
10884 /* MSI test failed, go back to INTx mode */
10885 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10886 "to INTx mode. Please report this failure to the PCI "
10887 "maintainer and include system chipset information\n");
10889 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10891 pci_disable_msi(tp->pdev);
10893 tg3_flag_clear(tp, USING_MSI);
10894 tp->napi[0].irq_vec = tp->pdev->irq;
10896 err = tg3_request_irq(tp, 0);
10900 /* Need to reset the chip because the MSI cycle may have terminated
10901 * with Master Abort.
10903 tg3_full_lock(tp, 1);
10905 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10906 err = tg3_init_hw(tp, 1);
10908 tg3_full_unlock(tp);
10911 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10916 static int tg3_request_firmware(struct tg3 *tp)
10918 const struct tg3_firmware_hdr *fw_hdr;
10920 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10921 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10926 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
10928 /* Firmware blob starts with version numbers, followed by
10929 * start address and _full_ length including BSS sections
10930 * (which must be longer than the actual data, of course
10933 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
10934 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
10935 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10936 tp->fw_len, tp->fw_needed);
10937 release_firmware(tp->fw);
10942 /* We no longer need firmware; we have it. */
10943 tp->fw_needed = NULL;
10947 static u32 tg3_irq_count(struct tg3 *tp)
10949 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10952 /* We want as many rx rings enabled as there are cpus.
10953 * In multiqueue MSI-X mode, the first MSI-X vector
10954 * only deals with link interrupts, etc, so we add
10955 * one to the number of vectors we are requesting.
10957 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
10963 static bool tg3_enable_msix(struct tg3 *tp)
10966 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
10968 tp->txq_cnt = tp->txq_req;
10969 tp->rxq_cnt = tp->rxq_req;
10971 tp->rxq_cnt = netif_get_num_default_rss_queues();
10972 if (tp->rxq_cnt > tp->rxq_max)
10973 tp->rxq_cnt = tp->rxq_max;
10975 /* Disable multiple TX rings by default. Simple round-robin hardware
10976 * scheduling of the TX rings can cause starvation of rings with
10977 * small packets when other rings have TSO or jumbo packets.
10982 tp->irq_cnt = tg3_irq_count(tp);
10984 for (i = 0; i < tp->irq_max; i++) {
10985 msix_ent[i].entry = i;
10986 msix_ent[i].vector = 0;
10989 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
10992 } else if (rc != 0) {
10993 if (pci_enable_msix(tp->pdev, msix_ent, rc))
10995 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
10998 tp->rxq_cnt = max(rc - 1, 1);
11000 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11003 for (i = 0; i < tp->irq_max; i++)
11004 tp->napi[i].irq_vec = msix_ent[i].vector;
11006 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11007 pci_disable_msix(tp->pdev);
11011 if (tp->irq_cnt == 1)
11014 tg3_flag_set(tp, ENABLE_RSS);
11016 if (tp->txq_cnt > 1)
11017 tg3_flag_set(tp, ENABLE_TSS);
11019 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11024 static void tg3_ints_init(struct tg3 *tp)
11026 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11027 !tg3_flag(tp, TAGGED_STATUS)) {
11028 /* All MSI supporting chips should support tagged
11029 * status. Assert that this is the case.
11031 netdev_warn(tp->dev,
11032 "MSI without TAGGED_STATUS? Not using MSI\n");
11036 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11037 tg3_flag_set(tp, USING_MSIX);
11038 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11039 tg3_flag_set(tp, USING_MSI);
11041 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11042 u32 msi_mode = tr32(MSGINT_MODE);
11043 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11044 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11045 if (!tg3_flag(tp, 1SHOT_MSI))
11046 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11047 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11050 if (!tg3_flag(tp, USING_MSIX)) {
11052 tp->napi[0].irq_vec = tp->pdev->irq;
11055 if (tp->irq_cnt == 1) {
11058 netif_set_real_num_tx_queues(tp->dev, 1);
11059 netif_set_real_num_rx_queues(tp->dev, 1);
11063 static void tg3_ints_fini(struct tg3 *tp)
11065 if (tg3_flag(tp, USING_MSIX))
11066 pci_disable_msix(tp->pdev);
11067 else if (tg3_flag(tp, USING_MSI))
11068 pci_disable_msi(tp->pdev);
11069 tg3_flag_clear(tp, USING_MSI);
11070 tg3_flag_clear(tp, USING_MSIX);
11071 tg3_flag_clear(tp, ENABLE_RSS);
11072 tg3_flag_clear(tp, ENABLE_TSS);
11075 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11078 struct net_device *dev = tp->dev;
11082 * Setup interrupts first so we know how
11083 * many NAPI resources to allocate
11087 tg3_rss_check_indir_tbl(tp);
11089 /* The placement of this call is tied
11090 * to the setup and use of Host TX descriptors.
11092 err = tg3_alloc_consistent(tp);
11098 tg3_napi_enable(tp);
11100 for (i = 0; i < tp->irq_cnt; i++) {
11101 struct tg3_napi *tnapi = &tp->napi[i];
11102 err = tg3_request_irq(tp, i);
11104 for (i--; i >= 0; i--) {
11105 tnapi = &tp->napi[i];
11106 free_irq(tnapi->irq_vec, tnapi);
11112 tg3_full_lock(tp, 0);
11114 err = tg3_init_hw(tp, reset_phy);
11116 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11117 tg3_free_rings(tp);
11120 tg3_full_unlock(tp);
11125 if (test_irq && tg3_flag(tp, USING_MSI)) {
11126 err = tg3_test_msi(tp);
11129 tg3_full_lock(tp, 0);
11130 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11131 tg3_free_rings(tp);
11132 tg3_full_unlock(tp);
11137 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11138 u32 val = tr32(PCIE_TRANSACTION_CFG);
11140 tw32(PCIE_TRANSACTION_CFG,
11141 val | PCIE_TRANS_CFG_1SHOT_MSI);
11147 tg3_hwmon_open(tp);
11149 tg3_full_lock(tp, 0);
11151 tg3_timer_start(tp);
11152 tg3_flag_set(tp, INIT_COMPLETE);
11153 tg3_enable_ints(tp);
11158 tg3_ptp_resume(tp);
11161 tg3_full_unlock(tp);
11163 netif_tx_start_all_queues(dev);
11166 * Reset loopback feature if it was turned on while the device was down
11167 * make sure that it's installed properly now.
11169 if (dev->features & NETIF_F_LOOPBACK)
11170 tg3_set_loopback(dev, dev->features);
11175 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11176 struct tg3_napi *tnapi = &tp->napi[i];
11177 free_irq(tnapi->irq_vec, tnapi);
11181 tg3_napi_disable(tp);
11183 tg3_free_consistent(tp);
11191 static void tg3_stop(struct tg3 *tp)
11195 tg3_reset_task_cancel(tp);
11196 tg3_netif_stop(tp);
11198 tg3_timer_stop(tp);
11200 tg3_hwmon_close(tp);
11204 tg3_full_lock(tp, 1);
11206 tg3_disable_ints(tp);
11208 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11209 tg3_free_rings(tp);
11210 tg3_flag_clear(tp, INIT_COMPLETE);
11212 tg3_full_unlock(tp);
11214 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11215 struct tg3_napi *tnapi = &tp->napi[i];
11216 free_irq(tnapi->irq_vec, tnapi);
11223 tg3_free_consistent(tp);
11226 static int tg3_open(struct net_device *dev)
11228 struct tg3 *tp = netdev_priv(dev);
11231 if (tp->fw_needed) {
11232 err = tg3_request_firmware(tp);
11233 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11235 netdev_warn(tp->dev, "EEE capability disabled\n");
11236 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11237 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11238 netdev_warn(tp->dev, "EEE capability restored\n");
11239 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11241 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11245 netdev_warn(tp->dev, "TSO capability disabled\n");
11246 tg3_flag_clear(tp, TSO_CAPABLE);
11247 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11248 netdev_notice(tp->dev, "TSO capability restored\n");
11249 tg3_flag_set(tp, TSO_CAPABLE);
11253 tg3_carrier_off(tp);
11255 err = tg3_power_up(tp);
11259 tg3_full_lock(tp, 0);
11261 tg3_disable_ints(tp);
11262 tg3_flag_clear(tp, INIT_COMPLETE);
11264 tg3_full_unlock(tp);
11266 err = tg3_start(tp,
11267 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11270 tg3_frob_aux_power(tp, false);
11271 pci_set_power_state(tp->pdev, PCI_D3hot);
11274 if (tg3_flag(tp, PTP_CAPABLE)) {
11275 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
11277 if (IS_ERR(tp->ptp_clock))
11278 tp->ptp_clock = NULL;
11284 static int tg3_close(struct net_device *dev)
11286 struct tg3 *tp = netdev_priv(dev);
11292 /* Clear stats across close / open calls */
11293 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11294 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11296 tg3_power_down(tp);
11298 tg3_carrier_off(tp);
11303 static inline u64 get_stat64(tg3_stat64_t *val)
11305 return ((u64)val->high << 32) | ((u64)val->low);
11308 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11310 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11312 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11313 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11314 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11317 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11318 tg3_writephy(tp, MII_TG3_TEST1,
11319 val | MII_TG3_TEST1_CRC_EN);
11320 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11324 tp->phy_crc_errors += val;
11326 return tp->phy_crc_errors;
11329 return get_stat64(&hw_stats->rx_fcs_errors);
11332 #define ESTAT_ADD(member) \
11333 estats->member = old_estats->member + \
11334 get_stat64(&hw_stats->member)
11336 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11338 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11339 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11341 ESTAT_ADD(rx_octets);
11342 ESTAT_ADD(rx_fragments);
11343 ESTAT_ADD(rx_ucast_packets);
11344 ESTAT_ADD(rx_mcast_packets);
11345 ESTAT_ADD(rx_bcast_packets);
11346 ESTAT_ADD(rx_fcs_errors);
11347 ESTAT_ADD(rx_align_errors);
11348 ESTAT_ADD(rx_xon_pause_rcvd);
11349 ESTAT_ADD(rx_xoff_pause_rcvd);
11350 ESTAT_ADD(rx_mac_ctrl_rcvd);
11351 ESTAT_ADD(rx_xoff_entered);
11352 ESTAT_ADD(rx_frame_too_long_errors);
11353 ESTAT_ADD(rx_jabbers);
11354 ESTAT_ADD(rx_undersize_packets);
11355 ESTAT_ADD(rx_in_length_errors);
11356 ESTAT_ADD(rx_out_length_errors);
11357 ESTAT_ADD(rx_64_or_less_octet_packets);
11358 ESTAT_ADD(rx_65_to_127_octet_packets);
11359 ESTAT_ADD(rx_128_to_255_octet_packets);
11360 ESTAT_ADD(rx_256_to_511_octet_packets);
11361 ESTAT_ADD(rx_512_to_1023_octet_packets);
11362 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11363 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11364 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11365 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11366 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11368 ESTAT_ADD(tx_octets);
11369 ESTAT_ADD(tx_collisions);
11370 ESTAT_ADD(tx_xon_sent);
11371 ESTAT_ADD(tx_xoff_sent);
11372 ESTAT_ADD(tx_flow_control);
11373 ESTAT_ADD(tx_mac_errors);
11374 ESTAT_ADD(tx_single_collisions);
11375 ESTAT_ADD(tx_mult_collisions);
11376 ESTAT_ADD(tx_deferred);
11377 ESTAT_ADD(tx_excessive_collisions);
11378 ESTAT_ADD(tx_late_collisions);
11379 ESTAT_ADD(tx_collide_2times);
11380 ESTAT_ADD(tx_collide_3times);
11381 ESTAT_ADD(tx_collide_4times);
11382 ESTAT_ADD(tx_collide_5times);
11383 ESTAT_ADD(tx_collide_6times);
11384 ESTAT_ADD(tx_collide_7times);
11385 ESTAT_ADD(tx_collide_8times);
11386 ESTAT_ADD(tx_collide_9times);
11387 ESTAT_ADD(tx_collide_10times);
11388 ESTAT_ADD(tx_collide_11times);
11389 ESTAT_ADD(tx_collide_12times);
11390 ESTAT_ADD(tx_collide_13times);
11391 ESTAT_ADD(tx_collide_14times);
11392 ESTAT_ADD(tx_collide_15times);
11393 ESTAT_ADD(tx_ucast_packets);
11394 ESTAT_ADD(tx_mcast_packets);
11395 ESTAT_ADD(tx_bcast_packets);
11396 ESTAT_ADD(tx_carrier_sense_errors);
11397 ESTAT_ADD(tx_discards);
11398 ESTAT_ADD(tx_errors);
11400 ESTAT_ADD(dma_writeq_full);
11401 ESTAT_ADD(dma_write_prioq_full);
11402 ESTAT_ADD(rxbds_empty);
11403 ESTAT_ADD(rx_discards);
11404 ESTAT_ADD(rx_errors);
11405 ESTAT_ADD(rx_threshold_hit);
11407 ESTAT_ADD(dma_readq_full);
11408 ESTAT_ADD(dma_read_prioq_full);
11409 ESTAT_ADD(tx_comp_queue_full);
11411 ESTAT_ADD(ring_set_send_prod_index);
11412 ESTAT_ADD(ring_status_update);
11413 ESTAT_ADD(nic_irqs);
11414 ESTAT_ADD(nic_avoided_irqs);
11415 ESTAT_ADD(nic_tx_threshold_hit);
11417 ESTAT_ADD(mbuf_lwm_thresh_hit);
11420 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11422 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11423 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11425 stats->rx_packets = old_stats->rx_packets +
11426 get_stat64(&hw_stats->rx_ucast_packets) +
11427 get_stat64(&hw_stats->rx_mcast_packets) +
11428 get_stat64(&hw_stats->rx_bcast_packets);
11430 stats->tx_packets = old_stats->tx_packets +
11431 get_stat64(&hw_stats->tx_ucast_packets) +
11432 get_stat64(&hw_stats->tx_mcast_packets) +
11433 get_stat64(&hw_stats->tx_bcast_packets);
11435 stats->rx_bytes = old_stats->rx_bytes +
11436 get_stat64(&hw_stats->rx_octets);
11437 stats->tx_bytes = old_stats->tx_bytes +
11438 get_stat64(&hw_stats->tx_octets);
11440 stats->rx_errors = old_stats->rx_errors +
11441 get_stat64(&hw_stats->rx_errors);
11442 stats->tx_errors = old_stats->tx_errors +
11443 get_stat64(&hw_stats->tx_errors) +
11444 get_stat64(&hw_stats->tx_mac_errors) +
11445 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11446 get_stat64(&hw_stats->tx_discards);
11448 stats->multicast = old_stats->multicast +
11449 get_stat64(&hw_stats->rx_mcast_packets);
11450 stats->collisions = old_stats->collisions +
11451 get_stat64(&hw_stats->tx_collisions);
11453 stats->rx_length_errors = old_stats->rx_length_errors +
11454 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11455 get_stat64(&hw_stats->rx_undersize_packets);
11457 stats->rx_over_errors = old_stats->rx_over_errors +
11458 get_stat64(&hw_stats->rxbds_empty);
11459 stats->rx_frame_errors = old_stats->rx_frame_errors +
11460 get_stat64(&hw_stats->rx_align_errors);
11461 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11462 get_stat64(&hw_stats->tx_discards);
11463 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11464 get_stat64(&hw_stats->tx_carrier_sense_errors);
11466 stats->rx_crc_errors = old_stats->rx_crc_errors +
11467 tg3_calc_crc_errors(tp);
11469 stats->rx_missed_errors = old_stats->rx_missed_errors +
11470 get_stat64(&hw_stats->rx_discards);
11472 stats->rx_dropped = tp->rx_dropped;
11473 stats->tx_dropped = tp->tx_dropped;
11476 static int tg3_get_regs_len(struct net_device *dev)
11478 return TG3_REG_BLK_SIZE;
11481 static void tg3_get_regs(struct net_device *dev,
11482 struct ethtool_regs *regs, void *_p)
11484 struct tg3 *tp = netdev_priv(dev);
11488 memset(_p, 0, TG3_REG_BLK_SIZE);
11490 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11493 tg3_full_lock(tp, 0);
11495 tg3_dump_legacy_regs(tp, (u32 *)_p);
11497 tg3_full_unlock(tp);
11500 static int tg3_get_eeprom_len(struct net_device *dev)
11502 struct tg3 *tp = netdev_priv(dev);
11504 return tp->nvram_size;
11507 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11509 struct tg3 *tp = netdev_priv(dev);
11512 u32 i, offset, len, b_offset, b_count;
11515 if (tg3_flag(tp, NO_NVRAM))
11518 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11521 offset = eeprom->offset;
11525 eeprom->magic = TG3_EEPROM_MAGIC;
11528 /* adjustments to start on required 4 byte boundary */
11529 b_offset = offset & 3;
11530 b_count = 4 - b_offset;
11531 if (b_count > len) {
11532 /* i.e. offset=1 len=2 */
11535 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11538 memcpy(data, ((char *)&val) + b_offset, b_count);
11541 eeprom->len += b_count;
11544 /* read bytes up to the last 4 byte boundary */
11545 pd = &data[eeprom->len];
11546 for (i = 0; i < (len - (len & 3)); i += 4) {
11547 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11552 memcpy(pd + i, &val, 4);
11557 /* read last bytes not ending on 4 byte boundary */
11558 pd = &data[eeprom->len];
11560 b_offset = offset + len - b_count;
11561 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11564 memcpy(pd, &val, b_count);
11565 eeprom->len += b_count;
11570 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11572 struct tg3 *tp = netdev_priv(dev);
11574 u32 offset, len, b_offset, odd_len;
11578 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11581 if (tg3_flag(tp, NO_NVRAM) ||
11582 eeprom->magic != TG3_EEPROM_MAGIC)
11585 offset = eeprom->offset;
11588 if ((b_offset = (offset & 3))) {
11589 /* adjustments to start on required 4 byte boundary */
11590 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11601 /* adjustments to end on required 4 byte boundary */
11603 len = (len + 3) & ~3;
11604 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11610 if (b_offset || odd_len) {
11611 buf = kmalloc(len, GFP_KERNEL);
11615 memcpy(buf, &start, 4);
11617 memcpy(buf+len-4, &end, 4);
11618 memcpy(buf + b_offset, data, eeprom->len);
11621 ret = tg3_nvram_write_block(tp, offset, len, buf);
11629 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11631 struct tg3 *tp = netdev_priv(dev);
11633 if (tg3_flag(tp, USE_PHYLIB)) {
11634 struct phy_device *phydev;
11635 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11637 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11638 return phy_ethtool_gset(phydev, cmd);
11641 cmd->supported = (SUPPORTED_Autoneg);
11643 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11644 cmd->supported |= (SUPPORTED_1000baseT_Half |
11645 SUPPORTED_1000baseT_Full);
11647 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11648 cmd->supported |= (SUPPORTED_100baseT_Half |
11649 SUPPORTED_100baseT_Full |
11650 SUPPORTED_10baseT_Half |
11651 SUPPORTED_10baseT_Full |
11653 cmd->port = PORT_TP;
11655 cmd->supported |= SUPPORTED_FIBRE;
11656 cmd->port = PORT_FIBRE;
11659 cmd->advertising = tp->link_config.advertising;
11660 if (tg3_flag(tp, PAUSE_AUTONEG)) {
11661 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11662 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11663 cmd->advertising |= ADVERTISED_Pause;
11665 cmd->advertising |= ADVERTISED_Pause |
11666 ADVERTISED_Asym_Pause;
11668 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11669 cmd->advertising |= ADVERTISED_Asym_Pause;
11672 if (netif_running(dev) && tp->link_up) {
11673 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11674 cmd->duplex = tp->link_config.active_duplex;
11675 cmd->lp_advertising = tp->link_config.rmt_adv;
11676 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11677 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11678 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11680 cmd->eth_tp_mdix = ETH_TP_MDI;
11683 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11684 cmd->duplex = DUPLEX_UNKNOWN;
11685 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11687 cmd->phy_address = tp->phy_addr;
11688 cmd->transceiver = XCVR_INTERNAL;
11689 cmd->autoneg = tp->link_config.autoneg;
11695 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11697 struct tg3 *tp = netdev_priv(dev);
11698 u32 speed = ethtool_cmd_speed(cmd);
11700 if (tg3_flag(tp, USE_PHYLIB)) {
11701 struct phy_device *phydev;
11702 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11704 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11705 return phy_ethtool_sset(phydev, cmd);
11708 if (cmd->autoneg != AUTONEG_ENABLE &&
11709 cmd->autoneg != AUTONEG_DISABLE)
11712 if (cmd->autoneg == AUTONEG_DISABLE &&
11713 cmd->duplex != DUPLEX_FULL &&
11714 cmd->duplex != DUPLEX_HALF)
11717 if (cmd->autoneg == AUTONEG_ENABLE) {
11718 u32 mask = ADVERTISED_Autoneg |
11720 ADVERTISED_Asym_Pause;
11722 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11723 mask |= ADVERTISED_1000baseT_Half |
11724 ADVERTISED_1000baseT_Full;
11726 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11727 mask |= ADVERTISED_100baseT_Half |
11728 ADVERTISED_100baseT_Full |
11729 ADVERTISED_10baseT_Half |
11730 ADVERTISED_10baseT_Full |
11733 mask |= ADVERTISED_FIBRE;
11735 if (cmd->advertising & ~mask)
11738 mask &= (ADVERTISED_1000baseT_Half |
11739 ADVERTISED_1000baseT_Full |
11740 ADVERTISED_100baseT_Half |
11741 ADVERTISED_100baseT_Full |
11742 ADVERTISED_10baseT_Half |
11743 ADVERTISED_10baseT_Full);
11745 cmd->advertising &= mask;
11747 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
11748 if (speed != SPEED_1000)
11751 if (cmd->duplex != DUPLEX_FULL)
11754 if (speed != SPEED_100 &&
11760 tg3_full_lock(tp, 0);
11762 tp->link_config.autoneg = cmd->autoneg;
11763 if (cmd->autoneg == AUTONEG_ENABLE) {
11764 tp->link_config.advertising = (cmd->advertising |
11765 ADVERTISED_Autoneg);
11766 tp->link_config.speed = SPEED_UNKNOWN;
11767 tp->link_config.duplex = DUPLEX_UNKNOWN;
11769 tp->link_config.advertising = 0;
11770 tp->link_config.speed = speed;
11771 tp->link_config.duplex = cmd->duplex;
11774 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
11776 tg3_warn_mgmt_link_flap(tp);
11778 if (netif_running(dev))
11779 tg3_setup_phy(tp, 1);
11781 tg3_full_unlock(tp);
11786 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11788 struct tg3 *tp = netdev_priv(dev);
11790 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11791 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11792 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11793 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11796 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11798 struct tg3 *tp = netdev_priv(dev);
11800 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11801 wol->supported = WAKE_MAGIC;
11803 wol->supported = 0;
11805 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11806 wol->wolopts = WAKE_MAGIC;
11807 memset(&wol->sopass, 0, sizeof(wol->sopass));
11810 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11812 struct tg3 *tp = netdev_priv(dev);
11813 struct device *dp = &tp->pdev->dev;
11815 if (wol->wolopts & ~WAKE_MAGIC)
11817 if ((wol->wolopts & WAKE_MAGIC) &&
11818 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11821 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11823 spin_lock_bh(&tp->lock);
11824 if (device_may_wakeup(dp))
11825 tg3_flag_set(tp, WOL_ENABLE);
11827 tg3_flag_clear(tp, WOL_ENABLE);
11828 spin_unlock_bh(&tp->lock);
11833 static u32 tg3_get_msglevel(struct net_device *dev)
11835 struct tg3 *tp = netdev_priv(dev);
11836 return tp->msg_enable;
11839 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11841 struct tg3 *tp = netdev_priv(dev);
11842 tp->msg_enable = value;
11845 static int tg3_nway_reset(struct net_device *dev)
11847 struct tg3 *tp = netdev_priv(dev);
11850 if (!netif_running(dev))
11853 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11856 tg3_warn_mgmt_link_flap(tp);
11858 if (tg3_flag(tp, USE_PHYLIB)) {
11859 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11861 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
11865 spin_lock_bh(&tp->lock);
11867 tg3_readphy(tp, MII_BMCR, &bmcr);
11868 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11869 ((bmcr & BMCR_ANENABLE) ||
11870 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
11871 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
11875 spin_unlock_bh(&tp->lock);
11881 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11883 struct tg3 *tp = netdev_priv(dev);
11885 ering->rx_max_pending = tp->rx_std_ring_mask;
11886 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11887 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11889 ering->rx_jumbo_max_pending = 0;
11891 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11893 ering->rx_pending = tp->rx_pending;
11894 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11895 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11897 ering->rx_jumbo_pending = 0;
11899 ering->tx_pending = tp->napi[0].tx_pending;
11902 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11904 struct tg3 *tp = netdev_priv(dev);
11905 int i, irq_sync = 0, err = 0;
11907 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11908 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
11909 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11910 (ering->tx_pending <= MAX_SKB_FRAGS) ||
11911 (tg3_flag(tp, TSO_BUG) &&
11912 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
11915 if (netif_running(dev)) {
11917 tg3_netif_stop(tp);
11921 tg3_full_lock(tp, irq_sync);
11923 tp->rx_pending = ering->rx_pending;
11925 if (tg3_flag(tp, MAX_RXPEND_64) &&
11926 tp->rx_pending > 63)
11927 tp->rx_pending = 63;
11928 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
11930 for (i = 0; i < tp->irq_max; i++)
11931 tp->napi[i].tx_pending = ering->tx_pending;
11933 if (netif_running(dev)) {
11934 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11935 err = tg3_restart_hw(tp, 0);
11937 tg3_netif_start(tp);
11940 tg3_full_unlock(tp);
11942 if (irq_sync && !err)
11948 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11950 struct tg3 *tp = netdev_priv(dev);
11952 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11954 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11955 epause->rx_pause = 1;
11957 epause->rx_pause = 0;
11959 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11960 epause->tx_pause = 1;
11962 epause->tx_pause = 0;
11965 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11967 struct tg3 *tp = netdev_priv(dev);
11970 if (tp->link_config.autoneg == AUTONEG_ENABLE)
11971 tg3_warn_mgmt_link_flap(tp);
11973 if (tg3_flag(tp, USE_PHYLIB)) {
11975 struct phy_device *phydev;
11977 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11979 if (!(phydev->supported & SUPPORTED_Pause) ||
11980 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
11981 (epause->rx_pause != epause->tx_pause)))
11984 tp->link_config.flowctrl = 0;
11985 if (epause->rx_pause) {
11986 tp->link_config.flowctrl |= FLOW_CTRL_RX;
11988 if (epause->tx_pause) {
11989 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11990 newadv = ADVERTISED_Pause;
11992 newadv = ADVERTISED_Pause |
11993 ADVERTISED_Asym_Pause;
11994 } else if (epause->tx_pause) {
11995 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11996 newadv = ADVERTISED_Asym_Pause;
12000 if (epause->autoneg)
12001 tg3_flag_set(tp, PAUSE_AUTONEG);
12003 tg3_flag_clear(tp, PAUSE_AUTONEG);
12005 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12006 u32 oldadv = phydev->advertising &
12007 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12008 if (oldadv != newadv) {
12009 phydev->advertising &=
12010 ~(ADVERTISED_Pause |
12011 ADVERTISED_Asym_Pause);
12012 phydev->advertising |= newadv;
12013 if (phydev->autoneg) {
12015 * Always renegotiate the link to
12016 * inform our link partner of our
12017 * flow control settings, even if the
12018 * flow control is forced. Let
12019 * tg3_adjust_link() do the final
12020 * flow control setup.
12022 return phy_start_aneg(phydev);
12026 if (!epause->autoneg)
12027 tg3_setup_flow_control(tp, 0, 0);
12029 tp->link_config.advertising &=
12030 ~(ADVERTISED_Pause |
12031 ADVERTISED_Asym_Pause);
12032 tp->link_config.advertising |= newadv;
12037 if (netif_running(dev)) {
12038 tg3_netif_stop(tp);
12042 tg3_full_lock(tp, irq_sync);
12044 if (epause->autoneg)
12045 tg3_flag_set(tp, PAUSE_AUTONEG);
12047 tg3_flag_clear(tp, PAUSE_AUTONEG);
12048 if (epause->rx_pause)
12049 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12051 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12052 if (epause->tx_pause)
12053 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12055 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12057 if (netif_running(dev)) {
12058 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12059 err = tg3_restart_hw(tp, 0);
12061 tg3_netif_start(tp);
12064 tg3_full_unlock(tp);
12067 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12072 static int tg3_get_sset_count(struct net_device *dev, int sset)
12076 return TG3_NUM_TEST;
12078 return TG3_NUM_STATS;
12080 return -EOPNOTSUPP;
12084 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12085 u32 *rules __always_unused)
12087 struct tg3 *tp = netdev_priv(dev);
12089 if (!tg3_flag(tp, SUPPORT_MSIX))
12090 return -EOPNOTSUPP;
12092 switch (info->cmd) {
12093 case ETHTOOL_GRXRINGS:
12094 if (netif_running(tp->dev))
12095 info->data = tp->rxq_cnt;
12097 info->data = num_online_cpus();
12098 if (info->data > TG3_RSS_MAX_NUM_QS)
12099 info->data = TG3_RSS_MAX_NUM_QS;
12102 /* The first interrupt vector only
12103 * handles link interrupts.
12109 return -EOPNOTSUPP;
12113 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12116 struct tg3 *tp = netdev_priv(dev);
12118 if (tg3_flag(tp, SUPPORT_MSIX))
12119 size = TG3_RSS_INDIR_TBL_SIZE;
12124 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
12126 struct tg3 *tp = netdev_priv(dev);
12129 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12130 indir[i] = tp->rss_ind_tbl[i];
12135 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
12137 struct tg3 *tp = netdev_priv(dev);
12140 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12141 tp->rss_ind_tbl[i] = indir[i];
12143 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12146 /* It is legal to write the indirection
12147 * table while the device is running.
12149 tg3_full_lock(tp, 0);
12150 tg3_rss_write_indir_tbl(tp);
12151 tg3_full_unlock(tp);
12156 static void tg3_get_channels(struct net_device *dev,
12157 struct ethtool_channels *channel)
12159 struct tg3 *tp = netdev_priv(dev);
12160 u32 deflt_qs = netif_get_num_default_rss_queues();
12162 channel->max_rx = tp->rxq_max;
12163 channel->max_tx = tp->txq_max;
12165 if (netif_running(dev)) {
12166 channel->rx_count = tp->rxq_cnt;
12167 channel->tx_count = tp->txq_cnt;
12170 channel->rx_count = tp->rxq_req;
12172 channel->rx_count = min(deflt_qs, tp->rxq_max);
12175 channel->tx_count = tp->txq_req;
12177 channel->tx_count = min(deflt_qs, tp->txq_max);
12181 static int tg3_set_channels(struct net_device *dev,
12182 struct ethtool_channels *channel)
12184 struct tg3 *tp = netdev_priv(dev);
12186 if (!tg3_flag(tp, SUPPORT_MSIX))
12187 return -EOPNOTSUPP;
12189 if (channel->rx_count > tp->rxq_max ||
12190 channel->tx_count > tp->txq_max)
12193 tp->rxq_req = channel->rx_count;
12194 tp->txq_req = channel->tx_count;
12196 if (!netif_running(dev))
12201 tg3_carrier_off(tp);
12203 tg3_start(tp, true, false, false);
12208 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12210 switch (stringset) {
12212 memcpy(buf, ðtool_stats_keys, sizeof(ethtool_stats_keys));
12215 memcpy(buf, ðtool_test_keys, sizeof(ethtool_test_keys));
12218 WARN_ON(1); /* we need a WARN() */
12223 static int tg3_set_phys_id(struct net_device *dev,
12224 enum ethtool_phys_id_state state)
12226 struct tg3 *tp = netdev_priv(dev);
12228 if (!netif_running(tp->dev))
12232 case ETHTOOL_ID_ACTIVE:
12233 return 1; /* cycle on/off once per second */
12235 case ETHTOOL_ID_ON:
12236 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12237 LED_CTRL_1000MBPS_ON |
12238 LED_CTRL_100MBPS_ON |
12239 LED_CTRL_10MBPS_ON |
12240 LED_CTRL_TRAFFIC_OVERRIDE |
12241 LED_CTRL_TRAFFIC_BLINK |
12242 LED_CTRL_TRAFFIC_LED);
12245 case ETHTOOL_ID_OFF:
12246 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12247 LED_CTRL_TRAFFIC_OVERRIDE);
12250 case ETHTOOL_ID_INACTIVE:
12251 tw32(MAC_LED_CTRL, tp->led_ctrl);
12258 static void tg3_get_ethtool_stats(struct net_device *dev,
12259 struct ethtool_stats *estats, u64 *tmp_stats)
12261 struct tg3 *tp = netdev_priv(dev);
12264 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12266 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12269 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12273 u32 offset = 0, len = 0;
12276 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12279 if (magic == TG3_EEPROM_MAGIC) {
12280 for (offset = TG3_NVM_DIR_START;
12281 offset < TG3_NVM_DIR_END;
12282 offset += TG3_NVM_DIRENT_SIZE) {
12283 if (tg3_nvram_read(tp, offset, &val))
12286 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12287 TG3_NVM_DIRTYPE_EXTVPD)
12291 if (offset != TG3_NVM_DIR_END) {
12292 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12293 if (tg3_nvram_read(tp, offset + 4, &offset))
12296 offset = tg3_nvram_logical_addr(tp, offset);
12300 if (!offset || !len) {
12301 offset = TG3_NVM_VPD_OFF;
12302 len = TG3_NVM_VPD_LEN;
12305 buf = kmalloc(len, GFP_KERNEL);
12309 if (magic == TG3_EEPROM_MAGIC) {
12310 for (i = 0; i < len; i += 4) {
12311 /* The data is in little-endian format in NVRAM.
12312 * Use the big-endian read routines to preserve
12313 * the byte order as it exists in NVRAM.
12315 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12321 unsigned int pos = 0;
12323 ptr = (u8 *)&buf[0];
12324 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12325 cnt = pci_read_vpd(tp->pdev, pos,
12327 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12345 #define NVRAM_TEST_SIZE 0x100
12346 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12347 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12348 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12349 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12350 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12351 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12352 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12353 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12355 static int tg3_test_nvram(struct tg3 *tp)
12357 u32 csum, magic, len;
12359 int i, j, k, err = 0, size;
12361 if (tg3_flag(tp, NO_NVRAM))
12364 if (tg3_nvram_read(tp, 0, &magic) != 0)
12367 if (magic == TG3_EEPROM_MAGIC)
12368 size = NVRAM_TEST_SIZE;
12369 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12370 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12371 TG3_EEPROM_SB_FORMAT_1) {
12372 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12373 case TG3_EEPROM_SB_REVISION_0:
12374 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12376 case TG3_EEPROM_SB_REVISION_2:
12377 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12379 case TG3_EEPROM_SB_REVISION_3:
12380 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12382 case TG3_EEPROM_SB_REVISION_4:
12383 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12385 case TG3_EEPROM_SB_REVISION_5:
12386 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12388 case TG3_EEPROM_SB_REVISION_6:
12389 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12396 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12397 size = NVRAM_SELFBOOT_HW_SIZE;
12401 buf = kmalloc(size, GFP_KERNEL);
12406 for (i = 0, j = 0; i < size; i += 4, j++) {
12407 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12414 /* Selfboot format */
12415 magic = be32_to_cpu(buf[0]);
12416 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12417 TG3_EEPROM_MAGIC_FW) {
12418 u8 *buf8 = (u8 *) buf, csum8 = 0;
12420 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12421 TG3_EEPROM_SB_REVISION_2) {
12422 /* For rev 2, the csum doesn't include the MBA. */
12423 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12425 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12428 for (i = 0; i < size; i++)
12441 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12442 TG3_EEPROM_MAGIC_HW) {
12443 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12444 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12445 u8 *buf8 = (u8 *) buf;
12447 /* Separate the parity bits and the data bytes. */
12448 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12449 if ((i == 0) || (i == 8)) {
12453 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12454 parity[k++] = buf8[i] & msk;
12456 } else if (i == 16) {
12460 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12461 parity[k++] = buf8[i] & msk;
12464 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12465 parity[k++] = buf8[i] & msk;
12468 data[j++] = buf8[i];
12472 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12473 u8 hw8 = hweight8(data[i]);
12475 if ((hw8 & 0x1) && parity[i])
12477 else if (!(hw8 & 0x1) && !parity[i])
12486 /* Bootstrap checksum at offset 0x10 */
12487 csum = calc_crc((unsigned char *) buf, 0x10);
12488 if (csum != le32_to_cpu(buf[0x10/4]))
12491 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12492 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12493 if (csum != le32_to_cpu(buf[0xfc/4]))
12498 buf = tg3_vpd_readblock(tp, &len);
12502 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12504 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12508 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12511 i += PCI_VPD_LRDT_TAG_SIZE;
12512 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12513 PCI_VPD_RO_KEYWORD_CHKSUM);
12517 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12519 for (i = 0; i <= j; i++)
12520 csum8 += ((u8 *)buf)[i];
12534 #define TG3_SERDES_TIMEOUT_SEC 2
12535 #define TG3_COPPER_TIMEOUT_SEC 6
12537 static int tg3_test_link(struct tg3 *tp)
12541 if (!netif_running(tp->dev))
12544 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12545 max = TG3_SERDES_TIMEOUT_SEC;
12547 max = TG3_COPPER_TIMEOUT_SEC;
12549 for (i = 0; i < max; i++) {
12553 if (msleep_interruptible(1000))
12560 /* Only test the commonly used registers */
12561 static int tg3_test_registers(struct tg3 *tp)
12563 int i, is_5705, is_5750;
12564 u32 offset, read_mask, write_mask, val, save_val, read_val;
12568 #define TG3_FL_5705 0x1
12569 #define TG3_FL_NOT_5705 0x2
12570 #define TG3_FL_NOT_5788 0x4
12571 #define TG3_FL_NOT_5750 0x8
12575 /* MAC Control Registers */
12576 { MAC_MODE, TG3_FL_NOT_5705,
12577 0x00000000, 0x00ef6f8c },
12578 { MAC_MODE, TG3_FL_5705,
12579 0x00000000, 0x01ef6b8c },
12580 { MAC_STATUS, TG3_FL_NOT_5705,
12581 0x03800107, 0x00000000 },
12582 { MAC_STATUS, TG3_FL_5705,
12583 0x03800100, 0x00000000 },
12584 { MAC_ADDR_0_HIGH, 0x0000,
12585 0x00000000, 0x0000ffff },
12586 { MAC_ADDR_0_LOW, 0x0000,
12587 0x00000000, 0xffffffff },
12588 { MAC_RX_MTU_SIZE, 0x0000,
12589 0x00000000, 0x0000ffff },
12590 { MAC_TX_MODE, 0x0000,
12591 0x00000000, 0x00000070 },
12592 { MAC_TX_LENGTHS, 0x0000,
12593 0x00000000, 0x00003fff },
12594 { MAC_RX_MODE, TG3_FL_NOT_5705,
12595 0x00000000, 0x000007fc },
12596 { MAC_RX_MODE, TG3_FL_5705,
12597 0x00000000, 0x000007dc },
12598 { MAC_HASH_REG_0, 0x0000,
12599 0x00000000, 0xffffffff },
12600 { MAC_HASH_REG_1, 0x0000,
12601 0x00000000, 0xffffffff },
12602 { MAC_HASH_REG_2, 0x0000,
12603 0x00000000, 0xffffffff },
12604 { MAC_HASH_REG_3, 0x0000,
12605 0x00000000, 0xffffffff },
12607 /* Receive Data and Receive BD Initiator Control Registers. */
12608 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12609 0x00000000, 0xffffffff },
12610 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12611 0x00000000, 0xffffffff },
12612 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12613 0x00000000, 0x00000003 },
12614 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12615 0x00000000, 0xffffffff },
12616 { RCVDBDI_STD_BD+0, 0x0000,
12617 0x00000000, 0xffffffff },
12618 { RCVDBDI_STD_BD+4, 0x0000,
12619 0x00000000, 0xffffffff },
12620 { RCVDBDI_STD_BD+8, 0x0000,
12621 0x00000000, 0xffff0002 },
12622 { RCVDBDI_STD_BD+0xc, 0x0000,
12623 0x00000000, 0xffffffff },
12625 /* Receive BD Initiator Control Registers. */
12626 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12627 0x00000000, 0xffffffff },
12628 { RCVBDI_STD_THRESH, TG3_FL_5705,
12629 0x00000000, 0x000003ff },
12630 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12631 0x00000000, 0xffffffff },
12633 /* Host Coalescing Control Registers. */
12634 { HOSTCC_MODE, TG3_FL_NOT_5705,
12635 0x00000000, 0x00000004 },
12636 { HOSTCC_MODE, TG3_FL_5705,
12637 0x00000000, 0x000000f6 },
12638 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12639 0x00000000, 0xffffffff },
12640 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12641 0x00000000, 0x000003ff },
12642 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12643 0x00000000, 0xffffffff },
12644 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12645 0x00000000, 0x000003ff },
12646 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12647 0x00000000, 0xffffffff },
12648 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12649 0x00000000, 0x000000ff },
12650 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12651 0x00000000, 0xffffffff },
12652 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12653 0x00000000, 0x000000ff },
12654 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12655 0x00000000, 0xffffffff },
12656 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12657 0x00000000, 0xffffffff },
12658 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12659 0x00000000, 0xffffffff },
12660 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12661 0x00000000, 0x000000ff },
12662 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12663 0x00000000, 0xffffffff },
12664 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12665 0x00000000, 0x000000ff },
12666 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12667 0x00000000, 0xffffffff },
12668 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12669 0x00000000, 0xffffffff },
12670 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12671 0x00000000, 0xffffffff },
12672 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12673 0x00000000, 0xffffffff },
12674 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12675 0x00000000, 0xffffffff },
12676 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12677 0xffffffff, 0x00000000 },
12678 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12679 0xffffffff, 0x00000000 },
12681 /* Buffer Manager Control Registers. */
12682 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12683 0x00000000, 0x007fff80 },
12684 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12685 0x00000000, 0x007fffff },
12686 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12687 0x00000000, 0x0000003f },
12688 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12689 0x00000000, 0x000001ff },
12690 { BUFMGR_MB_HIGH_WATER, 0x0000,
12691 0x00000000, 0x000001ff },
12692 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12693 0xffffffff, 0x00000000 },
12694 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12695 0xffffffff, 0x00000000 },
12697 /* Mailbox Registers */
12698 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12699 0x00000000, 0x000001ff },
12700 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12701 0x00000000, 0x000001ff },
12702 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12703 0x00000000, 0x000007ff },
12704 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12705 0x00000000, 0x000001ff },
12707 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12710 is_5705 = is_5750 = 0;
12711 if (tg3_flag(tp, 5705_PLUS)) {
12713 if (tg3_flag(tp, 5750_PLUS))
12717 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12718 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12721 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12724 if (tg3_flag(tp, IS_5788) &&
12725 (reg_tbl[i].flags & TG3_FL_NOT_5788))
12728 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12731 offset = (u32) reg_tbl[i].offset;
12732 read_mask = reg_tbl[i].read_mask;
12733 write_mask = reg_tbl[i].write_mask;
12735 /* Save the original register content */
12736 save_val = tr32(offset);
12738 /* Determine the read-only value. */
12739 read_val = save_val & read_mask;
12741 /* Write zero to the register, then make sure the read-only bits
12742 * are not changed and the read/write bits are all zeros.
12746 val = tr32(offset);
12748 /* Test the read-only and read/write bits. */
12749 if (((val & read_mask) != read_val) || (val & write_mask))
12752 /* Write ones to all the bits defined by RdMask and WrMask, then
12753 * make sure the read-only bits are not changed and the
12754 * read/write bits are all ones.
12756 tw32(offset, read_mask | write_mask);
12758 val = tr32(offset);
12760 /* Test the read-only bits. */
12761 if ((val & read_mask) != read_val)
12764 /* Test the read/write bits. */
12765 if ((val & write_mask) != write_mask)
12768 tw32(offset, save_val);
12774 if (netif_msg_hw(tp))
12775 netdev_err(tp->dev,
12776 "Register test failed at offset %x\n", offset);
12777 tw32(offset, save_val);
12781 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12783 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12787 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12788 for (j = 0; j < len; j += 4) {
12791 tg3_write_mem(tp, offset + j, test_pattern[i]);
12792 tg3_read_mem(tp, offset + j, &val);
12793 if (val != test_pattern[i])
12800 static int tg3_test_memory(struct tg3 *tp)
12802 static struct mem_entry {
12805 } mem_tbl_570x[] = {
12806 { 0x00000000, 0x00b50},
12807 { 0x00002000, 0x1c000},
12808 { 0xffffffff, 0x00000}
12809 }, mem_tbl_5705[] = {
12810 { 0x00000100, 0x0000c},
12811 { 0x00000200, 0x00008},
12812 { 0x00004000, 0x00800},
12813 { 0x00006000, 0x01000},
12814 { 0x00008000, 0x02000},
12815 { 0x00010000, 0x0e000},
12816 { 0xffffffff, 0x00000}
12817 }, mem_tbl_5755[] = {
12818 { 0x00000200, 0x00008},
12819 { 0x00004000, 0x00800},
12820 { 0x00006000, 0x00800},
12821 { 0x00008000, 0x02000},
12822 { 0x00010000, 0x0c000},
12823 { 0xffffffff, 0x00000}
12824 }, mem_tbl_5906[] = {
12825 { 0x00000200, 0x00008},
12826 { 0x00004000, 0x00400},
12827 { 0x00006000, 0x00400},
12828 { 0x00008000, 0x01000},
12829 { 0x00010000, 0x01000},
12830 { 0xffffffff, 0x00000}
12831 }, mem_tbl_5717[] = {
12832 { 0x00000200, 0x00008},
12833 { 0x00010000, 0x0a000},
12834 { 0x00020000, 0x13c00},
12835 { 0xffffffff, 0x00000}
12836 }, mem_tbl_57765[] = {
12837 { 0x00000200, 0x00008},
12838 { 0x00004000, 0x00800},
12839 { 0x00006000, 0x09800},
12840 { 0x00010000, 0x0a000},
12841 { 0xffffffff, 0x00000}
12843 struct mem_entry *mem_tbl;
12847 if (tg3_flag(tp, 5717_PLUS))
12848 mem_tbl = mem_tbl_5717;
12849 else if (tg3_flag(tp, 57765_CLASS) ||
12850 tg3_asic_rev(tp) == ASIC_REV_5762)
12851 mem_tbl = mem_tbl_57765;
12852 else if (tg3_flag(tp, 5755_PLUS))
12853 mem_tbl = mem_tbl_5755;
12854 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
12855 mem_tbl = mem_tbl_5906;
12856 else if (tg3_flag(tp, 5705_PLUS))
12857 mem_tbl = mem_tbl_5705;
12859 mem_tbl = mem_tbl_570x;
12861 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
12862 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12870 #define TG3_TSO_MSS 500
12872 #define TG3_TSO_IP_HDR_LEN 20
12873 #define TG3_TSO_TCP_HDR_LEN 20
12874 #define TG3_TSO_TCP_OPT_LEN 12
12876 static const u8 tg3_tso_header[] = {
12878 0x45, 0x00, 0x00, 0x00,
12879 0x00, 0x00, 0x40, 0x00,
12880 0x40, 0x06, 0x00, 0x00,
12881 0x0a, 0x00, 0x00, 0x01,
12882 0x0a, 0x00, 0x00, 0x02,
12883 0x0d, 0x00, 0xe0, 0x00,
12884 0x00, 0x00, 0x01, 0x00,
12885 0x00, 0x00, 0x02, 0x00,
12886 0x80, 0x10, 0x10, 0x00,
12887 0x14, 0x09, 0x00, 0x00,
12888 0x01, 0x01, 0x08, 0x0a,
12889 0x11, 0x11, 0x11, 0x11,
12890 0x11, 0x11, 0x11, 0x11,
12893 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
12895 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
12896 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
12898 struct sk_buff *skb;
12899 u8 *tx_data, *rx_data;
12901 int num_pkts, tx_len, rx_len, i, err;
12902 struct tg3_rx_buffer_desc *desc;
12903 struct tg3_napi *tnapi, *rnapi;
12904 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
12906 tnapi = &tp->napi[0];
12907 rnapi = &tp->napi[0];
12908 if (tp->irq_cnt > 1) {
12909 if (tg3_flag(tp, ENABLE_RSS))
12910 rnapi = &tp->napi[1];
12911 if (tg3_flag(tp, ENABLE_TSS))
12912 tnapi = &tp->napi[1];
12914 coal_now = tnapi->coal_now | rnapi->coal_now;
12919 skb = netdev_alloc_skb(tp->dev, tx_len);
12923 tx_data = skb_put(skb, tx_len);
12924 memcpy(tx_data, tp->dev->dev_addr, 6);
12925 memset(tx_data + 6, 0x0, 8);
12927 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
12929 if (tso_loopback) {
12930 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12932 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12933 TG3_TSO_TCP_OPT_LEN;
12935 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12936 sizeof(tg3_tso_header));
12939 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12940 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12942 /* Set the total length field in the IP header */
12943 iph->tot_len = htons((u16)(mss + hdr_len));
12945 base_flags = (TXD_FLAG_CPU_PRE_DMA |
12946 TXD_FLAG_CPU_POST_DMA);
12948 if (tg3_flag(tp, HW_TSO_1) ||
12949 tg3_flag(tp, HW_TSO_2) ||
12950 tg3_flag(tp, HW_TSO_3)) {
12952 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
12953 th = (struct tcphdr *)&tx_data[val];
12956 base_flags |= TXD_FLAG_TCPUDP_CSUM;
12958 if (tg3_flag(tp, HW_TSO_3)) {
12959 mss |= (hdr_len & 0xc) << 12;
12960 if (hdr_len & 0x10)
12961 base_flags |= 0x00000010;
12962 base_flags |= (hdr_len & 0x3e0) << 5;
12963 } else if (tg3_flag(tp, HW_TSO_2))
12964 mss |= hdr_len << 9;
12965 else if (tg3_flag(tp, HW_TSO_1) ||
12966 tg3_asic_rev(tp) == ASIC_REV_5705) {
12967 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
12969 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
12972 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
12975 data_off = ETH_HLEN;
12977 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12978 tx_len > VLAN_ETH_FRAME_LEN)
12979 base_flags |= TXD_FLAG_JMB_PKT;
12982 for (i = data_off; i < tx_len; i++)
12983 tx_data[i] = (u8) (i & 0xff);
12985 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12986 if (pci_dma_mapping_error(tp->pdev, map)) {
12987 dev_kfree_skb(skb);
12991 val = tnapi->tx_prod;
12992 tnapi->tx_buffers[val].skb = skb;
12993 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12995 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13000 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13002 budget = tg3_tx_avail(tnapi);
13003 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13004 base_flags | TXD_FLAG_END, mss, 0)) {
13005 tnapi->tx_buffers[val].skb = NULL;
13006 dev_kfree_skb(skb);
13012 /* Sync BD data before updating mailbox */
13015 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13016 tr32_mailbox(tnapi->prodmbox);
13020 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13021 for (i = 0; i < 35; i++) {
13022 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13027 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13028 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13029 if ((tx_idx == tnapi->tx_prod) &&
13030 (rx_idx == (rx_start_idx + num_pkts)))
13034 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13035 dev_kfree_skb(skb);
13037 if (tx_idx != tnapi->tx_prod)
13040 if (rx_idx != rx_start_idx + num_pkts)
13044 while (rx_idx != rx_start_idx) {
13045 desc = &rnapi->rx_rcb[rx_start_idx++];
13046 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13047 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13049 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13050 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13053 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13056 if (!tso_loopback) {
13057 if (rx_len != tx_len)
13060 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13061 if (opaque_key != RXD_OPAQUE_RING_STD)
13064 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13067 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13068 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13069 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13073 if (opaque_key == RXD_OPAQUE_RING_STD) {
13074 rx_data = tpr->rx_std_buffers[desc_idx].data;
13075 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13077 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13078 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13079 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13084 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13085 PCI_DMA_FROMDEVICE);
13087 rx_data += TG3_RX_OFFSET(tp);
13088 for (i = data_off; i < rx_len; i++, val++) {
13089 if (*(rx_data + i) != (u8) (val & 0xff))
13096 /* tg3_free_rings will unmap and free the rx_data */
13101 #define TG3_STD_LOOPBACK_FAILED 1
13102 #define TG3_JMB_LOOPBACK_FAILED 2
13103 #define TG3_TSO_LOOPBACK_FAILED 4
13104 #define TG3_LOOPBACK_FAILED \
13105 (TG3_STD_LOOPBACK_FAILED | \
13106 TG3_JMB_LOOPBACK_FAILED | \
13107 TG3_TSO_LOOPBACK_FAILED)
13109 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13113 u32 jmb_pkt_sz = 9000;
13116 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13118 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13119 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13121 if (!netif_running(tp->dev)) {
13122 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13123 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13125 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13129 err = tg3_reset_hw(tp, 1);
13131 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13132 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13134 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13138 if (tg3_flag(tp, ENABLE_RSS)) {
13141 /* Reroute all rx packets to the 1st queue */
13142 for (i = MAC_RSS_INDIR_TBL_0;
13143 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13147 /* HW errata - mac loopback fails in some cases on 5780.
13148 * Normal traffic and PHY loopback are not affected by
13149 * errata. Also, the MAC loopback test is deprecated for
13150 * all newer ASIC revisions.
13152 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13153 !tg3_flag(tp, CPMU_PRESENT)) {
13154 tg3_mac_loopback(tp, true);
13156 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13157 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13159 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13160 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13161 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13163 tg3_mac_loopback(tp, false);
13166 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13167 !tg3_flag(tp, USE_PHYLIB)) {
13170 tg3_phy_lpbk_set(tp, 0, false);
13172 /* Wait for link */
13173 for (i = 0; i < 100; i++) {
13174 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13179 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13180 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13181 if (tg3_flag(tp, TSO_CAPABLE) &&
13182 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13183 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13184 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13185 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13186 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13189 tg3_phy_lpbk_set(tp, 0, true);
13191 /* All link indications report up, but the hardware
13192 * isn't really ready for about 20 msec. Double it
13197 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13198 data[TG3_EXT_LOOPB_TEST] |=
13199 TG3_STD_LOOPBACK_FAILED;
13200 if (tg3_flag(tp, TSO_CAPABLE) &&
13201 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13202 data[TG3_EXT_LOOPB_TEST] |=
13203 TG3_TSO_LOOPBACK_FAILED;
13204 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13205 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13206 data[TG3_EXT_LOOPB_TEST] |=
13207 TG3_JMB_LOOPBACK_FAILED;
13210 /* Re-enable gphy autopowerdown. */
13211 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13212 tg3_phy_toggle_apd(tp, true);
13215 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13216 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13219 tp->phy_flags |= eee_cap;
13224 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13227 struct tg3 *tp = netdev_priv(dev);
13228 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13230 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
13231 tg3_power_up(tp)) {
13232 etest->flags |= ETH_TEST_FL_FAILED;
13233 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13237 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13239 if (tg3_test_nvram(tp) != 0) {
13240 etest->flags |= ETH_TEST_FL_FAILED;
13241 data[TG3_NVRAM_TEST] = 1;
13243 if (!doextlpbk && tg3_test_link(tp)) {
13244 etest->flags |= ETH_TEST_FL_FAILED;
13245 data[TG3_LINK_TEST] = 1;
13247 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13248 int err, err2 = 0, irq_sync = 0;
13250 if (netif_running(dev)) {
13252 tg3_netif_stop(tp);
13256 tg3_full_lock(tp, irq_sync);
13257 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13258 err = tg3_nvram_lock(tp);
13259 tg3_halt_cpu(tp, RX_CPU_BASE);
13260 if (!tg3_flag(tp, 5705_PLUS))
13261 tg3_halt_cpu(tp, TX_CPU_BASE);
13263 tg3_nvram_unlock(tp);
13265 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13268 if (tg3_test_registers(tp) != 0) {
13269 etest->flags |= ETH_TEST_FL_FAILED;
13270 data[TG3_REGISTER_TEST] = 1;
13273 if (tg3_test_memory(tp) != 0) {
13274 etest->flags |= ETH_TEST_FL_FAILED;
13275 data[TG3_MEMORY_TEST] = 1;
13279 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13281 if (tg3_test_loopback(tp, data, doextlpbk))
13282 etest->flags |= ETH_TEST_FL_FAILED;
13284 tg3_full_unlock(tp);
13286 if (tg3_test_interrupt(tp) != 0) {
13287 etest->flags |= ETH_TEST_FL_FAILED;
13288 data[TG3_INTERRUPT_TEST] = 1;
13291 tg3_full_lock(tp, 0);
13293 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13294 if (netif_running(dev)) {
13295 tg3_flag_set(tp, INIT_COMPLETE);
13296 err2 = tg3_restart_hw(tp, 1);
13298 tg3_netif_start(tp);
13301 tg3_full_unlock(tp);
13303 if (irq_sync && !err2)
13306 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13307 tg3_power_down(tp);
13311 static int tg3_hwtstamp_ioctl(struct net_device *dev,
13312 struct ifreq *ifr, int cmd)
13314 struct tg3 *tp = netdev_priv(dev);
13315 struct hwtstamp_config stmpconf;
13317 if (!tg3_flag(tp, PTP_CAPABLE))
13320 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13323 if (stmpconf.flags)
13326 switch (stmpconf.tx_type) {
13327 case HWTSTAMP_TX_ON:
13328 tg3_flag_set(tp, TX_TSTAMP_EN);
13330 case HWTSTAMP_TX_OFF:
13331 tg3_flag_clear(tp, TX_TSTAMP_EN);
13337 switch (stmpconf.rx_filter) {
13338 case HWTSTAMP_FILTER_NONE:
13341 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13342 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13343 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13345 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13346 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13347 TG3_RX_PTP_CTL_SYNC_EVNT;
13349 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13350 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13351 TG3_RX_PTP_CTL_DELAY_REQ;
13353 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13354 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13355 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13357 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13358 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13359 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13361 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13362 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13363 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13365 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13366 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13367 TG3_RX_PTP_CTL_SYNC_EVNT;
13369 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13370 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13371 TG3_RX_PTP_CTL_SYNC_EVNT;
13373 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13374 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13375 TG3_RX_PTP_CTL_SYNC_EVNT;
13377 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13378 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13379 TG3_RX_PTP_CTL_DELAY_REQ;
13381 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13382 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13383 TG3_RX_PTP_CTL_DELAY_REQ;
13385 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13386 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13387 TG3_RX_PTP_CTL_DELAY_REQ;
13393 if (netif_running(dev) && tp->rxptpctl)
13394 tw32(TG3_RX_PTP_CTL,
13395 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13397 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13401 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13403 struct mii_ioctl_data *data = if_mii(ifr);
13404 struct tg3 *tp = netdev_priv(dev);
13407 if (tg3_flag(tp, USE_PHYLIB)) {
13408 struct phy_device *phydev;
13409 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13411 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
13412 return phy_mii_ioctl(phydev, ifr, cmd);
13417 data->phy_id = tp->phy_addr;
13420 case SIOCGMIIREG: {
13423 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13424 break; /* We have no PHY */
13426 if (!netif_running(dev))
13429 spin_lock_bh(&tp->lock);
13430 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13431 data->reg_num & 0x1f, &mii_regval);
13432 spin_unlock_bh(&tp->lock);
13434 data->val_out = mii_regval;
13440 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13441 break; /* We have no PHY */
13443 if (!netif_running(dev))
13446 spin_lock_bh(&tp->lock);
13447 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13448 data->reg_num & 0x1f, data->val_in);
13449 spin_unlock_bh(&tp->lock);
13453 case SIOCSHWTSTAMP:
13454 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13460 return -EOPNOTSUPP;
13463 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13465 struct tg3 *tp = netdev_priv(dev);
13467 memcpy(ec, &tp->coal, sizeof(*ec));
13471 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13473 struct tg3 *tp = netdev_priv(dev);
13474 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13475 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13477 if (!tg3_flag(tp, 5705_PLUS)) {
13478 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13479 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13480 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13481 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13484 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13485 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13486 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13487 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13488 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13489 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13490 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13491 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13492 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13493 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13496 /* No rx interrupts will be generated if both are zero */
13497 if ((ec->rx_coalesce_usecs == 0) &&
13498 (ec->rx_max_coalesced_frames == 0))
13501 /* No tx interrupts will be generated if both are zero */
13502 if ((ec->tx_coalesce_usecs == 0) &&
13503 (ec->tx_max_coalesced_frames == 0))
13506 /* Only copy relevant parameters, ignore all others. */
13507 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13508 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13509 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13510 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13511 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13512 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13513 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13514 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13515 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13517 if (netif_running(dev)) {
13518 tg3_full_lock(tp, 0);
13519 __tg3_set_coalesce(tp, &tp->coal);
13520 tg3_full_unlock(tp);
13525 static const struct ethtool_ops tg3_ethtool_ops = {
13526 .get_settings = tg3_get_settings,
13527 .set_settings = tg3_set_settings,
13528 .get_drvinfo = tg3_get_drvinfo,
13529 .get_regs_len = tg3_get_regs_len,
13530 .get_regs = tg3_get_regs,
13531 .get_wol = tg3_get_wol,
13532 .set_wol = tg3_set_wol,
13533 .get_msglevel = tg3_get_msglevel,
13534 .set_msglevel = tg3_set_msglevel,
13535 .nway_reset = tg3_nway_reset,
13536 .get_link = ethtool_op_get_link,
13537 .get_eeprom_len = tg3_get_eeprom_len,
13538 .get_eeprom = tg3_get_eeprom,
13539 .set_eeprom = tg3_set_eeprom,
13540 .get_ringparam = tg3_get_ringparam,
13541 .set_ringparam = tg3_set_ringparam,
13542 .get_pauseparam = tg3_get_pauseparam,
13543 .set_pauseparam = tg3_set_pauseparam,
13544 .self_test = tg3_self_test,
13545 .get_strings = tg3_get_strings,
13546 .set_phys_id = tg3_set_phys_id,
13547 .get_ethtool_stats = tg3_get_ethtool_stats,
13548 .get_coalesce = tg3_get_coalesce,
13549 .set_coalesce = tg3_set_coalesce,
13550 .get_sset_count = tg3_get_sset_count,
13551 .get_rxnfc = tg3_get_rxnfc,
13552 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
13553 .get_rxfh_indir = tg3_get_rxfh_indir,
13554 .set_rxfh_indir = tg3_set_rxfh_indir,
13555 .get_channels = tg3_get_channels,
13556 .set_channels = tg3_set_channels,
13557 .get_ts_info = tg3_get_ts_info,
13560 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13561 struct rtnl_link_stats64 *stats)
13563 struct tg3 *tp = netdev_priv(dev);
13565 spin_lock_bh(&tp->lock);
13566 if (!tp->hw_stats) {
13567 spin_unlock_bh(&tp->lock);
13568 return &tp->net_stats_prev;
13571 tg3_get_nstats(tp, stats);
13572 spin_unlock_bh(&tp->lock);
13577 static void tg3_set_rx_mode(struct net_device *dev)
13579 struct tg3 *tp = netdev_priv(dev);
13581 if (!netif_running(dev))
13584 tg3_full_lock(tp, 0);
13585 __tg3_set_rx_mode(dev);
13586 tg3_full_unlock(tp);
13589 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13592 dev->mtu = new_mtu;
13594 if (new_mtu > ETH_DATA_LEN) {
13595 if (tg3_flag(tp, 5780_CLASS)) {
13596 netdev_update_features(dev);
13597 tg3_flag_clear(tp, TSO_CAPABLE);
13599 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13602 if (tg3_flag(tp, 5780_CLASS)) {
13603 tg3_flag_set(tp, TSO_CAPABLE);
13604 netdev_update_features(dev);
13606 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13610 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13612 struct tg3 *tp = netdev_priv(dev);
13613 int err, reset_phy = 0;
13615 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13618 if (!netif_running(dev)) {
13619 /* We'll just catch it later when the
13622 tg3_set_mtu(dev, tp, new_mtu);
13628 tg3_netif_stop(tp);
13630 tg3_full_lock(tp, 1);
13632 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13634 tg3_set_mtu(dev, tp, new_mtu);
13636 /* Reset PHY, otherwise the read DMA engine will be in a mode that
13637 * breaks all requests to 256 bytes.
13639 if (tg3_asic_rev(tp) == ASIC_REV_57766)
13642 err = tg3_restart_hw(tp, reset_phy);
13645 tg3_netif_start(tp);
13647 tg3_full_unlock(tp);
13655 static const struct net_device_ops tg3_netdev_ops = {
13656 .ndo_open = tg3_open,
13657 .ndo_stop = tg3_close,
13658 .ndo_start_xmit = tg3_start_xmit,
13659 .ndo_get_stats64 = tg3_get_stats64,
13660 .ndo_validate_addr = eth_validate_addr,
13661 .ndo_set_rx_mode = tg3_set_rx_mode,
13662 .ndo_set_mac_address = tg3_set_mac_addr,
13663 .ndo_do_ioctl = tg3_ioctl,
13664 .ndo_tx_timeout = tg3_tx_timeout,
13665 .ndo_change_mtu = tg3_change_mtu,
13666 .ndo_fix_features = tg3_fix_features,
13667 .ndo_set_features = tg3_set_features,
13668 #ifdef CONFIG_NET_POLL_CONTROLLER
13669 .ndo_poll_controller = tg3_poll_controller,
13673 static void tg3_get_eeprom_size(struct tg3 *tp)
13675 u32 cursize, val, magic;
13677 tp->nvram_size = EEPROM_CHIP_SIZE;
13679 if (tg3_nvram_read(tp, 0, &magic) != 0)
13682 if ((magic != TG3_EEPROM_MAGIC) &&
13683 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13684 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13688 * Size the chip by reading offsets at increasing powers of two.
13689 * When we encounter our validation signature, we know the addressing
13690 * has wrapped around, and thus have our chip size.
13694 while (cursize < tp->nvram_size) {
13695 if (tg3_nvram_read(tp, cursize, &val) != 0)
13704 tp->nvram_size = cursize;
13707 static void tg3_get_nvram_size(struct tg3 *tp)
13711 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13714 /* Selfboot format */
13715 if (val != TG3_EEPROM_MAGIC) {
13716 tg3_get_eeprom_size(tp);
13720 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13722 /* This is confusing. We want to operate on the
13723 * 16-bit value at offset 0xf2. The tg3_nvram_read()
13724 * call will read from NVRAM and byteswap the data
13725 * according to the byteswapping settings for all
13726 * other register accesses. This ensures the data we
13727 * want will always reside in the lower 16-bits.
13728 * However, the data in NVRAM is in LE format, which
13729 * means the data from the NVRAM read will always be
13730 * opposite the endianness of the CPU. The 16-bit
13731 * byteswap then brings the data to CPU endianness.
13733 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13737 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13740 static void tg3_get_nvram_info(struct tg3 *tp)
13744 nvcfg1 = tr32(NVRAM_CFG1);
13745 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
13746 tg3_flag_set(tp, FLASH);
13748 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13749 tw32(NVRAM_CFG1, nvcfg1);
13752 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
13753 tg3_flag(tp, 5780_CLASS)) {
13754 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
13755 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
13756 tp->nvram_jedecnum = JEDEC_ATMEL;
13757 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13758 tg3_flag_set(tp, NVRAM_BUFFERED);
13760 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
13761 tp->nvram_jedecnum = JEDEC_ATMEL;
13762 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
13764 case FLASH_VENDOR_ATMEL_EEPROM:
13765 tp->nvram_jedecnum = JEDEC_ATMEL;
13766 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13767 tg3_flag_set(tp, NVRAM_BUFFERED);
13769 case FLASH_VENDOR_ST:
13770 tp->nvram_jedecnum = JEDEC_ST;
13771 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
13772 tg3_flag_set(tp, NVRAM_BUFFERED);
13774 case FLASH_VENDOR_SAIFUN:
13775 tp->nvram_jedecnum = JEDEC_SAIFUN;
13776 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
13778 case FLASH_VENDOR_SST_SMALL:
13779 case FLASH_VENDOR_SST_LARGE:
13780 tp->nvram_jedecnum = JEDEC_SST;
13781 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
13785 tp->nvram_jedecnum = JEDEC_ATMEL;
13786 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13787 tg3_flag_set(tp, NVRAM_BUFFERED);
13791 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13793 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13794 case FLASH_5752PAGE_SIZE_256:
13795 tp->nvram_pagesize = 256;
13797 case FLASH_5752PAGE_SIZE_512:
13798 tp->nvram_pagesize = 512;
13800 case FLASH_5752PAGE_SIZE_1K:
13801 tp->nvram_pagesize = 1024;
13803 case FLASH_5752PAGE_SIZE_2K:
13804 tp->nvram_pagesize = 2048;
13806 case FLASH_5752PAGE_SIZE_4K:
13807 tp->nvram_pagesize = 4096;
13809 case FLASH_5752PAGE_SIZE_264:
13810 tp->nvram_pagesize = 264;
13812 case FLASH_5752PAGE_SIZE_528:
13813 tp->nvram_pagesize = 528;
13818 static void tg3_get_5752_nvram_info(struct tg3 *tp)
13822 nvcfg1 = tr32(NVRAM_CFG1);
13824 /* NVRAM protection for TPM */
13825 if (nvcfg1 & (1 << 27))
13826 tg3_flag_set(tp, PROTECTED_NVRAM);
13828 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13829 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
13830 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
13831 tp->nvram_jedecnum = JEDEC_ATMEL;
13832 tg3_flag_set(tp, NVRAM_BUFFERED);
13834 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13835 tp->nvram_jedecnum = JEDEC_ATMEL;
13836 tg3_flag_set(tp, NVRAM_BUFFERED);
13837 tg3_flag_set(tp, FLASH);
13839 case FLASH_5752VENDOR_ST_M45PE10:
13840 case FLASH_5752VENDOR_ST_M45PE20:
13841 case FLASH_5752VENDOR_ST_M45PE40:
13842 tp->nvram_jedecnum = JEDEC_ST;
13843 tg3_flag_set(tp, NVRAM_BUFFERED);
13844 tg3_flag_set(tp, FLASH);
13848 if (tg3_flag(tp, FLASH)) {
13849 tg3_nvram_get_pagesize(tp, nvcfg1);
13851 /* For eeprom, set pagesize to maximum eeprom size */
13852 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13854 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13855 tw32(NVRAM_CFG1, nvcfg1);
13859 static void tg3_get_5755_nvram_info(struct tg3 *tp)
13861 u32 nvcfg1, protect = 0;
13863 nvcfg1 = tr32(NVRAM_CFG1);
13865 /* NVRAM protection for TPM */
13866 if (nvcfg1 & (1 << 27)) {
13867 tg3_flag_set(tp, PROTECTED_NVRAM);
13871 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13873 case FLASH_5755VENDOR_ATMEL_FLASH_1:
13874 case FLASH_5755VENDOR_ATMEL_FLASH_2:
13875 case FLASH_5755VENDOR_ATMEL_FLASH_3:
13876 case FLASH_5755VENDOR_ATMEL_FLASH_5:
13877 tp->nvram_jedecnum = JEDEC_ATMEL;
13878 tg3_flag_set(tp, NVRAM_BUFFERED);
13879 tg3_flag_set(tp, FLASH);
13880 tp->nvram_pagesize = 264;
13881 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
13882 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
13883 tp->nvram_size = (protect ? 0x3e200 :
13884 TG3_NVRAM_SIZE_512KB);
13885 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
13886 tp->nvram_size = (protect ? 0x1f200 :
13887 TG3_NVRAM_SIZE_256KB);
13889 tp->nvram_size = (protect ? 0x1f200 :
13890 TG3_NVRAM_SIZE_128KB);
13892 case FLASH_5752VENDOR_ST_M45PE10:
13893 case FLASH_5752VENDOR_ST_M45PE20:
13894 case FLASH_5752VENDOR_ST_M45PE40:
13895 tp->nvram_jedecnum = JEDEC_ST;
13896 tg3_flag_set(tp, NVRAM_BUFFERED);
13897 tg3_flag_set(tp, FLASH);
13898 tp->nvram_pagesize = 256;
13899 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
13900 tp->nvram_size = (protect ?
13901 TG3_NVRAM_SIZE_64KB :
13902 TG3_NVRAM_SIZE_128KB);
13903 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
13904 tp->nvram_size = (protect ?
13905 TG3_NVRAM_SIZE_64KB :
13906 TG3_NVRAM_SIZE_256KB);
13908 tp->nvram_size = (protect ?
13909 TG3_NVRAM_SIZE_128KB :
13910 TG3_NVRAM_SIZE_512KB);
13915 static void tg3_get_5787_nvram_info(struct tg3 *tp)
13919 nvcfg1 = tr32(NVRAM_CFG1);
13921 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13922 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
13923 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13924 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
13925 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13926 tp->nvram_jedecnum = JEDEC_ATMEL;
13927 tg3_flag_set(tp, NVRAM_BUFFERED);
13928 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13930 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13931 tw32(NVRAM_CFG1, nvcfg1);
13933 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13934 case FLASH_5755VENDOR_ATMEL_FLASH_1:
13935 case FLASH_5755VENDOR_ATMEL_FLASH_2:
13936 case FLASH_5755VENDOR_ATMEL_FLASH_3:
13937 tp->nvram_jedecnum = JEDEC_ATMEL;
13938 tg3_flag_set(tp, NVRAM_BUFFERED);
13939 tg3_flag_set(tp, FLASH);
13940 tp->nvram_pagesize = 264;
13942 case FLASH_5752VENDOR_ST_M45PE10:
13943 case FLASH_5752VENDOR_ST_M45PE20:
13944 case FLASH_5752VENDOR_ST_M45PE40:
13945 tp->nvram_jedecnum = JEDEC_ST;
13946 tg3_flag_set(tp, NVRAM_BUFFERED);
13947 tg3_flag_set(tp, FLASH);
13948 tp->nvram_pagesize = 256;
13953 static void tg3_get_5761_nvram_info(struct tg3 *tp)
13955 u32 nvcfg1, protect = 0;
13957 nvcfg1 = tr32(NVRAM_CFG1);
13959 /* NVRAM protection for TPM */
13960 if (nvcfg1 & (1 << 27)) {
13961 tg3_flag_set(tp, PROTECTED_NVRAM);
13965 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13967 case FLASH_5761VENDOR_ATMEL_ADB021D:
13968 case FLASH_5761VENDOR_ATMEL_ADB041D:
13969 case FLASH_5761VENDOR_ATMEL_ADB081D:
13970 case FLASH_5761VENDOR_ATMEL_ADB161D:
13971 case FLASH_5761VENDOR_ATMEL_MDB021D:
13972 case FLASH_5761VENDOR_ATMEL_MDB041D:
13973 case FLASH_5761VENDOR_ATMEL_MDB081D:
13974 case FLASH_5761VENDOR_ATMEL_MDB161D:
13975 tp->nvram_jedecnum = JEDEC_ATMEL;
13976 tg3_flag_set(tp, NVRAM_BUFFERED);
13977 tg3_flag_set(tp, FLASH);
13978 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13979 tp->nvram_pagesize = 256;
13981 case FLASH_5761VENDOR_ST_A_M45PE20:
13982 case FLASH_5761VENDOR_ST_A_M45PE40:
13983 case FLASH_5761VENDOR_ST_A_M45PE80:
13984 case FLASH_5761VENDOR_ST_A_M45PE16:
13985 case FLASH_5761VENDOR_ST_M_M45PE20:
13986 case FLASH_5761VENDOR_ST_M_M45PE40:
13987 case FLASH_5761VENDOR_ST_M_M45PE80:
13988 case FLASH_5761VENDOR_ST_M_M45PE16:
13989 tp->nvram_jedecnum = JEDEC_ST;
13990 tg3_flag_set(tp, NVRAM_BUFFERED);
13991 tg3_flag_set(tp, FLASH);
13992 tp->nvram_pagesize = 256;
13997 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14000 case FLASH_5761VENDOR_ATMEL_ADB161D:
14001 case FLASH_5761VENDOR_ATMEL_MDB161D:
14002 case FLASH_5761VENDOR_ST_A_M45PE16:
14003 case FLASH_5761VENDOR_ST_M_M45PE16:
14004 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14006 case FLASH_5761VENDOR_ATMEL_ADB081D:
14007 case FLASH_5761VENDOR_ATMEL_MDB081D:
14008 case FLASH_5761VENDOR_ST_A_M45PE80:
14009 case FLASH_5761VENDOR_ST_M_M45PE80:
14010 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14012 case FLASH_5761VENDOR_ATMEL_ADB041D:
14013 case FLASH_5761VENDOR_ATMEL_MDB041D:
14014 case FLASH_5761VENDOR_ST_A_M45PE40:
14015 case FLASH_5761VENDOR_ST_M_M45PE40:
14016 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14018 case FLASH_5761VENDOR_ATMEL_ADB021D:
14019 case FLASH_5761VENDOR_ATMEL_MDB021D:
14020 case FLASH_5761VENDOR_ST_A_M45PE20:
14021 case FLASH_5761VENDOR_ST_M_M45PE20:
14022 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14028 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14030 tp->nvram_jedecnum = JEDEC_ATMEL;
14031 tg3_flag_set(tp, NVRAM_BUFFERED);
14032 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14035 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14039 nvcfg1 = tr32(NVRAM_CFG1);
14041 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14042 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14043 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14044 tp->nvram_jedecnum = JEDEC_ATMEL;
14045 tg3_flag_set(tp, NVRAM_BUFFERED);
14046 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14048 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14049 tw32(NVRAM_CFG1, nvcfg1);
14051 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14052 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14053 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14054 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14055 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14056 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14057 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14058 tp->nvram_jedecnum = JEDEC_ATMEL;
14059 tg3_flag_set(tp, NVRAM_BUFFERED);
14060 tg3_flag_set(tp, FLASH);
14062 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14063 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14064 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14065 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14066 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14068 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14069 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14070 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14072 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14073 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14074 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14078 case FLASH_5752VENDOR_ST_M45PE10:
14079 case FLASH_5752VENDOR_ST_M45PE20:
14080 case FLASH_5752VENDOR_ST_M45PE40:
14081 tp->nvram_jedecnum = JEDEC_ST;
14082 tg3_flag_set(tp, NVRAM_BUFFERED);
14083 tg3_flag_set(tp, FLASH);
14085 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14086 case FLASH_5752VENDOR_ST_M45PE10:
14087 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14089 case FLASH_5752VENDOR_ST_M45PE20:
14090 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14092 case FLASH_5752VENDOR_ST_M45PE40:
14093 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14098 tg3_flag_set(tp, NO_NVRAM);
14102 tg3_nvram_get_pagesize(tp, nvcfg1);
14103 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14104 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14108 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14112 nvcfg1 = tr32(NVRAM_CFG1);
14114 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14115 case FLASH_5717VENDOR_ATMEL_EEPROM:
14116 case FLASH_5717VENDOR_MICRO_EEPROM:
14117 tp->nvram_jedecnum = JEDEC_ATMEL;
14118 tg3_flag_set(tp, NVRAM_BUFFERED);
14119 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14121 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14122 tw32(NVRAM_CFG1, nvcfg1);
14124 case FLASH_5717VENDOR_ATMEL_MDB011D:
14125 case FLASH_5717VENDOR_ATMEL_ADB011B:
14126 case FLASH_5717VENDOR_ATMEL_ADB011D:
14127 case FLASH_5717VENDOR_ATMEL_MDB021D:
14128 case FLASH_5717VENDOR_ATMEL_ADB021B:
14129 case FLASH_5717VENDOR_ATMEL_ADB021D:
14130 case FLASH_5717VENDOR_ATMEL_45USPT:
14131 tp->nvram_jedecnum = JEDEC_ATMEL;
14132 tg3_flag_set(tp, NVRAM_BUFFERED);
14133 tg3_flag_set(tp, FLASH);
14135 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14136 case FLASH_5717VENDOR_ATMEL_MDB021D:
14137 /* Detect size with tg3_nvram_get_size() */
14139 case FLASH_5717VENDOR_ATMEL_ADB021B:
14140 case FLASH_5717VENDOR_ATMEL_ADB021D:
14141 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14144 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14148 case FLASH_5717VENDOR_ST_M_M25PE10:
14149 case FLASH_5717VENDOR_ST_A_M25PE10:
14150 case FLASH_5717VENDOR_ST_M_M45PE10:
14151 case FLASH_5717VENDOR_ST_A_M45PE10:
14152 case FLASH_5717VENDOR_ST_M_M25PE20:
14153 case FLASH_5717VENDOR_ST_A_M25PE20:
14154 case FLASH_5717VENDOR_ST_M_M45PE20:
14155 case FLASH_5717VENDOR_ST_A_M45PE20:
14156 case FLASH_5717VENDOR_ST_25USPT:
14157 case FLASH_5717VENDOR_ST_45USPT:
14158 tp->nvram_jedecnum = JEDEC_ST;
14159 tg3_flag_set(tp, NVRAM_BUFFERED);
14160 tg3_flag_set(tp, FLASH);
14162 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14163 case FLASH_5717VENDOR_ST_M_M25PE20:
14164 case FLASH_5717VENDOR_ST_M_M45PE20:
14165 /* Detect size with tg3_nvram_get_size() */
14167 case FLASH_5717VENDOR_ST_A_M25PE20:
14168 case FLASH_5717VENDOR_ST_A_M45PE20:
14169 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14172 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14177 tg3_flag_set(tp, NO_NVRAM);
14181 tg3_nvram_get_pagesize(tp, nvcfg1);
14182 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14183 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14186 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14188 u32 nvcfg1, nvmpinstrp;
14190 nvcfg1 = tr32(NVRAM_CFG1);
14191 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14193 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14194 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14195 tg3_flag_set(tp, NO_NVRAM);
14199 switch (nvmpinstrp) {
14200 case FLASH_5762_EEPROM_HD:
14201 nvmpinstrp = FLASH_5720_EEPROM_HD;
14203 case FLASH_5762_EEPROM_LD:
14204 nvmpinstrp = FLASH_5720_EEPROM_LD;
14206 case FLASH_5720VENDOR_M_ST_M45PE20:
14207 /* This pinstrap supports multiple sizes, so force it
14208 * to read the actual size from location 0xf0.
14210 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14215 switch (nvmpinstrp) {
14216 case FLASH_5720_EEPROM_HD:
14217 case FLASH_5720_EEPROM_LD:
14218 tp->nvram_jedecnum = JEDEC_ATMEL;
14219 tg3_flag_set(tp, NVRAM_BUFFERED);
14221 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14222 tw32(NVRAM_CFG1, nvcfg1);
14223 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14224 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14226 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14228 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14229 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14230 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14231 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14232 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14233 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14234 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14235 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14236 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14237 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14238 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14239 case FLASH_5720VENDOR_ATMEL_45USPT:
14240 tp->nvram_jedecnum = JEDEC_ATMEL;
14241 tg3_flag_set(tp, NVRAM_BUFFERED);
14242 tg3_flag_set(tp, FLASH);
14244 switch (nvmpinstrp) {
14245 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14246 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14247 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14248 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14250 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14251 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14252 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14253 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14255 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14256 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14257 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14260 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14261 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14265 case FLASH_5720VENDOR_M_ST_M25PE10:
14266 case FLASH_5720VENDOR_M_ST_M45PE10:
14267 case FLASH_5720VENDOR_A_ST_M25PE10:
14268 case FLASH_5720VENDOR_A_ST_M45PE10:
14269 case FLASH_5720VENDOR_M_ST_M25PE20:
14270 case FLASH_5720VENDOR_M_ST_M45PE20:
14271 case FLASH_5720VENDOR_A_ST_M25PE20:
14272 case FLASH_5720VENDOR_A_ST_M45PE20:
14273 case FLASH_5720VENDOR_M_ST_M25PE40:
14274 case FLASH_5720VENDOR_M_ST_M45PE40:
14275 case FLASH_5720VENDOR_A_ST_M25PE40:
14276 case FLASH_5720VENDOR_A_ST_M45PE40:
14277 case FLASH_5720VENDOR_M_ST_M25PE80:
14278 case FLASH_5720VENDOR_M_ST_M45PE80:
14279 case FLASH_5720VENDOR_A_ST_M25PE80:
14280 case FLASH_5720VENDOR_A_ST_M45PE80:
14281 case FLASH_5720VENDOR_ST_25USPT:
14282 case FLASH_5720VENDOR_ST_45USPT:
14283 tp->nvram_jedecnum = JEDEC_ST;
14284 tg3_flag_set(tp, NVRAM_BUFFERED);
14285 tg3_flag_set(tp, FLASH);
14287 switch (nvmpinstrp) {
14288 case FLASH_5720VENDOR_M_ST_M25PE20:
14289 case FLASH_5720VENDOR_M_ST_M45PE20:
14290 case FLASH_5720VENDOR_A_ST_M25PE20:
14291 case FLASH_5720VENDOR_A_ST_M45PE20:
14292 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14294 case FLASH_5720VENDOR_M_ST_M25PE40:
14295 case FLASH_5720VENDOR_M_ST_M45PE40:
14296 case FLASH_5720VENDOR_A_ST_M25PE40:
14297 case FLASH_5720VENDOR_A_ST_M45PE40:
14298 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14300 case FLASH_5720VENDOR_M_ST_M25PE80:
14301 case FLASH_5720VENDOR_M_ST_M45PE80:
14302 case FLASH_5720VENDOR_A_ST_M25PE80:
14303 case FLASH_5720VENDOR_A_ST_M45PE80:
14304 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14307 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14308 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14313 tg3_flag_set(tp, NO_NVRAM);
14317 tg3_nvram_get_pagesize(tp, nvcfg1);
14318 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14319 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14321 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14324 if (tg3_nvram_read(tp, 0, &val))
14327 if (val != TG3_EEPROM_MAGIC &&
14328 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14329 tg3_flag_set(tp, NO_NVRAM);
14333 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14334 static void tg3_nvram_init(struct tg3 *tp)
14336 if (tg3_flag(tp, IS_SSB_CORE)) {
14337 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14338 tg3_flag_clear(tp, NVRAM);
14339 tg3_flag_clear(tp, NVRAM_BUFFERED);
14340 tg3_flag_set(tp, NO_NVRAM);
14344 tw32_f(GRC_EEPROM_ADDR,
14345 (EEPROM_ADDR_FSM_RESET |
14346 (EEPROM_DEFAULT_CLOCK_PERIOD <<
14347 EEPROM_ADDR_CLKPERD_SHIFT)));
14351 /* Enable seeprom accesses. */
14352 tw32_f(GRC_LOCAL_CTRL,
14353 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14356 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14357 tg3_asic_rev(tp) != ASIC_REV_5701) {
14358 tg3_flag_set(tp, NVRAM);
14360 if (tg3_nvram_lock(tp)) {
14361 netdev_warn(tp->dev,
14362 "Cannot get nvram lock, %s failed\n",
14366 tg3_enable_nvram_access(tp);
14368 tp->nvram_size = 0;
14370 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14371 tg3_get_5752_nvram_info(tp);
14372 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14373 tg3_get_5755_nvram_info(tp);
14374 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14375 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14376 tg3_asic_rev(tp) == ASIC_REV_5785)
14377 tg3_get_5787_nvram_info(tp);
14378 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14379 tg3_get_5761_nvram_info(tp);
14380 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14381 tg3_get_5906_nvram_info(tp);
14382 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14383 tg3_flag(tp, 57765_CLASS))
14384 tg3_get_57780_nvram_info(tp);
14385 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14386 tg3_asic_rev(tp) == ASIC_REV_5719)
14387 tg3_get_5717_nvram_info(tp);
14388 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14389 tg3_asic_rev(tp) == ASIC_REV_5762)
14390 tg3_get_5720_nvram_info(tp);
14392 tg3_get_nvram_info(tp);
14394 if (tp->nvram_size == 0)
14395 tg3_get_nvram_size(tp);
14397 tg3_disable_nvram_access(tp);
14398 tg3_nvram_unlock(tp);
14401 tg3_flag_clear(tp, NVRAM);
14402 tg3_flag_clear(tp, NVRAM_BUFFERED);
14404 tg3_get_eeprom_size(tp);
14408 struct subsys_tbl_ent {
14409 u16 subsys_vendor, subsys_devid;
14413 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14414 /* Broadcom boards. */
14415 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14416 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14417 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14418 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14419 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14420 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14421 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14422 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14423 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14424 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14425 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14426 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14427 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14428 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14429 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14430 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14431 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14432 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14433 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14434 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14435 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14436 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
14439 { TG3PCI_SUBVENDOR_ID_3COM,
14440 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14441 { TG3PCI_SUBVENDOR_ID_3COM,
14442 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14443 { TG3PCI_SUBVENDOR_ID_3COM,
14444 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14445 { TG3PCI_SUBVENDOR_ID_3COM,
14446 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14447 { TG3PCI_SUBVENDOR_ID_3COM,
14448 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
14451 { TG3PCI_SUBVENDOR_ID_DELL,
14452 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14453 { TG3PCI_SUBVENDOR_ID_DELL,
14454 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14455 { TG3PCI_SUBVENDOR_ID_DELL,
14456 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14457 { TG3PCI_SUBVENDOR_ID_DELL,
14458 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14460 /* Compaq boards. */
14461 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14462 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14463 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14464 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14465 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14466 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14467 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14468 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14469 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14470 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
14473 { TG3PCI_SUBVENDOR_ID_IBM,
14474 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
14477 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14481 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14482 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14483 tp->pdev->subsystem_vendor) &&
14484 (subsys_id_to_phy_id[i].subsys_devid ==
14485 tp->pdev->subsystem_device))
14486 return &subsys_id_to_phy_id[i];
14491 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14495 tp->phy_id = TG3_PHY_ID_INVALID;
14496 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14498 /* Assume an onboard device and WOL capable by default. */
14499 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14500 tg3_flag_set(tp, WOL_CAP);
14502 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14503 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14504 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14505 tg3_flag_set(tp, IS_NIC);
14507 val = tr32(VCPU_CFGSHDW);
14508 if (val & VCPU_CFGSHDW_ASPM_DBNC)
14509 tg3_flag_set(tp, ASPM_WORKAROUND);
14510 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14511 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14512 tg3_flag_set(tp, WOL_ENABLE);
14513 device_set_wakeup_enable(&tp->pdev->dev, true);
14518 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14519 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14520 u32 nic_cfg, led_cfg;
14521 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
14522 int eeprom_phy_serdes = 0;
14524 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14525 tp->nic_sram_data_cfg = nic_cfg;
14527 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14528 ver >>= NIC_SRAM_DATA_VER_SHIFT;
14529 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14530 tg3_asic_rev(tp) != ASIC_REV_5701 &&
14531 tg3_asic_rev(tp) != ASIC_REV_5703 &&
14532 (ver > 0) && (ver < 0x100))
14533 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14535 if (tg3_asic_rev(tp) == ASIC_REV_5785)
14536 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14538 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14539 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14540 eeprom_phy_serdes = 1;
14542 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14543 if (nic_phy_id != 0) {
14544 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14545 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14547 eeprom_phy_id = (id1 >> 16) << 10;
14548 eeprom_phy_id |= (id2 & 0xfc00) << 16;
14549 eeprom_phy_id |= (id2 & 0x03ff) << 0;
14553 tp->phy_id = eeprom_phy_id;
14554 if (eeprom_phy_serdes) {
14555 if (!tg3_flag(tp, 5705_PLUS))
14556 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14558 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14561 if (tg3_flag(tp, 5750_PLUS))
14562 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14563 SHASTA_EXT_LED_MODE_MASK);
14565 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14569 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14570 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14573 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14574 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14577 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14578 tp->led_ctrl = LED_CTRL_MODE_MAC;
14580 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
14581 * read on some older 5700/5701 bootcode.
14583 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
14584 tg3_asic_rev(tp) == ASIC_REV_5701)
14585 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14589 case SHASTA_EXT_LED_SHARED:
14590 tp->led_ctrl = LED_CTRL_MODE_SHARED;
14591 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
14592 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
14593 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14594 LED_CTRL_MODE_PHY_2);
14597 case SHASTA_EXT_LED_MAC:
14598 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14601 case SHASTA_EXT_LED_COMBO:
14602 tp->led_ctrl = LED_CTRL_MODE_COMBO;
14603 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
14604 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14605 LED_CTRL_MODE_PHY_2);
14610 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
14611 tg3_asic_rev(tp) == ASIC_REV_5701) &&
14612 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14613 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14615 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
14616 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14618 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14619 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14620 if ((tp->pdev->subsystem_vendor ==
14621 PCI_VENDOR_ID_ARIMA) &&
14622 (tp->pdev->subsystem_device == 0x205a ||
14623 tp->pdev->subsystem_device == 0x2063))
14624 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14626 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14627 tg3_flag_set(tp, IS_NIC);
14630 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14631 tg3_flag_set(tp, ENABLE_ASF);
14632 if (tg3_flag(tp, 5750_PLUS))
14633 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14636 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14637 tg3_flag(tp, 5750_PLUS))
14638 tg3_flag_set(tp, ENABLE_APE);
14640 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14641 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14642 tg3_flag_clear(tp, WOL_CAP);
14644 if (tg3_flag(tp, WOL_CAP) &&
14645 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14646 tg3_flag_set(tp, WOL_ENABLE);
14647 device_set_wakeup_enable(&tp->pdev->dev, true);
14650 if (cfg2 & (1 << 17))
14651 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14653 /* serdes signal pre-emphasis in register 0x590 set by */
14654 /* bootcode if bit 18 is set */
14655 if (cfg2 & (1 << 18))
14656 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14658 if ((tg3_flag(tp, 57765_PLUS) ||
14659 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
14660 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
14661 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14662 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
14664 if (tg3_flag(tp, PCI_EXPRESS)) {
14667 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14668 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
14669 !tg3_flag(tp, 57765_PLUS) &&
14670 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
14671 tg3_flag_set(tp, ASPM_WORKAROUND);
14672 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
14673 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
14674 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
14675 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
14678 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
14679 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
14680 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
14681 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
14682 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
14683 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
14686 if (tg3_flag(tp, WOL_CAP))
14687 device_set_wakeup_enable(&tp->pdev->dev,
14688 tg3_flag(tp, WOL_ENABLE));
14690 device_set_wakeup_capable(&tp->pdev->dev, false);
14693 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
14696 u32 val2, off = offset * 8;
14698 err = tg3_nvram_lock(tp);
14702 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
14703 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
14704 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
14705 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
14708 for (i = 0; i < 100; i++) {
14709 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
14710 if (val2 & APE_OTP_STATUS_CMD_DONE) {
14711 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
14717 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
14719 tg3_nvram_unlock(tp);
14720 if (val2 & APE_OTP_STATUS_CMD_DONE)
14726 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14731 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14732 tw32(OTP_CTRL, cmd);
14734 /* Wait for up to 1 ms for command to execute. */
14735 for (i = 0; i < 100; i++) {
14736 val = tr32(OTP_STATUS);
14737 if (val & OTP_STATUS_CMD_DONE)
14742 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14745 /* Read the gphy configuration from the OTP region of the chip. The gphy
14746 * configuration is a 32-bit value that straddles the alignment boundary.
14747 * We do two 32-bit reads and then shift and merge the results.
14749 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
14751 u32 bhalf_otp, thalf_otp;
14753 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
14755 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
14758 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
14760 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14763 thalf_otp = tr32(OTP_READ_DATA);
14765 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
14767 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14770 bhalf_otp = tr32(OTP_READ_DATA);
14772 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
14775 static void tg3_phy_init_link_config(struct tg3 *tp)
14777 u32 adv = ADVERTISED_Autoneg;
14779 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14780 adv |= ADVERTISED_1000baseT_Half |
14781 ADVERTISED_1000baseT_Full;
14783 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14784 adv |= ADVERTISED_100baseT_Half |
14785 ADVERTISED_100baseT_Full |
14786 ADVERTISED_10baseT_Half |
14787 ADVERTISED_10baseT_Full |
14790 adv |= ADVERTISED_FIBRE;
14792 tp->link_config.advertising = adv;
14793 tp->link_config.speed = SPEED_UNKNOWN;
14794 tp->link_config.duplex = DUPLEX_UNKNOWN;
14795 tp->link_config.autoneg = AUTONEG_ENABLE;
14796 tp->link_config.active_speed = SPEED_UNKNOWN;
14797 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
14802 static int tg3_phy_probe(struct tg3 *tp)
14804 u32 hw_phy_id_1, hw_phy_id_2;
14805 u32 hw_phy_id, hw_phy_id_masked;
14808 /* flow control autonegotiation is default behavior */
14809 tg3_flag_set(tp, PAUSE_AUTONEG);
14810 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14812 if (tg3_flag(tp, ENABLE_APE)) {
14813 switch (tp->pci_fn) {
14815 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
14818 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
14821 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
14824 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
14829 if (!tg3_flag(tp, ENABLE_ASF) &&
14830 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14831 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14832 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
14833 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
14835 if (tg3_flag(tp, USE_PHYLIB))
14836 return tg3_phy_init(tp);
14838 /* Reading the PHY ID register can conflict with ASF
14839 * firmware access to the PHY hardware.
14842 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
14843 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
14845 /* Now read the physical PHY_ID from the chip and verify
14846 * that it is sane. If it doesn't look good, we fall back
14847 * to either the hard-coded table based PHY_ID and failing
14848 * that the value found in the eeprom area.
14850 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
14851 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
14853 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
14854 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
14855 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
14857 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
14860 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
14861 tp->phy_id = hw_phy_id;
14862 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
14863 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14865 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
14867 if (tp->phy_id != TG3_PHY_ID_INVALID) {
14868 /* Do nothing, phy ID already set up in
14869 * tg3_get_eeprom_hw_cfg().
14872 struct subsys_tbl_ent *p;
14874 /* No eeprom signature? Try the hardcoded
14875 * subsys device table.
14877 p = tg3_lookup_by_subsys(tp);
14879 tp->phy_id = p->phy_id;
14880 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
14881 /* For now we saw the IDs 0xbc050cd0,
14882 * 0xbc050f80 and 0xbc050c30 on devices
14883 * connected to an BCM4785 and there are
14884 * probably more. Just assume that the phy is
14885 * supported when it is connected to a SSB core
14892 tp->phy_id == TG3_PHY_ID_BCM8002)
14893 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14897 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14898 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
14899 tg3_asic_rev(tp) == ASIC_REV_5720 ||
14900 tg3_asic_rev(tp) == ASIC_REV_57766 ||
14901 tg3_asic_rev(tp) == ASIC_REV_5762 ||
14902 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
14903 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
14904 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
14905 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
14906 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
14908 tg3_phy_init_link_config(tp);
14910 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
14911 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14912 !tg3_flag(tp, ENABLE_APE) &&
14913 !tg3_flag(tp, ENABLE_ASF)) {
14916 tg3_readphy(tp, MII_BMSR, &bmsr);
14917 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
14918 (bmsr & BMSR_LSTATUS))
14919 goto skip_phy_reset;
14921 err = tg3_phy_reset(tp);
14925 tg3_phy_set_wirespeed(tp);
14927 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
14928 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
14929 tp->link_config.flowctrl);
14931 tg3_writephy(tp, MII_BMCR,
14932 BMCR_ANENABLE | BMCR_ANRESTART);
14937 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
14938 err = tg3_init_5401phy_dsp(tp);
14942 err = tg3_init_5401phy_dsp(tp);
14948 static void tg3_read_vpd(struct tg3 *tp)
14951 unsigned int block_end, rosize, len;
14955 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
14959 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
14961 goto out_not_found;
14963 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
14964 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
14965 i += PCI_VPD_LRDT_TAG_SIZE;
14967 if (block_end > vpdlen)
14968 goto out_not_found;
14970 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14971 PCI_VPD_RO_KEYWORD_MFR_ID);
14973 len = pci_vpd_info_field_size(&vpd_data[j]);
14975 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14976 if (j + len > block_end || len != 4 ||
14977 memcmp(&vpd_data[j], "1028", 4))
14980 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14981 PCI_VPD_RO_KEYWORD_VENDOR0);
14985 len = pci_vpd_info_field_size(&vpd_data[j]);
14987 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14988 if (j + len > block_end)
14991 if (len >= sizeof(tp->fw_ver))
14992 len = sizeof(tp->fw_ver) - 1;
14993 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
14994 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
14999 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15000 PCI_VPD_RO_KEYWORD_PARTNO);
15002 goto out_not_found;
15004 len = pci_vpd_info_field_size(&vpd_data[i]);
15006 i += PCI_VPD_INFO_FLD_HDR_SIZE;
15007 if (len > TG3_BPN_SIZE ||
15008 (len + i) > vpdlen)
15009 goto out_not_found;
15011 memcpy(tp->board_part_number, &vpd_data[i], len);
15015 if (tp->board_part_number[0])
15019 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15020 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15021 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15022 strcpy(tp->board_part_number, "BCM5717");
15023 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15024 strcpy(tp->board_part_number, "BCM5718");
15027 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15028 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15029 strcpy(tp->board_part_number, "BCM57780");
15030 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15031 strcpy(tp->board_part_number, "BCM57760");
15032 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15033 strcpy(tp->board_part_number, "BCM57790");
15034 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15035 strcpy(tp->board_part_number, "BCM57788");
15038 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15039 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15040 strcpy(tp->board_part_number, "BCM57761");
15041 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15042 strcpy(tp->board_part_number, "BCM57765");
15043 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15044 strcpy(tp->board_part_number, "BCM57781");
15045 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15046 strcpy(tp->board_part_number, "BCM57785");
15047 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15048 strcpy(tp->board_part_number, "BCM57791");
15049 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15050 strcpy(tp->board_part_number, "BCM57795");
15053 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15054 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15055 strcpy(tp->board_part_number, "BCM57762");
15056 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15057 strcpy(tp->board_part_number, "BCM57766");
15058 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15059 strcpy(tp->board_part_number, "BCM57782");
15060 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15061 strcpy(tp->board_part_number, "BCM57786");
15064 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15065 strcpy(tp->board_part_number, "BCM95906");
15068 strcpy(tp->board_part_number, "none");
15072 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15076 if (tg3_nvram_read(tp, offset, &val) ||
15077 (val & 0xfc000000) != 0x0c000000 ||
15078 tg3_nvram_read(tp, offset + 4, &val) ||
15085 static void tg3_read_bc_ver(struct tg3 *tp)
15087 u32 val, offset, start, ver_offset;
15089 bool newver = false;
15091 if (tg3_nvram_read(tp, 0xc, &offset) ||
15092 tg3_nvram_read(tp, 0x4, &start))
15095 offset = tg3_nvram_logical_addr(tp, offset);
15097 if (tg3_nvram_read(tp, offset, &val))
15100 if ((val & 0xfc000000) == 0x0c000000) {
15101 if (tg3_nvram_read(tp, offset + 4, &val))
15108 dst_off = strlen(tp->fw_ver);
15111 if (TG3_VER_SIZE - dst_off < 16 ||
15112 tg3_nvram_read(tp, offset + 8, &ver_offset))
15115 offset = offset + ver_offset - start;
15116 for (i = 0; i < 16; i += 4) {
15118 if (tg3_nvram_read_be32(tp, offset + i, &v))
15121 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15126 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15129 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15130 TG3_NVM_BCVER_MAJSFT;
15131 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15132 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15133 "v%d.%02d", major, minor);
15137 static void tg3_read_hwsb_ver(struct tg3 *tp)
15139 u32 val, major, minor;
15141 /* Use native endian representation */
15142 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15145 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15146 TG3_NVM_HWSB_CFG1_MAJSFT;
15147 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15148 TG3_NVM_HWSB_CFG1_MINSFT;
15150 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15153 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15155 u32 offset, major, minor, build;
15157 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15159 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15162 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15163 case TG3_EEPROM_SB_REVISION_0:
15164 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15166 case TG3_EEPROM_SB_REVISION_2:
15167 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15169 case TG3_EEPROM_SB_REVISION_3:
15170 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15172 case TG3_EEPROM_SB_REVISION_4:
15173 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15175 case TG3_EEPROM_SB_REVISION_5:
15176 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15178 case TG3_EEPROM_SB_REVISION_6:
15179 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15185 if (tg3_nvram_read(tp, offset, &val))
15188 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15189 TG3_EEPROM_SB_EDH_BLD_SHFT;
15190 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15191 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15192 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
15194 if (minor > 99 || build > 26)
15197 offset = strlen(tp->fw_ver);
15198 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15199 " v%d.%02d", major, minor);
15202 offset = strlen(tp->fw_ver);
15203 if (offset < TG3_VER_SIZE - 1)
15204 tp->fw_ver[offset] = 'a' + build - 1;
15208 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15210 u32 val, offset, start;
15213 for (offset = TG3_NVM_DIR_START;
15214 offset < TG3_NVM_DIR_END;
15215 offset += TG3_NVM_DIRENT_SIZE) {
15216 if (tg3_nvram_read(tp, offset, &val))
15219 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15223 if (offset == TG3_NVM_DIR_END)
15226 if (!tg3_flag(tp, 5705_PLUS))
15227 start = 0x08000000;
15228 else if (tg3_nvram_read(tp, offset - 4, &start))
15231 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15232 !tg3_fw_img_is_valid(tp, offset) ||
15233 tg3_nvram_read(tp, offset + 8, &val))
15236 offset += val - start;
15238 vlen = strlen(tp->fw_ver);
15240 tp->fw_ver[vlen++] = ',';
15241 tp->fw_ver[vlen++] = ' ';
15243 for (i = 0; i < 4; i++) {
15245 if (tg3_nvram_read_be32(tp, offset, &v))
15248 offset += sizeof(v);
15250 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15251 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15255 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15260 static void tg3_probe_ncsi(struct tg3 *tp)
15264 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15265 if (apedata != APE_SEG_SIG_MAGIC)
15268 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15269 if (!(apedata & APE_FW_STATUS_READY))
15272 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15273 tg3_flag_set(tp, APE_HAS_NCSI);
15276 static void tg3_read_dash_ver(struct tg3 *tp)
15282 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15284 if (tg3_flag(tp, APE_HAS_NCSI))
15286 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15291 vlen = strlen(tp->fw_ver);
15293 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15295 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15296 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15297 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15298 (apedata & APE_FW_VERSION_BLDMSK));
15301 static void tg3_read_otp_ver(struct tg3 *tp)
15305 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15308 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15309 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15310 TG3_OTP_MAGIC0_VALID(val)) {
15311 u64 val64 = (u64) val << 32 | val2;
15315 for (i = 0; i < 7; i++) {
15316 if ((val64 & 0xff) == 0)
15318 ver = val64 & 0xff;
15321 vlen = strlen(tp->fw_ver);
15322 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15326 static void tg3_read_fw_ver(struct tg3 *tp)
15329 bool vpd_vers = false;
15331 if (tp->fw_ver[0] != 0)
15334 if (tg3_flag(tp, NO_NVRAM)) {
15335 strcat(tp->fw_ver, "sb");
15336 tg3_read_otp_ver(tp);
15340 if (tg3_nvram_read(tp, 0, &val))
15343 if (val == TG3_EEPROM_MAGIC)
15344 tg3_read_bc_ver(tp);
15345 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15346 tg3_read_sb_ver(tp, val);
15347 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15348 tg3_read_hwsb_ver(tp);
15350 if (tg3_flag(tp, ENABLE_ASF)) {
15351 if (tg3_flag(tp, ENABLE_APE)) {
15352 tg3_probe_ncsi(tp);
15354 tg3_read_dash_ver(tp);
15355 } else if (!vpd_vers) {
15356 tg3_read_mgmtfw_ver(tp);
15360 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15363 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15365 if (tg3_flag(tp, LRG_PROD_RING_CAP))
15366 return TG3_RX_RET_MAX_SIZE_5717;
15367 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15368 return TG3_RX_RET_MAX_SIZE_5700;
15370 return TG3_RX_RET_MAX_SIZE_5705;
15373 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15374 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15375 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15376 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15380 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15382 struct pci_dev *peer;
15383 unsigned int func, devnr = tp->pdev->devfn & ~7;
15385 for (func = 0; func < 8; func++) {
15386 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15387 if (peer && peer != tp->pdev)
15391 /* 5704 can be configured in single-port mode, set peer to
15392 * tp->pdev in that case.
15400 * We don't need to keep the refcount elevated; there's no way
15401 * to remove one half of this device without removing the other
15408 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15410 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15411 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15414 /* All devices that use the alternate
15415 * ASIC REV location have a CPMU.
15417 tg3_flag_set(tp, CPMU_PRESENT);
15419 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15420 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15421 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15422 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15423 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15424 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15425 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15426 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
15427 reg = TG3PCI_GEN2_PRODID_ASICREV;
15428 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15429 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15430 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15431 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15432 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15433 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15434 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15435 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15436 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15437 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15438 reg = TG3PCI_GEN15_PRODID_ASICREV;
15440 reg = TG3PCI_PRODID_ASICREV;
15442 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15445 /* Wrong chip ID in 5752 A0. This code can be removed later
15446 * as A0 is not in production.
15448 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15449 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15451 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15452 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15454 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15455 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15456 tg3_asic_rev(tp) == ASIC_REV_5720)
15457 tg3_flag_set(tp, 5717_PLUS);
15459 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15460 tg3_asic_rev(tp) == ASIC_REV_57766)
15461 tg3_flag_set(tp, 57765_CLASS);
15463 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15464 tg3_asic_rev(tp) == ASIC_REV_5762)
15465 tg3_flag_set(tp, 57765_PLUS);
15467 /* Intentionally exclude ASIC_REV_5906 */
15468 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15469 tg3_asic_rev(tp) == ASIC_REV_5787 ||
15470 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15471 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15472 tg3_asic_rev(tp) == ASIC_REV_5785 ||
15473 tg3_asic_rev(tp) == ASIC_REV_57780 ||
15474 tg3_flag(tp, 57765_PLUS))
15475 tg3_flag_set(tp, 5755_PLUS);
15477 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15478 tg3_asic_rev(tp) == ASIC_REV_5714)
15479 tg3_flag_set(tp, 5780_CLASS);
15481 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15482 tg3_asic_rev(tp) == ASIC_REV_5752 ||
15483 tg3_asic_rev(tp) == ASIC_REV_5906 ||
15484 tg3_flag(tp, 5755_PLUS) ||
15485 tg3_flag(tp, 5780_CLASS))
15486 tg3_flag_set(tp, 5750_PLUS);
15488 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15489 tg3_flag(tp, 5750_PLUS))
15490 tg3_flag_set(tp, 5705_PLUS);
15493 static bool tg3_10_100_only_device(struct tg3 *tp,
15494 const struct pci_device_id *ent)
15496 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15498 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15499 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15500 (tp->phy_flags & TG3_PHYFLG_IS_FET))
15503 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15504 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15505 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15515 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15518 u32 pci_state_reg, grc_misc_cfg;
15523 /* Force memory write invalidate off. If we leave it on,
15524 * then on 5700_BX chips we have to enable a workaround.
15525 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15526 * to match the cacheline size. The Broadcom driver have this
15527 * workaround but turns MWI off all the times so never uses
15528 * it. This seems to suggest that the workaround is insufficient.
15530 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15531 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15532 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15534 /* Important! -- Make sure register accesses are byteswapped
15535 * correctly. Also, for those chips that require it, make
15536 * sure that indirect register accesses are enabled before
15537 * the first operation.
15539 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15541 tp->misc_host_ctrl |= (misc_ctrl_reg &
15542 MISC_HOST_CTRL_CHIPREV);
15543 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15544 tp->misc_host_ctrl);
15546 tg3_detect_asic_rev(tp, misc_ctrl_reg);
15548 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15549 * we need to disable memory and use config. cycles
15550 * only to access all registers. The 5702/03 chips
15551 * can mistakenly decode the special cycles from the
15552 * ICH chipsets as memory write cycles, causing corruption
15553 * of register and memory space. Only certain ICH bridges
15554 * will drive special cycles with non-zero data during the
15555 * address phase which can fall within the 5703's address
15556 * range. This is not an ICH bug as the PCI spec allows
15557 * non-zero address during special cycles. However, only
15558 * these ICH bridges are known to drive non-zero addresses
15559 * during special cycles.
15561 * Since special cycles do not cross PCI bridges, we only
15562 * enable this workaround if the 5703 is on the secondary
15563 * bus of these ICH bridges.
15565 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15566 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15567 static struct tg3_dev_id {
15571 } ich_chipsets[] = {
15572 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15574 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15576 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15578 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15582 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15583 struct pci_dev *bridge = NULL;
15585 while (pci_id->vendor != 0) {
15586 bridge = pci_get_device(pci_id->vendor, pci_id->device,
15592 if (pci_id->rev != PCI_ANY_ID) {
15593 if (bridge->revision > pci_id->rev)
15596 if (bridge->subordinate &&
15597 (bridge->subordinate->number ==
15598 tp->pdev->bus->number)) {
15599 tg3_flag_set(tp, ICH_WORKAROUND);
15600 pci_dev_put(bridge);
15606 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15607 static struct tg3_dev_id {
15610 } bridge_chipsets[] = {
15611 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15612 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15615 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15616 struct pci_dev *bridge = NULL;
15618 while (pci_id->vendor != 0) {
15619 bridge = pci_get_device(pci_id->vendor,
15626 if (bridge->subordinate &&
15627 (bridge->subordinate->number <=
15628 tp->pdev->bus->number) &&
15629 (bridge->subordinate->busn_res.end >=
15630 tp->pdev->bus->number)) {
15631 tg3_flag_set(tp, 5701_DMA_BUG);
15632 pci_dev_put(bridge);
15638 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15639 * DMA addresses > 40-bit. This bridge may have other additional
15640 * 57xx devices behind it in some 4-port NIC designs for example.
15641 * Any tg3 device found behind the bridge will also need the 40-bit
15644 if (tg3_flag(tp, 5780_CLASS)) {
15645 tg3_flag_set(tp, 40BIT_DMA_BUG);
15646 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15648 struct pci_dev *bridge = NULL;
15651 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15652 PCI_DEVICE_ID_SERVERWORKS_EPB,
15654 if (bridge && bridge->subordinate &&
15655 (bridge->subordinate->number <=
15656 tp->pdev->bus->number) &&
15657 (bridge->subordinate->busn_res.end >=
15658 tp->pdev->bus->number)) {
15659 tg3_flag_set(tp, 40BIT_DMA_BUG);
15660 pci_dev_put(bridge);
15666 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15667 tg3_asic_rev(tp) == ASIC_REV_5714)
15668 tp->pdev_peer = tg3_find_peer(tp);
15670 /* Determine TSO capabilities */
15671 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
15672 ; /* Do nothing. HW bug. */
15673 else if (tg3_flag(tp, 57765_PLUS))
15674 tg3_flag_set(tp, HW_TSO_3);
15675 else if (tg3_flag(tp, 5755_PLUS) ||
15676 tg3_asic_rev(tp) == ASIC_REV_5906)
15677 tg3_flag_set(tp, HW_TSO_2);
15678 else if (tg3_flag(tp, 5750_PLUS)) {
15679 tg3_flag_set(tp, HW_TSO_1);
15680 tg3_flag_set(tp, TSO_BUG);
15681 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
15682 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
15683 tg3_flag_clear(tp, TSO_BUG);
15684 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15685 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15686 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
15687 tg3_flag_set(tp, FW_TSO);
15688 tg3_flag_set(tp, TSO_BUG);
15689 if (tg3_asic_rev(tp) == ASIC_REV_5705)
15690 tp->fw_needed = FIRMWARE_TG3TSO5;
15692 tp->fw_needed = FIRMWARE_TG3TSO;
15695 /* Selectively allow TSO based on operating conditions */
15696 if (tg3_flag(tp, HW_TSO_1) ||
15697 tg3_flag(tp, HW_TSO_2) ||
15698 tg3_flag(tp, HW_TSO_3) ||
15699 tg3_flag(tp, FW_TSO)) {
15700 /* For firmware TSO, assume ASF is disabled.
15701 * We'll disable TSO later if we discover ASF
15702 * is enabled in tg3_get_eeprom_hw_cfg().
15704 tg3_flag_set(tp, TSO_CAPABLE);
15706 tg3_flag_clear(tp, TSO_CAPABLE);
15707 tg3_flag_clear(tp, TSO_BUG);
15708 tp->fw_needed = NULL;
15711 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
15712 tp->fw_needed = FIRMWARE_TG3;
15714 if (tg3_asic_rev(tp) == ASIC_REV_57766)
15715 tp->fw_needed = FIRMWARE_TG357766;
15719 if (tg3_flag(tp, 5750_PLUS)) {
15720 tg3_flag_set(tp, SUPPORT_MSI);
15721 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
15722 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
15723 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
15724 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
15725 tp->pdev_peer == tp->pdev))
15726 tg3_flag_clear(tp, SUPPORT_MSI);
15728 if (tg3_flag(tp, 5755_PLUS) ||
15729 tg3_asic_rev(tp) == ASIC_REV_5906) {
15730 tg3_flag_set(tp, 1SHOT_MSI);
15733 if (tg3_flag(tp, 57765_PLUS)) {
15734 tg3_flag_set(tp, SUPPORT_MSIX);
15735 tp->irq_max = TG3_IRQ_MAX_VECS;
15741 if (tp->irq_max > 1) {
15742 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15743 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15745 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15746 tg3_asic_rev(tp) == ASIC_REV_5720)
15747 tp->txq_max = tp->irq_max - 1;
15750 if (tg3_flag(tp, 5755_PLUS) ||
15751 tg3_asic_rev(tp) == ASIC_REV_5906)
15752 tg3_flag_set(tp, SHORT_DMA_BUG);
15754 if (tg3_asic_rev(tp) == ASIC_REV_5719)
15755 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15757 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15758 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15759 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15760 tg3_asic_rev(tp) == ASIC_REV_5762)
15761 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15763 if (tg3_flag(tp, 57765_PLUS) &&
15764 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
15765 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15767 if (!tg3_flag(tp, 5705_PLUS) ||
15768 tg3_flag(tp, 5780_CLASS) ||
15769 tg3_flag(tp, USE_JUMBO_BDFLAG))
15770 tg3_flag_set(tp, JUMBO_CAPABLE);
15772 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15775 if (pci_is_pcie(tp->pdev)) {
15778 tg3_flag_set(tp, PCI_EXPRESS);
15780 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15781 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15782 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15783 tg3_flag_clear(tp, HW_TSO_2);
15784 tg3_flag_clear(tp, TSO_CAPABLE);
15786 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
15787 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15788 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
15789 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
15790 tg3_flag_set(tp, CLKREQ_BUG);
15791 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
15792 tg3_flag_set(tp, L1PLLPD_EN);
15794 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
15795 /* BCM5785 devices are effectively PCIe devices, and should
15796 * follow PCIe codepaths, but do not have a PCIe capabilities
15799 tg3_flag_set(tp, PCI_EXPRESS);
15800 } else if (!tg3_flag(tp, 5705_PLUS) ||
15801 tg3_flag(tp, 5780_CLASS)) {
15802 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15803 if (!tp->pcix_cap) {
15804 dev_err(&tp->pdev->dev,
15805 "Cannot find PCI-X capability, aborting\n");
15809 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15810 tg3_flag_set(tp, PCIX_MODE);
15813 /* If we have an AMD 762 or VIA K8T800 chipset, write
15814 * reordering to the mailbox registers done by the host
15815 * controller can cause major troubles. We read back from
15816 * every mailbox register write to force the writes to be
15817 * posted to the chip in order.
15819 if (pci_dev_present(tg3_write_reorder_chipsets) &&
15820 !tg3_flag(tp, PCI_EXPRESS))
15821 tg3_flag_set(tp, MBOX_WRITE_REORDER);
15823 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15824 &tp->pci_cacheline_sz);
15825 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15826 &tp->pci_lat_timer);
15827 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
15828 tp->pci_lat_timer < 64) {
15829 tp->pci_lat_timer = 64;
15830 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15831 tp->pci_lat_timer);
15834 /* Important! -- It is critical that the PCI-X hw workaround
15835 * situation is decided before the first MMIO register access.
15837 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
15838 /* 5700 BX chips need to have their TX producer index
15839 * mailboxes written twice to workaround a bug.
15841 tg3_flag_set(tp, TXD_MBOX_HWBUG);
15843 /* If we are in PCI-X mode, enable register write workaround.
15845 * The workaround is to use indirect register accesses
15846 * for all chip writes not to mailbox registers.
15848 if (tg3_flag(tp, PCIX_MODE)) {
15851 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15853 /* The chip can have it's power management PCI config
15854 * space registers clobbered due to this bug.
15855 * So explicitly force the chip into D0 here.
15857 pci_read_config_dword(tp->pdev,
15858 tp->pm_cap + PCI_PM_CTRL,
15860 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
15861 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
15862 pci_write_config_dword(tp->pdev,
15863 tp->pm_cap + PCI_PM_CTRL,
15866 /* Also, force SERR#/PERR# in PCI command. */
15867 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15868 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
15869 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15873 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
15874 tg3_flag_set(tp, PCI_HIGH_SPEED);
15875 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
15876 tg3_flag_set(tp, PCI_32BIT);
15878 /* Chip-specific fixup from Broadcom driver */
15879 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
15880 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
15881 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
15882 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
15885 /* Default fast path register access methods */
15886 tp->read32 = tg3_read32;
15887 tp->write32 = tg3_write32;
15888 tp->read32_mbox = tg3_read32;
15889 tp->write32_mbox = tg3_write32;
15890 tp->write32_tx_mbox = tg3_write32;
15891 tp->write32_rx_mbox = tg3_write32;
15893 /* Various workaround register access methods */
15894 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
15895 tp->write32 = tg3_write_indirect_reg32;
15896 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
15897 (tg3_flag(tp, PCI_EXPRESS) &&
15898 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
15900 * Back to back register writes can cause problems on these
15901 * chips, the workaround is to read back all reg writes
15902 * except those to mailbox regs.
15904 * See tg3_write_indirect_reg32().
15906 tp->write32 = tg3_write_flush_reg32;
15909 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
15910 tp->write32_tx_mbox = tg3_write32_tx_mbox;
15911 if (tg3_flag(tp, MBOX_WRITE_REORDER))
15912 tp->write32_rx_mbox = tg3_write_flush_reg32;
15915 if (tg3_flag(tp, ICH_WORKAROUND)) {
15916 tp->read32 = tg3_read_indirect_reg32;
15917 tp->write32 = tg3_write_indirect_reg32;
15918 tp->read32_mbox = tg3_read_indirect_mbox;
15919 tp->write32_mbox = tg3_write_indirect_mbox;
15920 tp->write32_tx_mbox = tg3_write_indirect_mbox;
15921 tp->write32_rx_mbox = tg3_write_indirect_mbox;
15926 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15927 pci_cmd &= ~PCI_COMMAND_MEMORY;
15928 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15930 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15931 tp->read32_mbox = tg3_read32_mbox_5906;
15932 tp->write32_mbox = tg3_write32_mbox_5906;
15933 tp->write32_tx_mbox = tg3_write32_mbox_5906;
15934 tp->write32_rx_mbox = tg3_write32_mbox_5906;
15937 if (tp->write32 == tg3_write_indirect_reg32 ||
15938 (tg3_flag(tp, PCIX_MODE) &&
15939 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15940 tg3_asic_rev(tp) == ASIC_REV_5701)))
15941 tg3_flag_set(tp, SRAM_USE_CONFIG);
15943 /* The memory arbiter has to be enabled in order for SRAM accesses
15944 * to succeed. Normally on powerup the tg3 chip firmware will make
15945 * sure it is enabled, but other entities such as system netboot
15946 * code might disable it.
15948 val = tr32(MEMARB_MODE);
15949 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
15951 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
15952 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15953 tg3_flag(tp, 5780_CLASS)) {
15954 if (tg3_flag(tp, PCIX_MODE)) {
15955 pci_read_config_dword(tp->pdev,
15956 tp->pcix_cap + PCI_X_STATUS,
15958 tp->pci_fn = val & 0x7;
15960 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15961 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15962 tg3_asic_rev(tp) == ASIC_REV_5720) {
15963 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
15964 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
15965 val = tr32(TG3_CPMU_STATUS);
15967 if (tg3_asic_rev(tp) == ASIC_REV_5717)
15968 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
15970 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
15971 TG3_CPMU_STATUS_FSHFT_5719;
15974 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
15975 tp->write32_tx_mbox = tg3_write_flush_reg32;
15976 tp->write32_rx_mbox = tg3_write_flush_reg32;
15979 /* Get eeprom hw config before calling tg3_set_power_state().
15980 * In particular, the TG3_FLAG_IS_NIC flag must be
15981 * determined before calling tg3_set_power_state() so that
15982 * we know whether or not to switch out of Vaux power.
15983 * When the flag is set, it means that GPIO1 is used for eeprom
15984 * write protect and also implies that it is a LOM where GPIOs
15985 * are not used to switch power.
15987 tg3_get_eeprom_hw_cfg(tp);
15989 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
15990 tg3_flag_clear(tp, TSO_CAPABLE);
15991 tg3_flag_clear(tp, TSO_BUG);
15992 tp->fw_needed = NULL;
15995 if (tg3_flag(tp, ENABLE_APE)) {
15996 /* Allow reads and writes to the
15997 * APE register and memory space.
15999 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16000 PCISTATE_ALLOW_APE_SHMEM_WR |
16001 PCISTATE_ALLOW_APE_PSPACE_WR;
16002 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16005 tg3_ape_lock_init(tp);
16008 /* Set up tp->grc_local_ctrl before calling
16009 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16010 * will bring 5700's external PHY out of reset.
16011 * It is also used as eeprom write protect on LOMs.
16013 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16014 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16015 tg3_flag(tp, EEPROM_WRITE_PROT))
16016 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16017 GRC_LCLCTRL_GPIO_OUTPUT1);
16018 /* Unused GPIO3 must be driven as output on 5752 because there
16019 * are no pull-up resistors on unused GPIO pins.
16021 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16022 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16024 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16025 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16026 tg3_flag(tp, 57765_CLASS))
16027 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16029 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16030 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16031 /* Turn off the debug UART. */
16032 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16033 if (tg3_flag(tp, IS_NIC))
16034 /* Keep VMain power. */
16035 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16036 GRC_LCLCTRL_GPIO_OUTPUT0;
16039 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16040 tp->grc_local_ctrl |=
16041 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16043 /* Switch out of Vaux if it is a NIC */
16044 tg3_pwrsrc_switch_to_vmain(tp);
16046 /* Derive initial jumbo mode from MTU assigned in
16047 * ether_setup() via the alloc_etherdev() call
16049 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16050 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16052 /* Determine WakeOnLan speed to use. */
16053 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16054 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16055 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16056 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16057 tg3_flag_clear(tp, WOL_SPEED_100MB);
16059 tg3_flag_set(tp, WOL_SPEED_100MB);
16062 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16063 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16065 /* A few boards don't want Ethernet@WireSpeed phy feature */
16066 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16067 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16068 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16069 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16070 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16071 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16072 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16074 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16075 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16076 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16077 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16078 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16080 if (tg3_flag(tp, 5705_PLUS) &&
16081 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16082 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16083 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16084 !tg3_flag(tp, 57765_PLUS)) {
16085 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16086 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16087 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16088 tg3_asic_rev(tp) == ASIC_REV_5761) {
16089 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16090 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16091 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16092 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16093 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16095 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16098 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16099 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16100 tp->phy_otp = tg3_read_otp_phycfg(tp);
16101 if (tp->phy_otp == 0)
16102 tp->phy_otp = TG3_OTP_DEFAULT;
16105 if (tg3_flag(tp, CPMU_PRESENT))
16106 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16108 tp->mi_mode = MAC_MI_MODE_BASE;
16110 tp->coalesce_mode = 0;
16111 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16112 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16113 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16115 /* Set these bits to enable statistics workaround. */
16116 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16117 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16118 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16119 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16120 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16123 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16124 tg3_asic_rev(tp) == ASIC_REV_57780)
16125 tg3_flag_set(tp, USE_PHYLIB);
16127 err = tg3_mdio_init(tp);
16131 /* Initialize data/descriptor byte/word swapping. */
16132 val = tr32(GRC_MODE);
16133 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16134 tg3_asic_rev(tp) == ASIC_REV_5762)
16135 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16136 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16137 GRC_MODE_B2HRX_ENABLE |
16138 GRC_MODE_HTX2B_ENABLE |
16139 GRC_MODE_HOST_STACKUP);
16141 val &= GRC_MODE_HOST_STACKUP;
16143 tw32(GRC_MODE, val | tp->grc_mode);
16145 tg3_switch_clocks(tp);
16147 /* Clear this out for sanity. */
16148 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16150 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16152 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16153 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16154 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16155 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16156 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16157 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16158 void __iomem *sram_base;
16160 /* Write some dummy words into the SRAM status block
16161 * area, see if it reads back correctly. If the return
16162 * value is bad, force enable the PCIX workaround.
16164 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16166 writel(0x00000000, sram_base);
16167 writel(0x00000000, sram_base + 4);
16168 writel(0xffffffff, sram_base + 4);
16169 if (readl(sram_base) != 0x00000000)
16170 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16175 tg3_nvram_init(tp);
16177 /* If the device has an NVRAM, no need to load patch firmware */
16178 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16179 !tg3_flag(tp, NO_NVRAM))
16180 tp->fw_needed = NULL;
16182 grc_misc_cfg = tr32(GRC_MISC_CFG);
16183 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16185 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16186 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16187 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16188 tg3_flag_set(tp, IS_5788);
16190 if (!tg3_flag(tp, IS_5788) &&
16191 tg3_asic_rev(tp) != ASIC_REV_5700)
16192 tg3_flag_set(tp, TAGGED_STATUS);
16193 if (tg3_flag(tp, TAGGED_STATUS)) {
16194 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16195 HOSTCC_MODE_CLRTICK_TXBD);
16197 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16198 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16199 tp->misc_host_ctrl);
16202 /* Preserve the APE MAC_MODE bits */
16203 if (tg3_flag(tp, ENABLE_APE))
16204 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16208 if (tg3_10_100_only_device(tp, ent))
16209 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16211 err = tg3_phy_probe(tp);
16213 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16214 /* ... but do not return immediately ... */
16219 tg3_read_fw_ver(tp);
16221 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16222 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16224 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16225 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16227 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16230 /* 5700 {AX,BX} chips have a broken status block link
16231 * change bit implementation, so we must use the
16232 * status register in those cases.
16234 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16235 tg3_flag_set(tp, USE_LINKCHG_REG);
16237 tg3_flag_clear(tp, USE_LINKCHG_REG);
16239 /* The led_ctrl is set during tg3_phy_probe, here we might
16240 * have to force the link status polling mechanism based
16241 * upon subsystem IDs.
16243 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16244 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16245 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16246 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16247 tg3_flag_set(tp, USE_LINKCHG_REG);
16250 /* For all SERDES we poll the MAC status register. */
16251 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16252 tg3_flag_set(tp, POLL_SERDES);
16254 tg3_flag_clear(tp, POLL_SERDES);
16256 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16257 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16258 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16259 tg3_flag(tp, PCIX_MODE)) {
16260 tp->rx_offset = NET_SKB_PAD;
16261 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16262 tp->rx_copy_thresh = ~(u16)0;
16266 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16267 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16268 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16270 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16272 /* Increment the rx prod index on the rx std ring by at most
16273 * 8 for these chips to workaround hw errata.
16275 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16276 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16277 tg3_asic_rev(tp) == ASIC_REV_5755)
16278 tp->rx_std_max_post = 8;
16280 if (tg3_flag(tp, ASPM_WORKAROUND))
16281 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16282 PCIE_PWR_MGMT_L1_THRESH_MSK;
16287 #ifdef CONFIG_SPARC
16288 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16290 struct net_device *dev = tp->dev;
16291 struct pci_dev *pdev = tp->pdev;
16292 struct device_node *dp = pci_device_to_OF_node(pdev);
16293 const unsigned char *addr;
16296 addr = of_get_property(dp, "local-mac-address", &len);
16297 if (addr && len == 6) {
16298 memcpy(dev->dev_addr, addr, 6);
16304 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16306 struct net_device *dev = tp->dev;
16308 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
16313 static int tg3_get_device_address(struct tg3 *tp)
16315 struct net_device *dev = tp->dev;
16316 u32 hi, lo, mac_offset;
16320 #ifdef CONFIG_SPARC
16321 if (!tg3_get_macaddr_sparc(tp))
16325 if (tg3_flag(tp, IS_SSB_CORE)) {
16326 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16327 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16332 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16333 tg3_flag(tp, 5780_CLASS)) {
16334 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16336 if (tg3_nvram_lock(tp))
16337 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16339 tg3_nvram_unlock(tp);
16340 } else if (tg3_flag(tp, 5717_PLUS)) {
16341 if (tp->pci_fn & 1)
16343 if (tp->pci_fn > 1)
16344 mac_offset += 0x18c;
16345 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16348 /* First try to get it from MAC address mailbox. */
16349 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
16350 if ((hi >> 16) == 0x484b) {
16351 dev->dev_addr[0] = (hi >> 8) & 0xff;
16352 dev->dev_addr[1] = (hi >> 0) & 0xff;
16354 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16355 dev->dev_addr[2] = (lo >> 24) & 0xff;
16356 dev->dev_addr[3] = (lo >> 16) & 0xff;
16357 dev->dev_addr[4] = (lo >> 8) & 0xff;
16358 dev->dev_addr[5] = (lo >> 0) & 0xff;
16360 /* Some old bootcode may report a 0 MAC address in SRAM */
16361 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16364 /* Next, try NVRAM. */
16365 if (!tg3_flag(tp, NO_NVRAM) &&
16366 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16367 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16368 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16369 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16371 /* Finally just fetch it out of the MAC control regs. */
16373 hi = tr32(MAC_ADDR_0_HIGH);
16374 lo = tr32(MAC_ADDR_0_LOW);
16376 dev->dev_addr[5] = lo & 0xff;
16377 dev->dev_addr[4] = (lo >> 8) & 0xff;
16378 dev->dev_addr[3] = (lo >> 16) & 0xff;
16379 dev->dev_addr[2] = (lo >> 24) & 0xff;
16380 dev->dev_addr[1] = hi & 0xff;
16381 dev->dev_addr[0] = (hi >> 8) & 0xff;
16385 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16386 #ifdef CONFIG_SPARC
16387 if (!tg3_get_default_macaddr_sparc(tp))
16395 #define BOUNDARY_SINGLE_CACHELINE 1
16396 #define BOUNDARY_MULTI_CACHELINE 2
16398 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16400 int cacheline_size;
16404 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16406 cacheline_size = 1024;
16408 cacheline_size = (int) byte * 4;
16410 /* On 5703 and later chips, the boundary bits have no
16413 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16414 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16415 !tg3_flag(tp, PCI_EXPRESS))
16418 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16419 goal = BOUNDARY_MULTI_CACHELINE;
16421 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16422 goal = BOUNDARY_SINGLE_CACHELINE;
16428 if (tg3_flag(tp, 57765_PLUS)) {
16429 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16436 /* PCI controllers on most RISC systems tend to disconnect
16437 * when a device tries to burst across a cache-line boundary.
16438 * Therefore, letting tg3 do so just wastes PCI bandwidth.
16440 * Unfortunately, for PCI-E there are only limited
16441 * write-side controls for this, and thus for reads
16442 * we will still get the disconnects. We'll also waste
16443 * these PCI cycles for both read and write for chips
16444 * other than 5700 and 5701 which do not implement the
16447 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16448 switch (cacheline_size) {
16453 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16454 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16455 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16457 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16458 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16463 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16464 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16468 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16469 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16472 } else if (tg3_flag(tp, PCI_EXPRESS)) {
16473 switch (cacheline_size) {
16477 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16478 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16479 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16485 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16486 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16490 switch (cacheline_size) {
16492 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16493 val |= (DMA_RWCTRL_READ_BNDRY_16 |
16494 DMA_RWCTRL_WRITE_BNDRY_16);
16499 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16500 val |= (DMA_RWCTRL_READ_BNDRY_32 |
16501 DMA_RWCTRL_WRITE_BNDRY_32);
16506 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16507 val |= (DMA_RWCTRL_READ_BNDRY_64 |
16508 DMA_RWCTRL_WRITE_BNDRY_64);
16513 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16514 val |= (DMA_RWCTRL_READ_BNDRY_128 |
16515 DMA_RWCTRL_WRITE_BNDRY_128);
16520 val |= (DMA_RWCTRL_READ_BNDRY_256 |
16521 DMA_RWCTRL_WRITE_BNDRY_256);
16524 val |= (DMA_RWCTRL_READ_BNDRY_512 |
16525 DMA_RWCTRL_WRITE_BNDRY_512);
16529 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16530 DMA_RWCTRL_WRITE_BNDRY_1024);
16539 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16540 int size, int to_device)
16542 struct tg3_internal_buffer_desc test_desc;
16543 u32 sram_dma_descs;
16546 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16548 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16549 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16550 tw32(RDMAC_STATUS, 0);
16551 tw32(WDMAC_STATUS, 0);
16553 tw32(BUFMGR_MODE, 0);
16554 tw32(FTQ_RESET, 0);
16556 test_desc.addr_hi = ((u64) buf_dma) >> 32;
16557 test_desc.addr_lo = buf_dma & 0xffffffff;
16558 test_desc.nic_mbuf = 0x00002100;
16559 test_desc.len = size;
16562 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
16563 * the *second* time the tg3 driver was getting loaded after an
16566 * Broadcom tells me:
16567 * ...the DMA engine is connected to the GRC block and a DMA
16568 * reset may affect the GRC block in some unpredictable way...
16569 * The behavior of resets to individual blocks has not been tested.
16571 * Broadcom noted the GRC reset will also reset all sub-components.
16574 test_desc.cqid_sqid = (13 << 8) | 2;
16576 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16579 test_desc.cqid_sqid = (16 << 8) | 7;
16581 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16584 test_desc.flags = 0x00000005;
16586 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16589 val = *(((u32 *)&test_desc) + i);
16590 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16591 sram_dma_descs + (i * sizeof(u32)));
16592 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16594 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
16597 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
16599 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
16602 for (i = 0; i < 40; i++) {
16606 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16608 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16609 if ((val & 0xffff) == sram_dma_descs) {
16620 #define TEST_BUFFER_SIZE 0x2000
16622 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16623 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16627 static int tg3_test_dma(struct tg3 *tp)
16629 dma_addr_t buf_dma;
16630 u32 *buf, saved_dma_rwctrl;
16633 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16634 &buf_dma, GFP_KERNEL);
16640 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16641 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16643 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16645 if (tg3_flag(tp, 57765_PLUS))
16648 if (tg3_flag(tp, PCI_EXPRESS)) {
16649 /* DMA read watermark not used on PCIE */
16650 tp->dma_rwctrl |= 0x00180000;
16651 } else if (!tg3_flag(tp, PCIX_MODE)) {
16652 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16653 tg3_asic_rev(tp) == ASIC_REV_5750)
16654 tp->dma_rwctrl |= 0x003f0000;
16656 tp->dma_rwctrl |= 0x003f000f;
16658 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16659 tg3_asic_rev(tp) == ASIC_REV_5704) {
16660 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16661 u32 read_water = 0x7;
16663 /* If the 5704 is behind the EPB bridge, we can
16664 * do the less restrictive ONE_DMA workaround for
16665 * better performance.
16667 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16668 tg3_asic_rev(tp) == ASIC_REV_5704)
16669 tp->dma_rwctrl |= 0x8000;
16670 else if (ccval == 0x6 || ccval == 0x7)
16671 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16673 if (tg3_asic_rev(tp) == ASIC_REV_5703)
16675 /* Set bit 23 to enable PCIX hw bug fix */
16677 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16678 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16680 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
16681 /* 5780 always in PCIX mode */
16682 tp->dma_rwctrl |= 0x00144000;
16683 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
16684 /* 5714 always in PCIX mode */
16685 tp->dma_rwctrl |= 0x00148000;
16687 tp->dma_rwctrl |= 0x001b000f;
16690 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
16691 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16693 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16694 tg3_asic_rev(tp) == ASIC_REV_5704)
16695 tp->dma_rwctrl &= 0xfffffff0;
16697 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16698 tg3_asic_rev(tp) == ASIC_REV_5701) {
16699 /* Remove this if it causes problems for some boards. */
16700 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16702 /* On 5700/5701 chips, we need to set this bit.
16703 * Otherwise the chip will issue cacheline transactions
16704 * to streamable DMA memory with not all the byte
16705 * enables turned on. This is an error on several
16706 * RISC PCI controllers, in particular sparc64.
16708 * On 5703/5704 chips, this bit has been reassigned
16709 * a different meaning. In particular, it is used
16710 * on those chips to enable a PCI-X workaround.
16712 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16715 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16718 /* Unneeded, already done by tg3_get_invariants. */
16719 tg3_switch_clocks(tp);
16722 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16723 tg3_asic_rev(tp) != ASIC_REV_5701)
16726 /* It is best to perform DMA test with maximum write burst size
16727 * to expose the 5700/5701 write DMA bug.
16729 saved_dma_rwctrl = tp->dma_rwctrl;
16730 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16731 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16736 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16739 /* Send the buffer to the chip. */
16740 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
16742 dev_err(&tp->pdev->dev,
16743 "%s: Buffer write failed. err = %d\n",
16749 /* validate data reached card RAM correctly. */
16750 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16752 tg3_read_mem(tp, 0x2100 + (i*4), &val);
16753 if (le32_to_cpu(val) != p[i]) {
16754 dev_err(&tp->pdev->dev,
16755 "%s: Buffer corrupted on device! "
16756 "(%d != %d)\n", __func__, val, i);
16757 /* ret = -ENODEV here? */
16762 /* Now read it back. */
16763 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
16765 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16766 "err = %d\n", __func__, ret);
16771 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16775 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16776 DMA_RWCTRL_WRITE_BNDRY_16) {
16777 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16778 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16779 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16782 dev_err(&tp->pdev->dev,
16783 "%s: Buffer corrupted on read back! "
16784 "(%d != %d)\n", __func__, p[i], i);
16790 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
16796 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16797 DMA_RWCTRL_WRITE_BNDRY_16) {
16798 /* DMA test passed without adjusting DMA boundary,
16799 * now look for chipsets that are known to expose the
16800 * DMA bug without failing the test.
16802 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
16803 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16804 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16806 /* Safe to use the calculated DMA boundary. */
16807 tp->dma_rwctrl = saved_dma_rwctrl;
16810 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16814 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
16819 static void tg3_init_bufmgr_config(struct tg3 *tp)
16821 if (tg3_flag(tp, 57765_PLUS)) {
16822 tp->bufmgr_config.mbuf_read_dma_low_water =
16823 DEFAULT_MB_RDMA_LOW_WATER_5705;
16824 tp->bufmgr_config.mbuf_mac_rx_low_water =
16825 DEFAULT_MB_MACRX_LOW_WATER_57765;
16826 tp->bufmgr_config.mbuf_high_water =
16827 DEFAULT_MB_HIGH_WATER_57765;
16829 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16830 DEFAULT_MB_RDMA_LOW_WATER_5705;
16831 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16832 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16833 tp->bufmgr_config.mbuf_high_water_jumbo =
16834 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
16835 } else if (tg3_flag(tp, 5705_PLUS)) {
16836 tp->bufmgr_config.mbuf_read_dma_low_water =
16837 DEFAULT_MB_RDMA_LOW_WATER_5705;
16838 tp->bufmgr_config.mbuf_mac_rx_low_water =
16839 DEFAULT_MB_MACRX_LOW_WATER_5705;
16840 tp->bufmgr_config.mbuf_high_water =
16841 DEFAULT_MB_HIGH_WATER_5705;
16842 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16843 tp->bufmgr_config.mbuf_mac_rx_low_water =
16844 DEFAULT_MB_MACRX_LOW_WATER_5906;
16845 tp->bufmgr_config.mbuf_high_water =
16846 DEFAULT_MB_HIGH_WATER_5906;
16849 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16850 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
16851 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16852 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
16853 tp->bufmgr_config.mbuf_high_water_jumbo =
16854 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
16856 tp->bufmgr_config.mbuf_read_dma_low_water =
16857 DEFAULT_MB_RDMA_LOW_WATER;
16858 tp->bufmgr_config.mbuf_mac_rx_low_water =
16859 DEFAULT_MB_MACRX_LOW_WATER;
16860 tp->bufmgr_config.mbuf_high_water =
16861 DEFAULT_MB_HIGH_WATER;
16863 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16864 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
16865 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16866 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
16867 tp->bufmgr_config.mbuf_high_water_jumbo =
16868 DEFAULT_MB_HIGH_WATER_JUMBO;
16871 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
16872 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
16875 static char *tg3_phy_string(struct tg3 *tp)
16877 switch (tp->phy_id & TG3_PHY_ID_MASK) {
16878 case TG3_PHY_ID_BCM5400: return "5400";
16879 case TG3_PHY_ID_BCM5401: return "5401";
16880 case TG3_PHY_ID_BCM5411: return "5411";
16881 case TG3_PHY_ID_BCM5701: return "5701";
16882 case TG3_PHY_ID_BCM5703: return "5703";
16883 case TG3_PHY_ID_BCM5704: return "5704";
16884 case TG3_PHY_ID_BCM5705: return "5705";
16885 case TG3_PHY_ID_BCM5750: return "5750";
16886 case TG3_PHY_ID_BCM5752: return "5752";
16887 case TG3_PHY_ID_BCM5714: return "5714";
16888 case TG3_PHY_ID_BCM5780: return "5780";
16889 case TG3_PHY_ID_BCM5755: return "5755";
16890 case TG3_PHY_ID_BCM5787: return "5787";
16891 case TG3_PHY_ID_BCM5784: return "5784";
16892 case TG3_PHY_ID_BCM5756: return "5722/5756";
16893 case TG3_PHY_ID_BCM5906: return "5906";
16894 case TG3_PHY_ID_BCM5761: return "5761";
16895 case TG3_PHY_ID_BCM5718C: return "5718C";
16896 case TG3_PHY_ID_BCM5718S: return "5718S";
16897 case TG3_PHY_ID_BCM57765: return "57765";
16898 case TG3_PHY_ID_BCM5719C: return "5719C";
16899 case TG3_PHY_ID_BCM5720C: return "5720C";
16900 case TG3_PHY_ID_BCM5762: return "5762C";
16901 case TG3_PHY_ID_BCM8002: return "8002/serdes";
16902 case 0: return "serdes";
16903 default: return "unknown";
16907 static char *tg3_bus_string(struct tg3 *tp, char *str)
16909 if (tg3_flag(tp, PCI_EXPRESS)) {
16910 strcpy(str, "PCI Express");
16912 } else if (tg3_flag(tp, PCIX_MODE)) {
16913 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
16915 strcpy(str, "PCIX:");
16917 if ((clock_ctrl == 7) ||
16918 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
16919 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
16920 strcat(str, "133MHz");
16921 else if (clock_ctrl == 0)
16922 strcat(str, "33MHz");
16923 else if (clock_ctrl == 2)
16924 strcat(str, "50MHz");
16925 else if (clock_ctrl == 4)
16926 strcat(str, "66MHz");
16927 else if (clock_ctrl == 6)
16928 strcat(str, "100MHz");
16930 strcpy(str, "PCI:");
16931 if (tg3_flag(tp, PCI_HIGH_SPEED))
16932 strcat(str, "66MHz");
16934 strcat(str, "33MHz");
16936 if (tg3_flag(tp, PCI_32BIT))
16937 strcat(str, ":32-bit");
16939 strcat(str, ":64-bit");
16943 static void tg3_init_coal(struct tg3 *tp)
16945 struct ethtool_coalesce *ec = &tp->coal;
16947 memset(ec, 0, sizeof(*ec));
16948 ec->cmd = ETHTOOL_GCOALESCE;
16949 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
16950 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
16951 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
16952 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
16953 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
16954 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
16955 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
16956 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
16957 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
16959 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
16960 HOSTCC_MODE_CLRTICK_TXBD)) {
16961 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
16962 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
16963 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
16964 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
16967 if (tg3_flag(tp, 5705_PLUS)) {
16968 ec->rx_coalesce_usecs_irq = 0;
16969 ec->tx_coalesce_usecs_irq = 0;
16970 ec->stats_block_coalesce_usecs = 0;
16974 static int tg3_init_one(struct pci_dev *pdev,
16975 const struct pci_device_id *ent)
16977 struct net_device *dev;
16979 int i, err, pm_cap;
16980 u32 sndmbx, rcvmbx, intmbx;
16982 u64 dma_mask, persist_dma_mask;
16983 netdev_features_t features = 0;
16985 printk_once(KERN_INFO "%s\n", version);
16987 err = pci_enable_device(pdev);
16989 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
16993 err = pci_request_regions(pdev, DRV_MODULE_NAME);
16995 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
16996 goto err_out_disable_pdev;
16999 pci_set_master(pdev);
17001 /* Find power-management capability. */
17002 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
17004 dev_err(&pdev->dev,
17005 "Cannot find Power Management capability, aborting\n");
17007 goto err_out_free_res;
17010 err = pci_set_power_state(pdev, PCI_D0);
17012 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
17013 goto err_out_free_res;
17016 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17019 goto err_out_power_down;
17022 SET_NETDEV_DEV(dev, &pdev->dev);
17024 tp = netdev_priv(dev);
17027 tp->pm_cap = pm_cap;
17028 tp->rx_mode = TG3_DEF_RX_MODE;
17029 tp->tx_mode = TG3_DEF_TX_MODE;
17033 tp->msg_enable = tg3_debug;
17035 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17037 if (pdev_is_ssb_gige_core(pdev)) {
17038 tg3_flag_set(tp, IS_SSB_CORE);
17039 if (ssb_gige_must_flush_posted_writes(pdev))
17040 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17041 if (ssb_gige_one_dma_at_once(pdev))
17042 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17043 if (ssb_gige_have_roboswitch(pdev))
17044 tg3_flag_set(tp, ROBOSWITCH);
17045 if (ssb_gige_is_rgmii(pdev))
17046 tg3_flag_set(tp, RGMII_MODE);
17049 /* The word/byte swap controls here control register access byte
17050 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17053 tp->misc_host_ctrl =
17054 MISC_HOST_CTRL_MASK_PCI_INT |
17055 MISC_HOST_CTRL_WORD_SWAP |
17056 MISC_HOST_CTRL_INDIR_ACCESS |
17057 MISC_HOST_CTRL_PCISTATE_RW;
17059 /* The NONFRM (non-frame) byte/word swap controls take effect
17060 * on descriptor entries, anything which isn't packet data.
17062 * The StrongARM chips on the board (one for tx, one for rx)
17063 * are running in big-endian mode.
17065 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17066 GRC_MODE_WSWAP_NONFRM_DATA);
17067 #ifdef __BIG_ENDIAN
17068 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17070 spin_lock_init(&tp->lock);
17071 spin_lock_init(&tp->indirect_lock);
17072 INIT_WORK(&tp->reset_task, tg3_reset_task);
17074 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17076 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17078 goto err_out_free_dev;
17081 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17082 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17083 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17084 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17085 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17086 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17087 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17088 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17089 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17090 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17091 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17092 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
17093 tg3_flag_set(tp, ENABLE_APE);
17094 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17095 if (!tp->aperegs) {
17096 dev_err(&pdev->dev,
17097 "Cannot map APE registers, aborting\n");
17099 goto err_out_iounmap;
17103 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17104 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17106 dev->ethtool_ops = &tg3_ethtool_ops;
17107 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17108 dev->netdev_ops = &tg3_netdev_ops;
17109 dev->irq = pdev->irq;
17111 err = tg3_get_invariants(tp, ent);
17113 dev_err(&pdev->dev,
17114 "Problem fetching invariants of chip, aborting\n");
17115 goto err_out_apeunmap;
17118 /* The EPB bridge inside 5714, 5715, and 5780 and any
17119 * device behind the EPB cannot support DMA addresses > 40-bit.
17120 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17121 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17122 * do DMA address check in tg3_start_xmit().
17124 if (tg3_flag(tp, IS_5788))
17125 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17126 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17127 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17128 #ifdef CONFIG_HIGHMEM
17129 dma_mask = DMA_BIT_MASK(64);
17132 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17134 /* Configure DMA attributes. */
17135 if (dma_mask > DMA_BIT_MASK(32)) {
17136 err = pci_set_dma_mask(pdev, dma_mask);
17138 features |= NETIF_F_HIGHDMA;
17139 err = pci_set_consistent_dma_mask(pdev,
17142 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17143 "DMA for consistent allocations\n");
17144 goto err_out_apeunmap;
17148 if (err || dma_mask == DMA_BIT_MASK(32)) {
17149 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17151 dev_err(&pdev->dev,
17152 "No usable DMA configuration, aborting\n");
17153 goto err_out_apeunmap;
17157 tg3_init_bufmgr_config(tp);
17159 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
17161 /* 5700 B0 chips do not support checksumming correctly due
17162 * to hardware bugs.
17164 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17165 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17167 if (tg3_flag(tp, 5755_PLUS))
17168 features |= NETIF_F_IPV6_CSUM;
17171 /* TSO is on by default on chips that support hardware TSO.
17172 * Firmware TSO on older chips gives lower performance, so it
17173 * is off by default, but can be enabled using ethtool.
17175 if ((tg3_flag(tp, HW_TSO_1) ||
17176 tg3_flag(tp, HW_TSO_2) ||
17177 tg3_flag(tp, HW_TSO_3)) &&
17178 (features & NETIF_F_IP_CSUM))
17179 features |= NETIF_F_TSO;
17180 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17181 if (features & NETIF_F_IPV6_CSUM)
17182 features |= NETIF_F_TSO6;
17183 if (tg3_flag(tp, HW_TSO_3) ||
17184 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17185 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17186 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17187 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17188 tg3_asic_rev(tp) == ASIC_REV_57780)
17189 features |= NETIF_F_TSO_ECN;
17192 dev->features |= features;
17193 dev->vlan_features |= features;
17196 * Add loopback capability only for a subset of devices that support
17197 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
17198 * loopback for the remaining devices.
17200 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17201 !tg3_flag(tp, CPMU_PRESENT))
17202 /* Add the loopback capability */
17203 features |= NETIF_F_LOOPBACK;
17205 dev->hw_features |= features;
17207 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17208 !tg3_flag(tp, TSO_CAPABLE) &&
17209 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17210 tg3_flag_set(tp, MAX_RXPEND_64);
17211 tp->rx_pending = 63;
17214 err = tg3_get_device_address(tp);
17216 dev_err(&pdev->dev,
17217 "Could not obtain valid ethernet address, aborting\n");
17218 goto err_out_apeunmap;
17222 * Reset chip in case UNDI or EFI driver did not shutdown
17223 * DMA self test will enable WDMAC and we'll see (spurious)
17224 * pending DMA on the PCI bus at that point.
17226 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17227 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17228 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17229 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17232 err = tg3_test_dma(tp);
17234 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17235 goto err_out_apeunmap;
17238 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17239 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17240 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17241 for (i = 0; i < tp->irq_max; i++) {
17242 struct tg3_napi *tnapi = &tp->napi[i];
17245 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17247 tnapi->int_mbox = intmbx;
17253 tnapi->consmbox = rcvmbx;
17254 tnapi->prodmbox = sndmbx;
17257 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17259 tnapi->coal_now = HOSTCC_MODE_NOW;
17261 if (!tg3_flag(tp, SUPPORT_MSIX))
17265 * If we support MSIX, we'll be using RSS. If we're using
17266 * RSS, the first vector only handles link interrupts and the
17267 * remaining vectors handle rx and tx interrupts. Reuse the
17268 * mailbox values for the next iteration. The values we setup
17269 * above are still useful for the single vectored mode.
17284 pci_set_drvdata(pdev, dev);
17286 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17287 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17288 tg3_asic_rev(tp) == ASIC_REV_5762)
17289 tg3_flag_set(tp, PTP_CAPABLE);
17291 if (tg3_flag(tp, 5717_PLUS)) {
17292 /* Resume a low-power mode */
17293 tg3_frob_aux_power(tp, false);
17296 tg3_timer_init(tp);
17298 tg3_carrier_off(tp);
17300 err = register_netdev(dev);
17302 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17303 goto err_out_apeunmap;
17306 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17307 tp->board_part_number,
17308 tg3_chip_rev_id(tp),
17309 tg3_bus_string(tp, str),
17312 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17313 struct phy_device *phydev;
17314 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
17316 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17317 phydev->drv->name, dev_name(&phydev->dev));
17321 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17322 ethtype = "10/100Base-TX";
17323 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17324 ethtype = "1000Base-SX";
17326 ethtype = "10/100/1000Base-T";
17328 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17329 "(WireSpeed[%d], EEE[%d])\n",
17330 tg3_phy_string(tp), ethtype,
17331 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17332 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17335 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17336 (dev->features & NETIF_F_RXCSUM) != 0,
17337 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17338 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17339 tg3_flag(tp, ENABLE_ASF) != 0,
17340 tg3_flag(tp, TSO_CAPABLE) != 0);
17341 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17343 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17344 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17346 pci_save_state(pdev);
17352 iounmap(tp->aperegs);
17353 tp->aperegs = NULL;
17365 err_out_power_down:
17366 pci_set_power_state(pdev, PCI_D3hot);
17369 pci_release_regions(pdev);
17371 err_out_disable_pdev:
17372 pci_disable_device(pdev);
17373 pci_set_drvdata(pdev, NULL);
17377 static void tg3_remove_one(struct pci_dev *pdev)
17379 struct net_device *dev = pci_get_drvdata(pdev);
17382 struct tg3 *tp = netdev_priv(dev);
17384 release_firmware(tp->fw);
17386 tg3_reset_task_cancel(tp);
17388 if (tg3_flag(tp, USE_PHYLIB)) {
17393 unregister_netdev(dev);
17395 iounmap(tp->aperegs);
17396 tp->aperegs = NULL;
17403 pci_release_regions(pdev);
17404 pci_disable_device(pdev);
17405 pci_set_drvdata(pdev, NULL);
17409 #ifdef CONFIG_PM_SLEEP
17410 static int tg3_suspend(struct device *device)
17412 struct pci_dev *pdev = to_pci_dev(device);
17413 struct net_device *dev = pci_get_drvdata(pdev);
17414 struct tg3 *tp = netdev_priv(dev);
17417 if (!netif_running(dev))
17420 tg3_reset_task_cancel(tp);
17422 tg3_netif_stop(tp);
17424 tg3_timer_stop(tp);
17426 tg3_full_lock(tp, 1);
17427 tg3_disable_ints(tp);
17428 tg3_full_unlock(tp);
17430 netif_device_detach(dev);
17432 tg3_full_lock(tp, 0);
17433 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17434 tg3_flag_clear(tp, INIT_COMPLETE);
17435 tg3_full_unlock(tp);
17437 err = tg3_power_down_prepare(tp);
17441 tg3_full_lock(tp, 0);
17443 tg3_flag_set(tp, INIT_COMPLETE);
17444 err2 = tg3_restart_hw(tp, 1);
17448 tg3_timer_start(tp);
17450 netif_device_attach(dev);
17451 tg3_netif_start(tp);
17454 tg3_full_unlock(tp);
17463 static int tg3_resume(struct device *device)
17465 struct pci_dev *pdev = to_pci_dev(device);
17466 struct net_device *dev = pci_get_drvdata(pdev);
17467 struct tg3 *tp = netdev_priv(dev);
17470 if (!netif_running(dev))
17473 netif_device_attach(dev);
17475 tg3_full_lock(tp, 0);
17477 tg3_flag_set(tp, INIT_COMPLETE);
17478 err = tg3_restart_hw(tp,
17479 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
17483 tg3_timer_start(tp);
17485 tg3_netif_start(tp);
17488 tg3_full_unlock(tp);
17496 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17497 #define TG3_PM_OPS (&tg3_pm_ops)
17501 #define TG3_PM_OPS NULL
17503 #endif /* CONFIG_PM_SLEEP */
17506 * tg3_io_error_detected - called when PCI error is detected
17507 * @pdev: Pointer to PCI device
17508 * @state: The current pci connection state
17510 * This function is called after a PCI bus error affecting
17511 * this device has been detected.
17513 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17514 pci_channel_state_t state)
17516 struct net_device *netdev = pci_get_drvdata(pdev);
17517 struct tg3 *tp = netdev_priv(netdev);
17518 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17520 netdev_info(netdev, "PCI I/O error detected\n");
17524 if (!netif_running(netdev))
17529 tg3_netif_stop(tp);
17531 tg3_timer_stop(tp);
17533 /* Want to make sure that the reset task doesn't run */
17534 tg3_reset_task_cancel(tp);
17536 netif_device_detach(netdev);
17538 /* Clean up software state, even if MMIO is blocked */
17539 tg3_full_lock(tp, 0);
17540 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17541 tg3_full_unlock(tp);
17544 if (state == pci_channel_io_perm_failure)
17545 err = PCI_ERS_RESULT_DISCONNECT;
17547 pci_disable_device(pdev);
17555 * tg3_io_slot_reset - called after the pci bus has been reset.
17556 * @pdev: Pointer to PCI device
17558 * Restart the card from scratch, as if from a cold-boot.
17559 * At this point, the card has exprienced a hard reset,
17560 * followed by fixups by BIOS, and has its config space
17561 * set up identically to what it was at cold boot.
17563 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17565 struct net_device *netdev = pci_get_drvdata(pdev);
17566 struct tg3 *tp = netdev_priv(netdev);
17567 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17572 if (pci_enable_device(pdev)) {
17573 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
17577 pci_set_master(pdev);
17578 pci_restore_state(pdev);
17579 pci_save_state(pdev);
17581 if (!netif_running(netdev)) {
17582 rc = PCI_ERS_RESULT_RECOVERED;
17586 err = tg3_power_up(tp);
17590 rc = PCI_ERS_RESULT_RECOVERED;
17599 * tg3_io_resume - called when traffic can start flowing again.
17600 * @pdev: Pointer to PCI device
17602 * This callback is called when the error recovery driver tells
17603 * us that its OK to resume normal operation.
17605 static void tg3_io_resume(struct pci_dev *pdev)
17607 struct net_device *netdev = pci_get_drvdata(pdev);
17608 struct tg3 *tp = netdev_priv(netdev);
17613 if (!netif_running(netdev))
17616 tg3_full_lock(tp, 0);
17617 tg3_flag_set(tp, INIT_COMPLETE);
17618 err = tg3_restart_hw(tp, 1);
17620 tg3_full_unlock(tp);
17621 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17625 netif_device_attach(netdev);
17627 tg3_timer_start(tp);
17629 tg3_netif_start(tp);
17631 tg3_full_unlock(tp);
17639 static const struct pci_error_handlers tg3_err_handler = {
17640 .error_detected = tg3_io_error_detected,
17641 .slot_reset = tg3_io_slot_reset,
17642 .resume = tg3_io_resume
17645 static struct pci_driver tg3_driver = {
17646 .name = DRV_MODULE_NAME,
17647 .id_table = tg3_pci_tbl,
17648 .probe = tg3_init_one,
17649 .remove = tg3_remove_one,
17650 .err_handler = &tg3_err_handler,
17651 .driver.pm = TG3_PM_OPS,
17654 static int __init tg3_init(void)
17656 return pci_register_driver(&tg3_driver);
17659 static void __exit tg3_cleanup(void)
17661 pci_unregister_driver(&tg3_driver);
17664 module_init(tg3_init);
17665 module_exit(tg3_cleanup);