/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>

#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#include <asm/idprom.h>
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)		\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)		\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		131
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"April 09, 2013"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)

#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
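/* For example, with TG3_TX_RING_SIZE == 512 the wrap in NEXT_TX() is just a
 * mask: (511 + 1) & (512 - 1) == 0, so the producer index folds back to the
 * start of the ring without any divide or modulo instruction.
 */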
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN		2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002
237 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
257 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
258 TG3_DRV_DATA_FLAG_5705_10_100},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
260 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
261 TG3_DRV_DATA_FLAG_5705_10_100},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
264 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
265 TG3_DRV_DATA_FLAG_5705_10_100},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
272 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
278 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
286 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
287 PCI_VENDOR_ID_LENOVO,
288 TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
289 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
291 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
292 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
294 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
301 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
302 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
303 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
304 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
305 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
306 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
307 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
308 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
309 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
310 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
311 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
312 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
313 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
314 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
315 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
316 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
317 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
318 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
319 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
320 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
321 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
322 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
323 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
324 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
325 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
326 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
327 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
328 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
329 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
330 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
331 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
332 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
333 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
334 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
335 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
336 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
337 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
338 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
339 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
340 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
341 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
342 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
343 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
344 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
345 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
346 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
353 static const struct {
354 const char string[ETH_GSTRING_LEN];
355 } ethtool_stats_keys[] = {
358 { "rx_ucast_packets" },
359 { "rx_mcast_packets" },
360 { "rx_bcast_packets" },
362 { "rx_align_errors" },
363 { "rx_xon_pause_rcvd" },
364 { "rx_xoff_pause_rcvd" },
365 { "rx_mac_ctrl_rcvd" },
366 { "rx_xoff_entered" },
367 { "rx_frame_too_long_errors" },
369 { "rx_undersize_packets" },
370 { "rx_in_length_errors" },
371 { "rx_out_length_errors" },
372 { "rx_64_or_less_octet_packets" },
373 { "rx_65_to_127_octet_packets" },
374 { "rx_128_to_255_octet_packets" },
375 { "rx_256_to_511_octet_packets" },
376 { "rx_512_to_1023_octet_packets" },
377 { "rx_1024_to_1522_octet_packets" },
378 { "rx_1523_to_2047_octet_packets" },
379 { "rx_2048_to_4095_octet_packets" },
380 { "rx_4096_to_8191_octet_packets" },
381 { "rx_8192_to_9022_octet_packets" },
388 { "tx_flow_control" },
390 { "tx_single_collisions" },
391 { "tx_mult_collisions" },
393 { "tx_excessive_collisions" },
394 { "tx_late_collisions" },
395 { "tx_collide_2times" },
396 { "tx_collide_3times" },
397 { "tx_collide_4times" },
398 { "tx_collide_5times" },
399 { "tx_collide_6times" },
400 { "tx_collide_7times" },
401 { "tx_collide_8times" },
402 { "tx_collide_9times" },
403 { "tx_collide_10times" },
404 { "tx_collide_11times" },
405 { "tx_collide_12times" },
406 { "tx_collide_13times" },
407 { "tx_collide_14times" },
408 { "tx_collide_15times" },
409 { "tx_ucast_packets" },
410 { "tx_mcast_packets" },
411 { "tx_bcast_packets" },
412 { "tx_carrier_sense_errors" },
416 { "dma_writeq_full" },
417 { "dma_write_prioq_full" },
421 { "rx_threshold_hit" },
423 { "dma_readq_full" },
424 { "dma_read_prioq_full" },
425 { "tx_comp_queue_full" },
427 { "ring_set_send_prod_index" },
428 { "ring_status_update" },
430 { "nic_avoided_irqs" },
431 { "nic_tx_threshold_hit" },
433 { "mbuf_lwm_thresh_hit" },
436 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
437 #define TG3_NVRAM_TEST 0
438 #define TG3_LINK_TEST 1
439 #define TG3_REGISTER_TEST 2
440 #define TG3_MEMORY_TEST 3
441 #define TG3_MAC_LOOPB_TEST 4
442 #define TG3_PHY_LOOPB_TEST 5
443 #define TG3_EXT_LOOPB_TEST 6
444 #define TG3_INTERRUPT_TEST 7
447 static const struct {
448 const char string[ETH_GSTRING_LEN];
449 } ethtool_test_keys[] = {
450 [TG3_NVRAM_TEST] = { "nvram test (online) " },
451 [TG3_LINK_TEST] = { "link test (online) " },
452 [TG3_REGISTER_TEST] = { "register test (offline)" },
453 [TG3_MEMORY_TEST] = { "memory test (offline)" },
454 [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" },
455 [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" },
456 [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
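/* Low-level register accessors.  The rest of the driver normally goes
 * through the tp->read32/tp->write32 function pointers (wrapped by the
 * tr32()/tw32() macros further down), so the direct, flushed and indirect
 * config-space variants below can be selected to match a given chip's
 * errata.
 */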
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
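/* Indirect accessors: the register offset is programmed into the
 * TG3PCI_REG_BASE_ADDR window in PCI config space and the data moves
 * through TG3PCI_REG_DATA, all under tp->indirect_lock.
 */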
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
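/* Illustrative use of the macros above (not a verbatim quote from this
 * file):
 *
 *	u32 val = tr32(MAC_MI_MODE);
 *	tw32_f(MAC_MI_MODE, val & ~MAC_MI_MODE_AUTO_POLL);
 *
 * tw32_f() flushes the write before returning, which matters on chips
 * where posted writes can be reordered.
 */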
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}
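/* tg3_ape_lock()/tg3_ape_unlock() arbitrate shared resources with the APE
 * management firmware: the driver writes its request bit into the per-lock
 * REQ register and polls the matching GRANT register for up to 1 ms,
 * revoking the request and returning -EBUSY if the grant never appears.
 */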
700 static int tg3_ape_lock(struct tg3 *tp, int locknum)
704 u32 status, req, gnt, bit;
706 if (!tg3_flag(tp, ENABLE_APE))
710 case TG3_APE_LOCK_GPIO:
711 if (tg3_asic_rev(tp) == ASIC_REV_5761)
713 case TG3_APE_LOCK_GRC:
714 case TG3_APE_LOCK_MEM:
716 bit = APE_LOCK_REQ_DRIVER;
718 bit = 1 << tp->pci_fn;
720 case TG3_APE_LOCK_PHY0:
721 case TG3_APE_LOCK_PHY1:
722 case TG3_APE_LOCK_PHY2:
723 case TG3_APE_LOCK_PHY3:
724 bit = APE_LOCK_REQ_DRIVER;
730 if (tg3_asic_rev(tp) == ASIC_REV_5761) {
731 req = TG3_APE_LOCK_REQ;
732 gnt = TG3_APE_LOCK_GRANT;
734 req = TG3_APE_PER_LOCK_REQ;
735 gnt = TG3_APE_PER_LOCK_GRANT;
740 tg3_ape_write32(tp, req + off, bit);
742 /* Wait for up to 1 millisecond to acquire lock. */
743 for (i = 0; i < 100; i++) {
744 status = tg3_ape_read32(tp, gnt + off);
751 /* Revoke the lock request. */
752 tg3_ape_write32(tp, gnt + off, bit);
759 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
763 if (!tg3_flag(tp, ENABLE_APE))
767 case TG3_APE_LOCK_GPIO:
768 if (tg3_asic_rev(tp) == ASIC_REV_5761)
770 case TG3_APE_LOCK_GRC:
771 case TG3_APE_LOCK_MEM:
773 bit = APE_LOCK_GRANT_DRIVER;
775 bit = 1 << tp->pci_fn;
777 case TG3_APE_LOCK_PHY0:
778 case TG3_APE_LOCK_PHY1:
779 case TG3_APE_LOCK_PHY2:
780 case TG3_APE_LOCK_PHY3:
781 bit = APE_LOCK_GRANT_DRIVER;
787 if (tg3_asic_rev(tp) == ASIC_REV_5761)
788 gnt = TG3_APE_LOCK_GRANT;
790 gnt = TG3_APE_PER_LOCK_GRANT;
792 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
795 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
800 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
803 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
804 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
807 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
810 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
813 return timeout_us ? 0 : -EBUSY;
816 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
820 for (i = 0; i < timeout_us / 10; i++) {
821 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
823 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
829 return i == timeout_us / 10;
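/* Read from the APE/NCSI scratchpad: post a SCRTCHPD_READ driver event
 * describing (offset, length), wait for the APE to service it, then copy
 * the reply out of the shared message buffer one 32-bit word at a time.
 */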
832 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
836 u32 i, bufoff, msgoff, maxlen, apedata;
838 if (!tg3_flag(tp, APE_HAS_NCSI))
841 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
842 if (apedata != APE_SEG_SIG_MAGIC)
845 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
846 if (!(apedata & APE_FW_STATUS_READY))
849 bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
851 msgoff = bufoff + 2 * sizeof(u32);
852 maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
857 /* Cap xfer sizes to scratchpad limits. */
858 length = (len > maxlen) ? maxlen : len;
861 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
862 if (!(apedata & APE_FW_STATUS_READY))
865 /* Wait for up to 1 msec for APE to service previous event. */
866 err = tg3_ape_event_lock(tp, 1000);
870 apedata = APE_EVENT_STATUS_DRIVER_EVNT |
871 APE_EVENT_STATUS_SCRTCHPD_READ |
872 APE_EVENT_STATUS_EVENT_PENDING;
873 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
875 tg3_ape_write32(tp, bufoff, base_off);
876 tg3_ape_write32(tp, bufoff + sizeof(u32), length);
878 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
879 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
883 if (tg3_ape_wait_for_event(tp, 30000))
886 for (i = 0; length; i += 4, length -= 4) {
887 u32 val = tg3_ape_read32(tp, msgoff + i);
888 memcpy(data, &val, sizeof(u32));
896 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
901 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
902 if (apedata != APE_SEG_SIG_MAGIC)
905 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
906 if (!(apedata & APE_FW_STATUS_READY))
909 /* Wait for up to 1 millisecond for APE to service previous event. */
910 err = tg3_ape_event_lock(tp, 1000);
914 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
915 event | APE_EVENT_STATUS_EVENT_PENDING);
917 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
918 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
923 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
928 if (!tg3_flag(tp, ENABLE_APE))
932 case RESET_KIND_INIT:
933 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
934 APE_HOST_SEG_SIG_MAGIC);
935 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
936 APE_HOST_SEG_LEN_MAGIC);
937 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
938 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
939 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
940 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
941 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
942 APE_HOST_BEHAV_NO_PHYLOCK);
943 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
944 TG3_APE_HOST_DRVR_STATE_START);
946 event = APE_EVENT_STATUS_STATE_START;
948 case RESET_KIND_SHUTDOWN:
949 /* With the interface we are currently using,
950 * APE does not track driver state. Wiping
951 * out the HOST SEGMENT SIGNATURE forces
952 * the APE to assume OS absent status.
954 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
956 if (device_may_wakeup(&tp->pdev->dev) &&
957 tg3_flag(tp, WOL_ENABLE)) {
958 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
959 TG3_APE_HOST_WOL_SPEED_AUTO);
960 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
962 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
964 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
966 event = APE_EVENT_STATUS_STATE_UNLOAD;
968 case RESET_KIND_SUSPEND:
969 event = APE_EVENT_STATUS_STATE_SUSPEND;
975 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
977 tg3_ape_send_event(tp, event);
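/* Mask the PCI interrupt via MISC_HOST_CTRL and write 1 to every vector's
 * interrupt mailbox so the chip stops raising interrupts for it.
 */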
980 static void tg3_disable_ints(struct tg3 *tp)
984 tw32(TG3PCI_MISC_HOST_CTRL,
985 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
986 for (i = 0; i < tp->irq_max; i++)
987 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
990 static void tg3_enable_ints(struct tg3 *tp)
997 tw32(TG3PCI_MISC_HOST_CTRL,
998 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
1000 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
1001 for (i = 0; i < tp->irq_cnt; i++) {
1002 struct tg3_napi *tnapi = &tp->napi[i];
1004 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1005 if (tg3_flag(tp, 1SHOT_MSI))
1006 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1008 tp->coal_now |= tnapi->coal_now;
1011 /* Force an initial interrupt */
1012 if (!tg3_flag(tp, TAGGED_STATUS) &&
1013 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1014 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1016 tw32(HOSTCC_MODE, tp->coal_now);
1018 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
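/* Returns nonzero if the status block shows anything for this NAPI
 * instance to do: a link-change event, TX completions beyond tx_cons, or
 * new entries in the RX return ring.
 */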
1021 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1023 struct tg3 *tp = tnapi->tp;
1024 struct tg3_hw_status *sblk = tnapi->hw_status;
1025 unsigned int work_exists = 0;
1027 /* check for phy events */
1028 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1029 if (sblk->status & SD_STATUS_LINK_CHG)
1033 /* check for TX work to do */
1034 if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1037 /* check for RX work to do */
1038 if (tnapi->rx_rcb_prod_idx &&
1039 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1046 * similar to tg3_enable_ints, but it accurately determines whether there
1047 * is new work pending and can return without flushing the PIO write
1048 * which reenables interrupts
1050 static void tg3_int_reenable(struct tg3_napi *tnapi)
1052 struct tg3 *tp = tnapi->tp;
1054 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1057 /* When doing tagged status, this work check is unnecessary.
1058 * The last_tag we write above tells the chip which piece of
1059 * work we've completed.
1061 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1062 tw32(HOSTCC_MODE, tp->coalesce_mode |
1063 HOSTCC_MODE_ENABLE | tnapi->coal_now);
1066 static void tg3_switch_clocks(struct tg3 *tp)
1069 u32 orig_clock_ctrl;
1071 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1074 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1076 orig_clock_ctrl = clock_ctrl;
1077 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1078 CLOCK_CTRL_CLKRUN_OENABLE |
1080 tp->pci_clock_ctrl = clock_ctrl;
1082 if (tg3_flag(tp, 5705_PLUS)) {
1083 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1084 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1085 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1087 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1088 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1090 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1092 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1093 clock_ctrl | (CLOCK_CTRL_ALTCLK),
1096 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1099 #define PHY_BUSY_LOOPS 5000
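/* Clause-22 MII access through the MAC's MI_COM register: auto-polling is
 * temporarily disabled, the PHY address/register/command are packed into
 * MI_COM, and the BUSY bit is polled for up to PHY_BUSY_LOOPS iterations.
 */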
1101 static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
1108 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1110 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1114 tg3_ape_lock(tp, tp->phy_ape_lock);
1118 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1119 MI_COM_PHY_ADDR_MASK);
1120 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1121 MI_COM_REG_ADDR_MASK);
1122 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1124 tw32_f(MAC_MI_COM, frame_val);
1126 loops = PHY_BUSY_LOOPS;
1127 while (loops != 0) {
1129 frame_val = tr32(MAC_MI_COM);
1131 if ((frame_val & MI_COM_BUSY) == 0) {
1133 frame_val = tr32(MAC_MI_COM);
1141 *val = frame_val & MI_COM_DATA_MASK;
1145 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1146 tw32_f(MAC_MI_MODE, tp->mi_mode);
1150 tg3_ape_unlock(tp, tp->phy_ape_lock);
1155 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1157 return __tg3_readphy(tp, tp->phy_addr, reg, val);
1160 static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
1167 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1168 (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1171 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1173 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1177 tg3_ape_lock(tp, tp->phy_ape_lock);
1179 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1180 MI_COM_PHY_ADDR_MASK);
1181 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1182 MI_COM_REG_ADDR_MASK);
1183 frame_val |= (val & MI_COM_DATA_MASK);
1184 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1186 tw32_f(MAC_MI_COM, frame_val);
1188 loops = PHY_BUSY_LOOPS;
1189 while (loops != 0) {
1191 frame_val = tr32(MAC_MI_COM);
1192 if ((frame_val & MI_COM_BUSY) == 0) {
1194 frame_val = tr32(MAC_MI_COM);
1204 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1205 tw32_f(MAC_MI_MODE, tp->mi_mode);
1209 tg3_ape_unlock(tp, tp->phy_ape_lock);
1214 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1216 return __tg3_writephy(tp, tp->phy_addr, reg, val);
1219 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1223 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1227 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1231 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1232 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1236 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1242 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1246 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1250 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1254 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1255 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1259 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1265 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1269 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1271 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1276 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1280 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1282 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1287 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1291 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1292 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1293 MII_TG3_AUXCTL_SHDWSEL_MISC);
1295 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1300 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1302 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1303 set |= MII_TG3_AUXCTL_MISC_WREN;
1305 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1308 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1313 err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1319 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1321 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1323 err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1324 val | MII_TG3_AUXCTL_ACTL_TX_6DB);
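/* Soft-reset the PHY through BMCR and poll until the reset bit self-clears. */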
1329 static int tg3_bmcr_reset(struct tg3 *tp)
1334 /* OK, reset it, and poll the BMCR_RESET bit until it
1335 * clears or we time out.
1337 phy_control = BMCR_RESET;
1338 err = tg3_writephy(tp, MII_BMCR, phy_control);
1344 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1348 if ((phy_control & BMCR_RESET) == 0) {
1360 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1362 struct tg3 *tp = bp->priv;
1365 spin_lock_bh(&tp->lock);
1367 if (tg3_readphy(tp, reg, &val))
1370 spin_unlock_bh(&tp->lock);
1375 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1377 struct tg3 *tp = bp->priv;
1380 spin_lock_bh(&tp->lock);
1382 if (tg3_writephy(tp, reg, val))
1385 spin_unlock_bh(&tp->lock);
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
1395 static void tg3_mdio_config_5785(struct tg3 *tp)
1398 struct phy_device *phydev;
1400 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1401 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1402 case PHY_ID_BCM50610:
1403 case PHY_ID_BCM50610M:
1404 val = MAC_PHYCFG2_50610_LED_MODES;
1406 case PHY_ID_BCMAC131:
1407 val = MAC_PHYCFG2_AC131_LED_MODES;
1409 case PHY_ID_RTL8211C:
1410 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1412 case PHY_ID_RTL8201E:
1413 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1419 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1420 tw32(MAC_PHYCFG2, val);
1422 val = tr32(MAC_PHYCFG1);
1423 val &= ~(MAC_PHYCFG1_RGMII_INT |
1424 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1425 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1426 tw32(MAC_PHYCFG1, val);
1431 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1432 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1433 MAC_PHYCFG2_FMODE_MASK_MASK |
1434 MAC_PHYCFG2_GMODE_MASK_MASK |
1435 MAC_PHYCFG2_ACT_MASK_MASK |
1436 MAC_PHYCFG2_QUAL_MASK_MASK |
1437 MAC_PHYCFG2_INBAND_ENABLE;
1439 tw32(MAC_PHYCFG2, val);
1441 val = tr32(MAC_PHYCFG1);
1442 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1443 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1444 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1445 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1446 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1447 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1448 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1450 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1451 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1452 tw32(MAC_PHYCFG1, val);
1454 val = tr32(MAC_EXT_RGMII_MODE);
1455 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1456 MAC_RGMII_MODE_RX_QUALITY |
1457 MAC_RGMII_MODE_RX_ACTIVITY |
1458 MAC_RGMII_MODE_RX_ENG_DET |
1459 MAC_RGMII_MODE_TX_ENABLE |
1460 MAC_RGMII_MODE_TX_LOWPWR |
1461 MAC_RGMII_MODE_TX_RESET);
1462 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1463 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1464 val |= MAC_RGMII_MODE_RX_INT_B |
1465 MAC_RGMII_MODE_RX_QUALITY |
1466 MAC_RGMII_MODE_RX_ACTIVITY |
1467 MAC_RGMII_MODE_RX_ENG_DET;
1468 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1469 val |= MAC_RGMII_MODE_TX_ENABLE |
1470 MAC_RGMII_MODE_TX_LOWPWR |
1471 MAC_RGMII_MODE_TX_RESET;
1473 tw32(MAC_EXT_RGMII_MODE, val);
1476 static void tg3_mdio_start(struct tg3 *tp)
1478 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1479 tw32_f(MAC_MI_MODE, tp->mi_mode);
1482 if (tg3_flag(tp, MDIOBUS_INITED) &&
1483 tg3_asic_rev(tp) == ASIC_REV_5785)
1484 tg3_mdio_config_5785(tp);
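/* Allocate and register the mdio bus used when phylib drives the PHY.  On
 * 5717-plus devices the PHY address depends on the PCI function and on
 * whether the port is strapped as serdes.
 */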
1487 static int tg3_mdio_init(struct tg3 *tp)
1491 struct phy_device *phydev;
1493 if (tg3_flag(tp, 5717_PLUS)) {
1496 tp->phy_addr = tp->pci_fn + 1;
1498 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1499 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1501 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1502 TG3_CPMU_PHY_STRAP_IS_SERDES;
1506 tp->phy_addr = TG3_PHY_MII_ADDR;
1510 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1513 tp->mdio_bus = mdiobus_alloc();
1514 if (tp->mdio_bus == NULL)
1517 tp->mdio_bus->name = "tg3 mdio bus";
1518 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1519 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1520 tp->mdio_bus->priv = tp;
1521 tp->mdio_bus->parent = &tp->pdev->dev;
1522 tp->mdio_bus->read = &tg3_mdio_read;
1523 tp->mdio_bus->write = &tg3_mdio_write;
1524 tp->mdio_bus->reset = &tg3_mdio_reset;
1525 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1526 tp->mdio_bus->irq = &tp->mdio_irq[0];
1528 for (i = 0; i < PHY_MAX_ADDR; i++)
1529 tp->mdio_bus->irq[i] = PHY_POLL;
1531 /* The bus registration will look for all the PHYs on the mdio bus.
1532 * Unfortunately, it does not ensure the PHY is powered up before
1533 * accessing the PHY ID registers. A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);
1539 i = mdiobus_register(tp->mdio_bus);
1541 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1542 mdiobus_free(tp->mdio_bus);
1546 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1548 if (!phydev || !phydev->drv) {
1549 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1550 mdiobus_unregister(tp->mdio_bus);
1551 mdiobus_free(tp->mdio_bus);
1555 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1556 case PHY_ID_BCM57780:
1557 phydev->interface = PHY_INTERFACE_MODE_GMII;
1558 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1560 case PHY_ID_BCM50610:
1561 case PHY_ID_BCM50610M:
1562 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1563 PHY_BRCM_RX_REFCLK_UNUSED |
1564 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1565 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1566 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1567 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1568 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1569 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1570 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1571 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1573 case PHY_ID_RTL8211C:
1574 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1576 case PHY_ID_RTL8201E:
1577 case PHY_ID_BCMAC131:
1578 phydev->interface = PHY_INTERFACE_MODE_MII;
1579 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1580 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1584 tg3_flag_set(tp, MDIOBUS_INITED);
1586 if (tg3_asic_rev(tp) == ASIC_REV_5785)
1587 tg3_mdio_config_5785(tp);
1592 static void tg3_mdio_fini(struct tg3 *tp)
1594 if (tg3_flag(tp, MDIOBUS_INITED)) {
1595 tg3_flag_clear(tp, MDIOBUS_INITED);
1596 mdiobus_unregister(tp->mdio_bus);
1597 mdiobus_free(tp->mdio_bus);
1601 /* tp->lock is held. */
1602 static inline void tg3_generate_fw_event(struct tg3 *tp)
1606 val = tr32(GRC_RX_CPU_EVENT);
1607 val |= GRC_RX_CPU_DRIVER_EVENT;
1608 tw32_f(GRC_RX_CPU_EVENT, val);
1610 tp->last_event_jiffies = jiffies;
1613 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
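/* Driver-to-firmware handshake: tg3_generate_fw_event() above raises
 * GRC_RX_CPU_DRIVER_EVENT and stamps last_event_jiffies, while
 * tg3_wait_for_event_ack() below polls (bounded by
 * TG3_FW_EVENT_TIMEOUT_USEC) for the RX CPU to clear that bit before the
 * next command is written to the firmware mailbox.
 */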
1615 /* tp->lock is held. */
1616 static void tg3_wait_for_event_ack(struct tg3 *tp)
1619 unsigned int delay_cnt;
1622 /* If enough time has passed, no wait is necessary. */
1623 time_remain = (long)(tp->last_event_jiffies + 1 +
1624 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1626 if (time_remain < 0)
1629 /* Check if we can shorten the wait time. */
1630 delay_cnt = jiffies_to_usecs(time_remain);
1631 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1632 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1633 delay_cnt = (delay_cnt >> 3) + 1;
1635 for (i = 0; i < delay_cnt; i++) {
1636 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1642 /* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}
1677 /* tp->lock is held. */
1678 static void tg3_ump_link_report(struct tg3 *tp)
1682 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1685 tg3_phy_gather_ump_data(tp, data);
1687 tg3_wait_for_event_ack(tp);
1689 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1690 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1691 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1692 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1693 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1694 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1696 tg3_generate_fw_event(tp);
1699 /* tp->lock is held. */
1700 static void tg3_stop_fw(struct tg3 *tp)
1702 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1703 /* Wait for RX cpu to ACK the previous event. */
1704 tg3_wait_for_event_ack(tp);
1706 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1708 tg3_generate_fw_event(tp);
1710 /* Wait for RX cpu to ACK this event. */
1711 tg3_wait_for_event_ack(tp);
1715 /* tp->lock is held. */
1716 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1718 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1719 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1721 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1723 case RESET_KIND_INIT:
1724 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1728 case RESET_KIND_SHUTDOWN:
1729 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1733 case RESET_KIND_SUSPEND:
1734 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1743 if (kind == RESET_KIND_INIT ||
1744 kind == RESET_KIND_SUSPEND)
1745 tg3_ape_driver_state_change(tp, kind);
1748 /* tp->lock is held. */
1749 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1751 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1753 case RESET_KIND_INIT:
1754 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1755 DRV_STATE_START_DONE);
1758 case RESET_KIND_SHUTDOWN:
1759 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1760 DRV_STATE_UNLOAD_DONE);
1768 if (kind == RESET_KIND_SHUTDOWN)
1769 tg3_ape_driver_state_change(tp, kind);
1772 /* tp->lock is held. */
1773 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1775 if (tg3_flag(tp, ENABLE_ASF)) {
1777 case RESET_KIND_INIT:
1778 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1782 case RESET_KIND_SHUTDOWN:
1783 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1787 case RESET_KIND_SUSPEND:
1788 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1798 static int tg3_poll_fw(struct tg3 *tp)
1803 if (tg3_flag(tp, IS_SSB_CORE)) {
1804 /* We don't use firmware. */
1808 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1809 /* Wait up to 20ms for init done. */
1810 for (i = 0; i < 200; i++) {
1811 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1818 /* Wait for firmware initialization to complete. */
1819 for (i = 0; i < 100000; i++) {
1820 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1821 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1826 /* Chip might not be fitted with firmware. Some Sun onboard
1827 * parts are configured like that. So don't signal the timeout
1828 * of the above loop as an error, but do report the lack of
1829 * running firmware once.
1831 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1832 tg3_flag_set(tp, NO_FWARE_REPORTED);
1834 netdev_info(tp->dev, "No firmware running\n");
1837 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1838 /* The 57765 A0 needs a little more
1839 * time to do some important work.
1847 static void tg3_link_report(struct tg3 *tp)
1849 if (!netif_carrier_ok(tp->dev)) {
1850 netif_info(tp, link, tp->dev, "Link is down\n");
1851 tg3_ump_link_report(tp);
1852 } else if (netif_msg_link(tp)) {
1853 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1854 (tp->link_config.active_speed == SPEED_1000 ?
1856 (tp->link_config.active_speed == SPEED_100 ?
1858 (tp->link_config.active_duplex == DUPLEX_FULL ?
1861 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1862 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1864 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1867 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1868 netdev_info(tp->dev, "EEE is %s\n",
1869 tp->setlpicnt ? "enabled" : "disabled");
1871 tg3_ump_link_report(tp);
1874 tp->link_up = netif_carrier_ok(tp->dev);
1877 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1881 if (adv & ADVERTISE_PAUSE_CAP) {
1882 flowctrl |= FLOW_CTRL_RX;
1883 if (!(adv & ADVERTISE_PAUSE_ASYM))
1884 flowctrl |= FLOW_CTRL_TX;
1885 } else if (adv & ADVERTISE_PAUSE_ASYM)
1886 flowctrl |= FLOW_CTRL_TX;
1891 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1895 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1896 miireg = ADVERTISE_1000XPAUSE;
1897 else if (flow_ctrl & FLOW_CTRL_TX)
1898 miireg = ADVERTISE_1000XPSE_ASYM;
1899 else if (flow_ctrl & FLOW_CTRL_RX)
1900 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1907 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1911 if (adv & ADVERTISE_1000XPAUSE) {
1912 flowctrl |= FLOW_CTRL_RX;
1913 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1914 flowctrl |= FLOW_CTRL_TX;
1915 } else if (adv & ADVERTISE_1000XPSE_ASYM)
1916 flowctrl |= FLOW_CTRL_TX;
1921 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1925 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1926 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1927 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1928 if (lcladv & ADVERTISE_1000XPAUSE)
1930 if (rmtadv & ADVERTISE_1000XPAUSE)
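/* Resolve and program flow control.  When autonegotiation owns the pause
 * settings, the local/remote advertisements are resolved (1000Base-X or
 * copper rules as appropriate); otherwise the user-configured flowctrl is
 * used.  The result is applied to MAC_RX_MODE and MAC_TX_MODE.
 */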
1937 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1941 u32 old_rx_mode = tp->rx_mode;
1942 u32 old_tx_mode = tp->tx_mode;
1944 if (tg3_flag(tp, USE_PHYLIB))
1945 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1947 autoneg = tp->link_config.autoneg;
1949 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1950 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1951 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1953 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1955 flowctrl = tp->link_config.flowctrl;
1957 tp->link_config.active_flowctrl = flowctrl;
1959 if (flowctrl & FLOW_CTRL_RX)
1960 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1962 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1964 if (old_rx_mode != tp->rx_mode)
1965 tw32_f(MAC_RX_MODE, tp->rx_mode);
1967 if (flowctrl & FLOW_CTRL_TX)
1968 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1970 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1972 if (old_tx_mode != tp->tx_mode)
1973 tw32_f(MAC_TX_MODE, tp->tx_mode);
1976 static void tg3_adjust_link(struct net_device *dev)
1978 u8 oldflowctrl, linkmesg = 0;
1979 u32 mac_mode, lcl_adv, rmt_adv;
1980 struct tg3 *tp = netdev_priv(dev);
1981 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1983 spin_lock_bh(&tp->lock);
1985 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1986 MAC_MODE_HALF_DUPLEX);
1988 oldflowctrl = tp->link_config.active_flowctrl;
1994 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1995 mac_mode |= MAC_MODE_PORT_MODE_MII;
1996 else if (phydev->speed == SPEED_1000 ||
1997 tg3_asic_rev(tp) != ASIC_REV_5785)
1998 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2000 mac_mode |= MAC_MODE_PORT_MODE_MII;
2002 if (phydev->duplex == DUPLEX_HALF)
2003 mac_mode |= MAC_MODE_HALF_DUPLEX;
2005 lcl_adv = mii_advertise_flowctrl(
2006 tp->link_config.flowctrl);
2009 rmt_adv = LPA_PAUSE_CAP;
2010 if (phydev->asym_pause)
2011 rmt_adv |= LPA_PAUSE_ASYM;
2014 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2016 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2018 if (mac_mode != tp->mac_mode) {
2019 tp->mac_mode = mac_mode;
2020 tw32_f(MAC_MODE, tp->mac_mode);
2024 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2025 if (phydev->speed == SPEED_10)
2027 MAC_MI_STAT_10MBPS_MODE |
2028 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2030 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2033 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2034 tw32(MAC_TX_LENGTHS,
2035 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2036 (6 << TX_LENGTHS_IPG_SHIFT) |
2037 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2039 tw32(MAC_TX_LENGTHS,
2040 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2041 (6 << TX_LENGTHS_IPG_SHIFT) |
2042 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2044 if (phydev->link != tp->old_link ||
2045 phydev->speed != tp->link_config.active_speed ||
2046 phydev->duplex != tp->link_config.active_duplex ||
2047 oldflowctrl != tp->link_config.active_flowctrl)
2050 tp->old_link = phydev->link;
2051 tp->link_config.active_speed = phydev->speed;
2052 tp->link_config.active_duplex = phydev->duplex;
2054 spin_unlock_bh(&tp->lock);
2057 tg3_link_report(tp);
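/* Attach the net_device to its PHY through phylib.  tg3_adjust_link()
 * above is the link-change callback and mirrors PHY state into MAC_MODE
 * and the flow-control configuration.
 */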
2060 static int tg3_phy_init(struct tg3 *tp)
2062 struct phy_device *phydev;
2064 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2067 /* Bring the PHY back to a known state. */
2070 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2072 /* Attach the MAC to the PHY. */
2073 phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2074 tg3_adjust_link, phydev->interface);
2075 if (IS_ERR(phydev)) {
2076 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2077 return PTR_ERR(phydev);
2080 /* Mask with MAC supported features. */
2081 switch (phydev->interface) {
2082 case PHY_INTERFACE_MODE_GMII:
2083 case PHY_INTERFACE_MODE_RGMII:
2084 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2085 phydev->supported &= (PHY_GBIT_FEATURES |
2087 SUPPORTED_Asym_Pause);
2091 case PHY_INTERFACE_MODE_MII:
2092 phydev->supported &= (PHY_BASIC_FEATURES |
2094 SUPPORTED_Asym_Pause);
2097 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2101 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2103 phydev->advertising = phydev->supported;
2108 static void tg3_phy_start(struct tg3 *tp)
2110 struct phy_device *phydev;
2112 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2115 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2117 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2118 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2119 phydev->speed = tp->link_config.speed;
2120 phydev->duplex = tp->link_config.duplex;
2121 phydev->autoneg = tp->link_config.autoneg;
2122 phydev->advertising = tp->link_config.advertising;
2127 phy_start_aneg(phydev);
2130 static void tg3_phy_stop(struct tg3 *tp)
2132 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2135 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2138 static void tg3_phy_fini(struct tg3 *tp)
2140 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2141 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2142 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2146 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2151 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2154 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2155 /* Cannot do read-modify-write on 5401 */
2156 err = tg3_phy_auxctl_write(tp,
2157 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2158 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2163 err = tg3_phy_auxctl_read(tp,
2164 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2168 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2169 err = tg3_phy_auxctl_write(tp,
2170 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2176 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2180 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2183 tg3_writephy(tp, MII_TG3_FET_TEST,
2184 phytest | MII_TG3_FET_SHADOW_EN);
2185 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2187 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2189 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2190 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2192 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2196 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2200 if (!tg3_flag(tp, 5705_PLUS) ||
2201 (tg3_flag(tp, 5717_PLUS) &&
2202 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2205 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2206 tg3_phy_fet_toggle_apd(tp, enable);
2210 reg = MII_TG3_MISC_SHDW_WREN |
2211 MII_TG3_MISC_SHDW_SCR5_SEL |
2212 MII_TG3_MISC_SHDW_SCR5_LPED |
2213 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2214 MII_TG3_MISC_SHDW_SCR5_SDTL |
2215 MII_TG3_MISC_SHDW_SCR5_C125OE;
2216 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2217 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2219 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2222 reg = MII_TG3_MISC_SHDW_WREN |
2223 MII_TG3_MISC_SHDW_APD_SEL |
2224 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2226 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2228 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2231 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2235 if (!tg3_flag(tp, 5705_PLUS) ||
2236 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2239 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2242 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2243 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2245 tg3_writephy(tp, MII_TG3_FET_TEST,
2246 ephy | MII_TG3_FET_SHADOW_EN);
2247 if (!tg3_readphy(tp, reg, &phy)) {
2249 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2251 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2252 tg3_writephy(tp, reg, phy);
2254 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2259 ret = tg3_phy_auxctl_read(tp,
2260 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2263 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2265 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2266 tg3_phy_auxctl_write(tp,
2267 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2272 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2277 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2280 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2282 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2283 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2286 static void tg3_phy_apply_otp(struct tg3 *tp)
2295 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2298 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2299 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2300 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2302 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2303 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2304 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2306 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2307 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2308 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2310 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2311 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2313 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2314 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2316 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2317 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2318 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2320 tg3_phy_toggle_auxctl_smdsp(tp, false);
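/* Note (summary of the code above, added for clarity): tg3_phy_apply_otp()
 * transfers the adapter's one-time-programmable tuning word into the PHY DSP.
 * Each TG3_OTP_*_MASK/SHIFT pair isolates one analog field (AGC target, HPF
 * filter/override, LPF disable, VDAC, 10BASE-T amplitude, resistor offsets)
 * and the value is written to the matching DSP tap register between the
 * tg3_phy_toggle_auxctl_smdsp(tp, true) and (tp, false) calls.
 */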
2323 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2327 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2332 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2334 tp->link_config.active_duplex == DUPLEX_FULL &&
2335 (tp->link_config.active_speed == SPEED_100 ||
2336 tp->link_config.active_speed == SPEED_1000)) {
2339 if (tp->link_config.active_speed == SPEED_1000)
2340 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2342 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2344 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2346 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2347 TG3_CL45_D7_EEERES_STAT, &val);
2349 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2350 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2354 if (!tp->setlpicnt) {
2355 if (current_link_up &&
2356 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2357 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2358 tg3_phy_toggle_auxctl_smdsp(tp, false);
2361 val = tr32(TG3_CPMU_EEE_MODE);
2362 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2366 static void tg3_phy_eee_enable(struct tg3 *tp)
2370 if (tp->link_config.active_speed == SPEED_1000 &&
2371 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2372 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2373 tg3_flag(tp, 57765_CLASS)) &&
2374 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2375 val = MII_TG3_DSP_TAP26_ALNOKO |
2376 MII_TG3_DSP_TAP26_RMRXSTO;
2377 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2378 tg3_phy_toggle_auxctl_smdsp(tp, false);
2381 val = tr32(TG3_CPMU_EEE_MODE);
2382 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2385 static int tg3_wait_macro_done(struct tg3 *tp)
2392 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2393 if ((tmp32 & 0x1000) == 0)
2403 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2405 static const u32 test_pat[4][6] = {
2406 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2407 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2408 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2409 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2413 for (chan = 0; chan < 4; chan++) {
2416 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2417 (chan * 0x2000) | 0x0200);
2418 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2420 for (i = 0; i < 6; i++)
2421 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2424 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2425 if (tg3_wait_macro_done(tp)) {
2430 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2431 (chan * 0x2000) | 0x0200);
2432 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2433 if (tg3_wait_macro_done(tp)) {
2438 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2439 if (tg3_wait_macro_done(tp)) {
2444 for (i = 0; i < 6; i += 2) {
2447 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2448 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2449 tg3_wait_macro_done(tp)) {
2455 if (low != test_pat[chan][i] ||
2456 high != test_pat[chan][i+1]) {
2457 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2458 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2459 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2469 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2473 for (chan = 0; chan < 4; chan++) {
2476 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2477 (chan * 0x2000) | 0x0200);
2478 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2479 for (i = 0; i < 6; i++)
2480 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2481 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2482 if (tg3_wait_macro_done(tp))
2489 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2491 u32 reg32, phy9_orig;
2492 int retries, do_phy_reset, err;
2498 err = tg3_bmcr_reset(tp);
2504 /* Disable transmitter and interrupt. */
2505 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2509 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2511 /* Set full-duplex, 1000 mbps. */
2512 tg3_writephy(tp, MII_BMCR,
2513 BMCR_FULLDPLX | BMCR_SPEED1000);
2515 /* Set to master mode. */
2516 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2519 tg3_writephy(tp, MII_CTRL1000,
2520 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2522 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2526 /* Block the PHY control access. */
2527 tg3_phydsp_write(tp, 0x8005, 0x0800);
2529 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2532 } while (--retries);
2534 err = tg3_phy_reset_chanpat(tp);
2538 tg3_phydsp_write(tp, 0x8005, 0x0000);
2540 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2541 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2543 tg3_phy_toggle_auxctl_smdsp(tp, false);
2545 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2547 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2549 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
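/* Note on the 5703/5704/5705 reset workaround above: the PHY transmitter and
 * interrupt are disabled, the link is forced to 1000 Mb/s full duplex in
 * master mode, and a fixed test pattern is written to the four DSP channels
 * and read back (tg3_phy_write_and_check_testpat()).  If verification fails,
 * the BMCR reset and pattern write are retried a bounded number of times,
 * after which the channel pattern is cleared (tg3_phy_reset_chanpat()),
 * MII_CTRL1000 is restored and the transmitter re-enabled.
 */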
2556 static void tg3_carrier_off(struct tg3 *tp)
2558 netif_carrier_off(tp->dev);
2559 tp->link_up = false;
2562 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2564 if (tg3_flag(tp, ENABLE_ASF))
2565 netdev_warn(tp->dev,
2566 "Management side-band traffic will be interrupted during phy settings change\n");
2569 /* This will reset the tigon3 PHY. */
2572 static int tg3_phy_reset(struct tg3 *tp)
2577 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2578 val = tr32(GRC_MISC_CFG);
2579 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2582 err = tg3_readphy(tp, MII_BMSR, &val);
2583 err |= tg3_readphy(tp, MII_BMSR, &val);
2587 if (netif_running(tp->dev) && tp->link_up) {
2588 netif_carrier_off(tp->dev);
2589 tg3_link_report(tp);
2592 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2593 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2594 tg3_asic_rev(tp) == ASIC_REV_5705) {
2595 err = tg3_phy_reset_5703_4_5(tp);
2602 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2603 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2604 cpmuctrl = tr32(TG3_CPMU_CTRL);
2605 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2607 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2610 err = tg3_bmcr_reset(tp);
2614 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2615 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2616 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2618 tw32(TG3_CPMU_CTRL, cpmuctrl);
2621 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2622 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2623 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2624 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2625 CPMU_LSPD_1000MB_MACCLK_12_5) {
2626 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2628 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2632 if (tg3_flag(tp, 5717_PLUS) &&
2633 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2636 tg3_phy_apply_otp(tp);
2638 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2639 tg3_phy_toggle_apd(tp, true);
2641 tg3_phy_toggle_apd(tp, false);
2644 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2645 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2646 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2647 tg3_phydsp_write(tp, 0x000a, 0x0323);
2648 tg3_phy_toggle_auxctl_smdsp(tp, false);
2651 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2652 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2653 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2656 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2657 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2658 tg3_phydsp_write(tp, 0x000a, 0x310b);
2659 tg3_phydsp_write(tp, 0x201f, 0x9506);
2660 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2661 tg3_phy_toggle_auxctl_smdsp(tp, false);
2663 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2664 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2665 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2666 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2667 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2668 tg3_writephy(tp, MII_TG3_TEST1,
2669 MII_TG3_TEST1_TRIM_EN | 0x4);
2671 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2673 tg3_phy_toggle_auxctl_smdsp(tp, false);
2677 /* Set Extended packet length bit (bit 14) on all chips that */
2678 /* support jumbo frames */
2679 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2680 /* Cannot do read-modify-write on 5401 */
2681 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2682 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2683 /* Set bit 14 with read-modify-write to preserve other bits */
2684 err = tg3_phy_auxctl_read(tp,
2685 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2687 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2688 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2691 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2692 * jumbo frames transmission.
2694 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2695 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2696 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2697 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2700 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2701 /* adjust output voltage */
2702 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2705 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2706 tg3_phydsp_write(tp, 0xffb, 0x4000);
2708 tg3_phy_toggle_automdix(tp, true);
2709 tg3_phy_set_wirespeed(tp);
2713 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2714 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2715 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2716 TG3_GPIO_MSG_NEED_VAUX)
2717 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2718 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2719 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2720 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2721 (TG3_GPIO_MSG_DRVR_PRES << 12))
2723 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2724 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2725 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2726 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2727 (TG3_GPIO_MSG_NEED_VAUX << 12))
2729 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2733 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2734 tg3_asic_rev(tp) == ASIC_REV_5719)
2735 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2737 status = tr32(TG3_CPMU_DRV_STATUS);
2739 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2740 status &= ~(TG3_GPIO_MSG_MASK << shift);
2741 status |= (newstat << shift);
2743 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2744 tg3_asic_rev(tp) == ASIC_REV_5719)
2745 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2747 tw32(TG3_CPMU_DRV_STATUS, status);
2749 return status >> TG3_APE_GPIO_MSG_SHIFT;
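/* Note: the status word above carries one 4-bit field per PCI function
 * starting at TG3_APE_GPIO_MSG_SHIFT, of which only the two message bits
 * defined above (DRVR_PRES and NEED_VAUX) are used.  For example, function 2
 * reports TG3_GPIO_MSG_NEED_VAUX by clearing its nibble and ORing in
 * (TG3_GPIO_MSG_NEED_VAUX << (TG3_APE_GPIO_MSG_SHIFT + 8)).  The shifted-down
 * return value can be tested directly against
 * TG3_GPIO_MSG_ALL_DRVR_PRES_MASK / TG3_GPIO_MSG_ALL_NEED_VAUX_MASK.
 */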
2752 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2754 if (!tg3_flag(tp, IS_NIC))
2757 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2758 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2759 tg3_asic_rev(tp) == ASIC_REV_5720) {
2760 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2763 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2765 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2766 TG3_GRC_LCLCTL_PWRSW_DELAY);
2768 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2770 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2771 TG3_GRC_LCLCTL_PWRSW_DELAY);
2777 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2781 if (!tg3_flag(tp, IS_NIC) ||
2782 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2783 tg3_asic_rev(tp) == ASIC_REV_5701)
2786 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2788 tw32_wait_f(GRC_LOCAL_CTRL,
2789 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2790 TG3_GRC_LCLCTL_PWRSW_DELAY);
2792 tw32_wait_f(GRC_LOCAL_CTRL,
2794 TG3_GRC_LCLCTL_PWRSW_DELAY);
2796 tw32_wait_f(GRC_LOCAL_CTRL,
2797 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2798 TG3_GRC_LCLCTL_PWRSW_DELAY);
2801 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2803 if (!tg3_flag(tp, IS_NIC))
2806 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2807 tg3_asic_rev(tp) == ASIC_REV_5701) {
2808 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2809 (GRC_LCLCTRL_GPIO_OE0 |
2810 GRC_LCLCTRL_GPIO_OE1 |
2811 GRC_LCLCTRL_GPIO_OE2 |
2812 GRC_LCLCTRL_GPIO_OUTPUT0 |
2813 GRC_LCLCTRL_GPIO_OUTPUT1),
2814 TG3_GRC_LCLCTL_PWRSW_DELAY);
2815 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2816 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2817 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2818 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2819 GRC_LCLCTRL_GPIO_OE1 |
2820 GRC_LCLCTRL_GPIO_OE2 |
2821 GRC_LCLCTRL_GPIO_OUTPUT0 |
2822 GRC_LCLCTRL_GPIO_OUTPUT1 |
2824 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2825 TG3_GRC_LCLCTL_PWRSW_DELAY);
2827 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2828 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2829 TG3_GRC_LCLCTL_PWRSW_DELAY);
2831 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2832 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2833 TG3_GRC_LCLCTL_PWRSW_DELAY);
2836 u32 grc_local_ctrl = 0;
2838 /* Workaround to prevent overdrawing Amps. */
2839 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2840 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2841 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2843 TG3_GRC_LCLCTL_PWRSW_DELAY);
2846 /* On 5753 and variants, GPIO2 cannot be used. */
2847 no_gpio2 = tp->nic_sram_data_cfg &
2848 NIC_SRAM_DATA_CFG_NO_GPIO2;
2850 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2851 GRC_LCLCTRL_GPIO_OE1 |
2852 GRC_LCLCTRL_GPIO_OE2 |
2853 GRC_LCLCTRL_GPIO_OUTPUT1 |
2854 GRC_LCLCTRL_GPIO_OUTPUT2;
2856 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2857 GRC_LCLCTRL_GPIO_OUTPUT2);
2859 tw32_wait_f(GRC_LOCAL_CTRL,
2860 tp->grc_local_ctrl | grc_local_ctrl,
2861 TG3_GRC_LCLCTL_PWRSW_DELAY);
2863 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2865 tw32_wait_f(GRC_LOCAL_CTRL,
2866 tp->grc_local_ctrl | grc_local_ctrl,
2867 TG3_GRC_LCLCTL_PWRSW_DELAY);
2870 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2871 tw32_wait_f(GRC_LOCAL_CTRL,
2872 tp->grc_local_ctrl | grc_local_ctrl,
2873 TG3_GRC_LCLCTL_PWRSW_DELAY);
2878 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2882 /* Serialize power state transitions */
2883 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2886 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2887 msg = TG3_GPIO_MSG_NEED_VAUX;
2889 msg = tg3_set_function_status(tp, msg);
2891 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2894 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2895 tg3_pwrsrc_switch_to_vaux(tp);
2897 tg3_pwrsrc_die_with_vmain(tp);
2900 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2903 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2905 bool need_vaux = false;
2907 /* The GPIOs do something completely different on 57765. */
2908 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2911 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2912 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2913 tg3_asic_rev(tp) == ASIC_REV_5720) {
2914 tg3_frob_aux_power_5717(tp, include_wol ?
2915 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2919 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2920 struct net_device *dev_peer;
2922 dev_peer = pci_get_drvdata(tp->pdev_peer);
2924 /* remove_one() may have been run on the peer. */
2926 struct tg3 *tp_peer = netdev_priv(dev_peer);
2928 if (tg3_flag(tp_peer, INIT_COMPLETE))
2931 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2932 tg3_flag(tp_peer, ENABLE_ASF))
2937 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2938 tg3_flag(tp, ENABLE_ASF))
2942 tg3_pwrsrc_switch_to_vaux(tp);
2944 tg3_pwrsrc_die_with_vmain(tp);
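/* Note: auxiliary power selection.  On 5717/5719/5720 the decision is
 * arbitrated through the shared per-function status word
 * (tg3_frob_aux_power_5717(), taken under the APE GPIO lock).  On older
 * dual-port devices the peer function's WOL/ASF state is consulted via
 * pci_get_drvdata() on tp->pdev_peer.  Either way, the device switches to
 * Vaux when WOL or ASF must keep it powered and stays on Vmain otherwise.
 */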
2947 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2949 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2951 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2952 if (speed != SPEED_10)
2954 } else if (speed == SPEED_10)
2960 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2964 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
2967 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2968 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
2969 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2970 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2973 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2974 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2975 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2980 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2982 val = tr32(GRC_MISC_CFG);
2983 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2986 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2988 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2991 tg3_writephy(tp, MII_ADVERTISE, 0);
2992 tg3_writephy(tp, MII_BMCR,
2993 BMCR_ANENABLE | BMCR_ANRESTART);
2995 tg3_writephy(tp, MII_TG3_FET_TEST,
2996 phytest | MII_TG3_FET_SHADOW_EN);
2997 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2998 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3000 MII_TG3_FET_SHDW_AUXMODE4,
3003 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3006 } else if (do_low_power) {
3007 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3008 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3010 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3011 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3012 MII_TG3_AUXCTL_PCTL_VREG_11V;
3013 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3016 /* The PHY should not be powered down on some chips because of bugs. */
3019 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
3020 tg3_asic_rev(tp) == ASIC_REV_5704 ||
3021 (tg3_asic_rev(tp) == ASIC_REV_5780 &&
3022 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
3023 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
3027 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3028 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3029 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3030 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3031 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3032 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3035 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3038 /* tp->lock is held. */
3039 static int tg3_nvram_lock(struct tg3 *tp)
3041 if (tg3_flag(tp, NVRAM)) {
3044 if (tp->nvram_lock_cnt == 0) {
3045 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3046 for (i = 0; i < 8000; i++) {
3047 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3052 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3056 tp->nvram_lock_cnt++;
3061 /* tp->lock is held. */
3062 static void tg3_nvram_unlock(struct tg3 *tp)
3064 if (tg3_flag(tp, NVRAM)) {
3065 if (tp->nvram_lock_cnt > 0)
3066 tp->nvram_lock_cnt--;
3067 if (tp->nvram_lock_cnt == 0)
3068 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3072 /* tp->lock is held. */
3073 static void tg3_enable_nvram_access(struct tg3 *tp)
3075 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3076 u32 nvaccess = tr32(NVRAM_ACCESS);
3078 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3082 /* tp->lock is held. */
3083 static void tg3_disable_nvram_access(struct tg3 *tp)
3085 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3086 u32 nvaccess = tr32(NVRAM_ACCESS);
3088 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3092 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3093 u32 offset, u32 *val)
3098 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3101 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3102 EEPROM_ADDR_DEVID_MASK |
3104 tw32(GRC_EEPROM_ADDR,
3106 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3107 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3108 EEPROM_ADDR_ADDR_MASK) |
3109 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3111 for (i = 0; i < 1000; i++) {
3112 tmp = tr32(GRC_EEPROM_ADDR);
3114 if (tmp & EEPROM_ADDR_COMPLETE)
3118 if (!(tmp & EEPROM_ADDR_COMPLETE))
3121 tmp = tr32(GRC_EEPROM_DATA);
3124 * The data will always be opposite the native endian
3125 * format. Perform a blind byteswap to compensate.
3132 #define NVRAM_CMD_TIMEOUT 10000
3134 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3138 tw32(NVRAM_CMD, nvram_cmd);
3139 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3141 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3147 if (i == NVRAM_CMD_TIMEOUT)
3153 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3155 if (tg3_flag(tp, NVRAM) &&
3156 tg3_flag(tp, NVRAM_BUFFERED) &&
3157 tg3_flag(tp, FLASH) &&
3158 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3159 (tp->nvram_jedecnum == JEDEC_ATMEL))
3161 addr = ((addr / tp->nvram_pagesize) <<
3162 ATMEL_AT45DB0X1B_PAGE_POS) +
3163 (addr % tp->nvram_pagesize);
3168 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3170 if (tg3_flag(tp, NVRAM) &&
3171 tg3_flag(tp, NVRAM_BUFFERED) &&
3172 tg3_flag(tp, FLASH) &&
3173 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3174 (tp->nvram_jedecnum == JEDEC_ATMEL))
3176 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3177 tp->nvram_pagesize) +
3178 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
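/* Worked example (illustrative values only): assuming a 264-byte page size
 * and ATMEL_AT45DB0X1B_PAGE_POS == 9, linear offset 1000 lives in page
 * 1000 / 264 = 3 at byte 1000 % 264 = 208, so tg3_nvram_phys_addr() returns
 * (3 << 9) + 208 = 1744.  tg3_nvram_logical_addr() inverts this:
 * (1744 >> 9) * 264 + (1744 & 511) = 792 + 208 = 1000.
 */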
3183 /* NOTE: Data read in from NVRAM is byteswapped according to
3184 * the byteswapping settings for all other register accesses.
3185 * tg3 devices are BE devices, so on a BE machine, the data
3186 * returned will be exactly as it is seen in NVRAM. On a LE
3187 * machine, the 32-bit value will be byteswapped.
3189 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3193 if (!tg3_flag(tp, NVRAM))
3194 return tg3_nvram_read_using_eeprom(tp, offset, val);
3196 offset = tg3_nvram_phys_addr(tp, offset);
3198 if (offset > NVRAM_ADDR_MSK)
3201 ret = tg3_nvram_lock(tp);
3205 tg3_enable_nvram_access(tp);
3207 tw32(NVRAM_ADDR, offset);
3208 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3209 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3212 *val = tr32(NVRAM_RDDATA);
3214 tg3_disable_nvram_access(tp);
3216 tg3_nvram_unlock(tp);
3221 /* Ensures NVRAM data is in bytestream format. */
3222 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3225 int res = tg3_nvram_read(tp, offset, &v);
3227 *val = cpu_to_be32(v);
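/* Minimal usage sketch (hypothetical helper, not part of the driver): read a
 * dword-aligned block into a byte buffer.  Because tg3_nvram_read_be32()
 * returns bytestream (big-endian) data, the buffer ends up holding the bytes
 * exactly as they sit in the flash part, regardless of host endianness.
 * Assumed constraints: offset and len are dword aligned and buf is at least
 * 4-byte aligned, mirroring the driver's own block helpers.
 */
static int tg3_nvram_read_block_sketch(struct tg3 *tp, u32 offset,
				       u8 *buf, u32 len)
{
	u32 i;

	for (i = 0; i < len; i += 4) {
		int err = tg3_nvram_read_be32(tp, offset + i,
					      (__be32 *)(buf + i));
		if (err)
			return err;
	}

	return 0;
}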
3231 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3232 u32 offset, u32 len, u8 *buf)
3237 for (i = 0; i < len; i += 4) {
3243 memcpy(&data, buf + i, 4);
3246 * The SEEPROM interface expects the data to always be opposite
3247 * the native endian format. We accomplish this by reversing
3248 * all the operations that would have been performed on the
3249 * data from a call to tg3_nvram_read_be32().
3251 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3253 val = tr32(GRC_EEPROM_ADDR);
3254 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3256 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3258 tw32(GRC_EEPROM_ADDR, val |
3259 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3260 (addr & EEPROM_ADDR_ADDR_MASK) |
3264 for (j = 0; j < 1000; j++) {
3265 val = tr32(GRC_EEPROM_ADDR);
3267 if (val & EEPROM_ADDR_COMPLETE)
3271 if (!(val & EEPROM_ADDR_COMPLETE)) {
3280 /* offset and length are dword aligned */
3281 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3285 u32 pagesize = tp->nvram_pagesize;
3286 u32 pagemask = pagesize - 1;
3290 tmp = kmalloc(pagesize, GFP_KERNEL);
3296 u32 phy_addr, page_off, size;
3298 phy_addr = offset & ~pagemask;
3300 for (j = 0; j < pagesize; j += 4) {
3301 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3302 (__be32 *) (tmp + j));
3309 page_off = offset & pagemask;
3316 memcpy(tmp + page_off, buf, size);
3318 offset = offset + (pagesize - page_off);
3320 tg3_enable_nvram_access(tp);
3323 * Before we can erase the flash page, we need
3324 * to issue a special "write enable" command.
3326 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3328 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3331 /* Erase the target page */
3332 tw32(NVRAM_ADDR, phy_addr);
3334 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3335 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3337 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3340 /* Issue another write enable to start the write. */
3341 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3343 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3346 for (j = 0; j < pagesize; j += 4) {
3349 data = *((__be32 *) (tmp + j));
3351 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3353 tw32(NVRAM_ADDR, phy_addr + j);
3355 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3359 nvram_cmd |= NVRAM_CMD_FIRST;
3360 else if (j == (pagesize - 4))
3361 nvram_cmd |= NVRAM_CMD_LAST;
3363 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3371 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3372 tg3_nvram_exec_cmd(tp, nvram_cmd);
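/* Note on the unbuffered write path above: the flash can only be programmed
 * a full page at a time, so each page iteration (1) reads the whole target
 * page into a scratch buffer with tg3_nvram_read_be32(), (2) overlays the
 * caller's bytes, (3) issues a write-enable followed by a page erase, and
 * (4) issues another write-enable and rewrites the page one dword at a time
 * with NVRAM_CMD_FIRST/LAST framing.  After the last page, NVRAM_CMD_WRDI
 * drops write-enable again.
 */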
3379 /* offset and length are dword aligned */
3380 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3385 for (i = 0; i < len; i += 4, offset += 4) {
3386 u32 page_off, phy_addr, nvram_cmd;
3389 memcpy(&data, buf + i, 4);
3390 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3392 page_off = offset % tp->nvram_pagesize;
3394 phy_addr = tg3_nvram_phys_addr(tp, offset);
3396 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3398 if (page_off == 0 || i == 0)
3399 nvram_cmd |= NVRAM_CMD_FIRST;
3400 if (page_off == (tp->nvram_pagesize - 4))
3401 nvram_cmd |= NVRAM_CMD_LAST;
3404 nvram_cmd |= NVRAM_CMD_LAST;
3406 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3407 !tg3_flag(tp, FLASH) ||
3408 !tg3_flag(tp, 57765_PLUS))
3409 tw32(NVRAM_ADDR, phy_addr);
3411 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3412 !tg3_flag(tp, 5755_PLUS) &&
3413 (tp->nvram_jedecnum == JEDEC_ST) &&
3414 (nvram_cmd & NVRAM_CMD_FIRST)) {
3417 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3418 ret = tg3_nvram_exec_cmd(tp, cmd);
3422 if (!tg3_flag(tp, FLASH)) {
3423 /* We always do complete word writes to eeprom. */
3424 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3427 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3434 /* offset and length are dword aligned */
3435 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3439 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3440 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3441 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3445 if (!tg3_flag(tp, NVRAM)) {
3446 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3450 ret = tg3_nvram_lock(tp);
3454 tg3_enable_nvram_access(tp);
3455 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3456 tw32(NVRAM_WRITE1, 0x406);
3458 grc_mode = tr32(GRC_MODE);
3459 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3461 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3462 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3465 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3469 grc_mode = tr32(GRC_MODE);
3470 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3472 tg3_disable_nvram_access(tp);
3473 tg3_nvram_unlock(tp);
3476 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3477 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3484 #define RX_CPU_SCRATCH_BASE 0x30000
3485 #define RX_CPU_SCRATCH_SIZE 0x04000
3486 #define TX_CPU_SCRATCH_BASE 0x34000
3487 #define TX_CPU_SCRATCH_SIZE 0x04000
3489 /* tp->lock is held. */
3490 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3493 const int iters = 10000;
3495 for (i = 0; i < iters; i++) {
3496 tw32(cpu_base + CPU_STATE, 0xffffffff);
3497 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3498 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3502 return (i == iters) ? -EBUSY : 0;
3505 /* tp->lock is held. */
3506 static int tg3_rxcpu_pause(struct tg3 *tp)
3508 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3510 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3511 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3517 /* tp->lock is held. */
3518 static int tg3_txcpu_pause(struct tg3 *tp)
3520 return tg3_pause_cpu(tp, TX_CPU_BASE);
3523 /* tp->lock is held. */
3524 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3526 tw32(cpu_base + CPU_STATE, 0xffffffff);
3527 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3530 /* tp->lock is held. */
3531 static void tg3_rxcpu_resume(struct tg3 *tp)
3533 tg3_resume_cpu(tp, RX_CPU_BASE);
3536 /* tp->lock is held. */
3537 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3541 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3543 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3544 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3546 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3549 if (cpu_base == RX_CPU_BASE) {
3550 rc = tg3_rxcpu_pause(tp);
3553 /* There is only an Rx CPU for the 5750 derivative in the BCM4785. */
3556 if (tg3_flag(tp, IS_SSB_CORE))
3559 rc = tg3_txcpu_pause(tp);
3563 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3564 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3568 /* Clear firmware's nvram arbitration. */
3569 if (tg3_flag(tp, NVRAM))
3570 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3574 static int tg3_fw_data_len(struct tg3 *tp,
3575 const struct tg3_firmware_hdr *fw_hdr)
3579 /* Non fragmented firmware have one firmware header followed by a
3580 * contiguous chunk of data to be written. The length field in that
3581 * header is not the length of data to be written but the complete
3582 * length of the bss. The data length is determined based on
3583 * tp->fw->size minus headers.
3585 * Fragmented firmware have a main header followed by multiple
3586 * fragments. Each fragment is identical to non fragmented firmware
3587 * with a firmware header followed by a contiguous chunk of data. In
3588 * the main header, the length field is unused and set to 0xffffffff.
3589 * In each fragment header the length is the entire size of that
3590 * fragment i.e. fragment data + header length. Data length is
3591 * therefore length field in the header minus TG3_FW_HDR_LEN.
3593 if (tp->fw_len == 0xffffffff)
3594 fw_len = be32_to_cpu(fw_hdr->len);
3596 fw_len = tp->fw->size;
3598 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
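/* Image layouts implied by the comment above:
 *
 *   non-fragmented:  [ tg3_firmware_hdr | data ................ ]
 *                      hdr->len = complete bss length (not the data length)
 *
 *   fragmented:      [ main hdr, len = 0xffffffff ]
 *                    [ frag hdr | frag data ][ frag hdr | frag data ] ...
 *                      each frag hdr->len = TG3_FW_HDR_LEN + frag data length
 */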
3601 /* tp->lock is held. */
3602 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3603 u32 cpu_scratch_base, int cpu_scratch_size,
3604 const struct tg3_firmware_hdr *fw_hdr)
3607 void (*write_op)(struct tg3 *, u32, u32);
3608 int total_len = tp->fw->size;
3610 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3612 "%s: Trying to load TX cpu firmware which is 5705\n",
3617 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3618 write_op = tg3_write_mem;
3620 write_op = tg3_write_indirect_reg32;
3622 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3623 /* It is possible that bootcode is still loading at this point.
3624 * Get the nvram lock first before halting the cpu.
3626 int lock_err = tg3_nvram_lock(tp);
3627 err = tg3_halt_cpu(tp, cpu_base);
3629 tg3_nvram_unlock(tp);
3633 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3634 write_op(tp, cpu_scratch_base + i, 0);
3635 tw32(cpu_base + CPU_STATE, 0xffffffff);
3636 tw32(cpu_base + CPU_MODE,
3637 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3639 /* Subtract additional main header for fragmented firmware and
3640 * advance to the first fragment
3642 total_len -= TG3_FW_HDR_LEN;
3647 u32 *fw_data = (u32 *)(fw_hdr + 1);
3648 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3649 write_op(tp, cpu_scratch_base +
3650 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3652 be32_to_cpu(fw_data[i]));
3654 total_len -= be32_to_cpu(fw_hdr->len);
3656 /* Advance to next fragment */
3657 fw_hdr = (struct tg3_firmware_hdr *)
3658 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3659 } while (total_len > 0);
3667 /* tp->lock is held. */
3668 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3671 const int iters = 5;
3673 tw32(cpu_base + CPU_STATE, 0xffffffff);
3674 tw32_f(cpu_base + CPU_PC, pc);
3676 for (i = 0; i < iters; i++) {
3677 if (tr32(cpu_base + CPU_PC) == pc)
3679 tw32(cpu_base + CPU_STATE, 0xffffffff);
3680 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3681 tw32_f(cpu_base + CPU_PC, pc);
3685 return (i == iters) ? -EBUSY : 0;
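/* Note: tg3_pause_cpu_and_set_pc() points the on-chip CPU at the freshly
 * loaded image by writing the firmware base address to CPU_PC and reading it
 * back to verify, retrying up to five times with the CPU re-halted in
 * between.  Callers then clear CPU_MODE (tg3_rxcpu_resume()/
 * tg3_resume_cpu()) to let the firmware run.
 */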
3688 /* tp->lock is held. */
3689 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3691 const struct tg3_firmware_hdr *fw_hdr;
3694 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3696 /* Firmware blob starts with version numbers, followed by
3697 start address and length. We are setting complete length.
3698 length = end_address_of_bss - start_address_of_text.
3699 Remainder is the blob to be loaded contiguously
3700 from start address. */
3702 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3703 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3708 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3709 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3714 /* Now startup only the RX cpu. */
3715 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3716 be32_to_cpu(fw_hdr->base_addr));
3718 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3719 "should be %08x\n", __func__,
3720 tr32(RX_CPU_BASE + CPU_PC),
3721 be32_to_cpu(fw_hdr->base_addr));
3725 tg3_rxcpu_resume(tp);
3730 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3732 const int iters = 1000;
3736 /* Wait for boot code to complete initialization and enter service
3737 * loop. It is then safe to download service patches
3739 for (i = 0; i < iters; i++) {
3740 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3747 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3751 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3753 netdev_warn(tp->dev,
3754 "Other patches exist. Not downloading EEE patch\n");
3761 /* tp->lock is held. */
3762 static void tg3_load_57766_firmware(struct tg3 *tp)
3764 struct tg3_firmware_hdr *fw_hdr;
3766 if (!tg3_flag(tp, NO_NVRAM))
3769 if (tg3_validate_rxcpu_state(tp))
3775 /* This firmware blob has a different format than older firmware
3776 * releases as given below. The main difference is we have fragmented
3777 * data to be written to non-contiguous locations.
3779 * In the beginning we have a firmware header identical to other
3780 * firmware which consists of version, base addr and length. The length
3781 * here is unused and set to 0xffffffff.
3783 * This is followed by a series of firmware fragments which are
3784 * individually identical to previous firmware. i.e. they have the
3785 * firmware header and followed by data for that fragment. The version
3786 * field of the individual fragment header is unused.
3789 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3790 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3793 if (tg3_rxcpu_pause(tp))
3796 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3797 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3799 tg3_rxcpu_resume(tp);
3802 /* tp->lock is held. */
3803 static int tg3_load_tso_firmware(struct tg3 *tp)
3805 const struct tg3_firmware_hdr *fw_hdr;
3806 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3809 if (!tg3_flag(tp, FW_TSO))
3812 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3814 /* Firmware blob starts with version numbers, followed by
3815 start address and length. We are setting complete length.
3816 length = end_address_of_bss - start_address_of_text.
3817 Remainder is the blob to be loaded contiguously
3818 from start address. */
3820 cpu_scratch_size = tp->fw_len;
3822 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3823 cpu_base = RX_CPU_BASE;
3824 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3826 cpu_base = TX_CPU_BASE;
3827 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3828 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3831 err = tg3_load_firmware_cpu(tp, cpu_base,
3832 cpu_scratch_base, cpu_scratch_size,
3837 /* Now startup the cpu. */
3838 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3839 be32_to_cpu(fw_hdr->base_addr));
3842 "%s fails to set CPU PC, is %08x should be %08x\n",
3843 __func__, tr32(cpu_base + CPU_PC),
3844 be32_to_cpu(fw_hdr->base_addr));
3848 tg3_resume_cpu(tp, cpu_base);
3853 /* tp->lock is held. */
3854 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3856 u32 addr_high, addr_low;
3859 addr_high = ((tp->dev->dev_addr[0] << 8) |
3860 tp->dev->dev_addr[1]);
3861 addr_low = ((tp->dev->dev_addr[2] << 24) |
3862 (tp->dev->dev_addr[3] << 16) |
3863 (tp->dev->dev_addr[4] << 8) |
3864 (tp->dev->dev_addr[5] << 0));
3865 for (i = 0; i < 4; i++) {
3866 if (i == 1 && skip_mac_1)
3868 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3869 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3872 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3873 tg3_asic_rev(tp) == ASIC_REV_5704) {
3874 for (i = 0; i < 12; i++) {
3875 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3876 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3880 addr_high = (tp->dev->dev_addr[0] +
3881 tp->dev->dev_addr[1] +
3882 tp->dev->dev_addr[2] +
3883 tp->dev->dev_addr[3] +
3884 tp->dev->dev_addr[4] +
3885 tp->dev->dev_addr[5]) &
3886 TX_BACKOFF_SEED_MASK;
3887 tw32(MAC_TX_BACKOFF_SEED, addr_high);
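/* Worked example (illustrative address only): for dev_addr 00:10:18:aa:bb:cc
 * the code above programs addr_high = 0x0010 and addr_low = 0x18aabbcc into
 * the MAC_ADDR_0..3 slots (and, on 5703/5704, the twelve extended address
 * slots), and seeds the TX backoff generator with the byte sum 0x259 masked
 * by TX_BACKOFF_SEED_MASK.
 */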
3890 static void tg3_enable_register_access(struct tg3 *tp)
3893 * Make sure register accesses (indirect or otherwise) will function
3896 pci_write_config_dword(tp->pdev,
3897 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3900 static int tg3_power_up(struct tg3 *tp)
3904 tg3_enable_register_access(tp);
3906 err = pci_set_power_state(tp->pdev, PCI_D0);
3908 /* Switch out of Vaux if it is a NIC */
3909 tg3_pwrsrc_switch_to_vmain(tp);
3911 netdev_err(tp->dev, "Transition to D0 failed\n");
3917 static int tg3_setup_phy(struct tg3 *, bool);
3919 static int tg3_power_down_prepare(struct tg3 *tp)
3922 bool device_should_wake, do_low_power;
3924 tg3_enable_register_access(tp);
3926 /* Restore the CLKREQ setting. */
3927 if (tg3_flag(tp, CLKREQ_BUG))
3928 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3929 PCI_EXP_LNKCTL_CLKREQ_EN);
3931 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3932 tw32(TG3PCI_MISC_HOST_CTRL,
3933 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3935 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3936 tg3_flag(tp, WOL_ENABLE);
3938 if (tg3_flag(tp, USE_PHYLIB)) {
3939 do_low_power = false;
3940 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3941 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3942 struct phy_device *phydev;
3943 u32 phyid, advertising;
3945 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3947 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3949 tp->link_config.speed = phydev->speed;
3950 tp->link_config.duplex = phydev->duplex;
3951 tp->link_config.autoneg = phydev->autoneg;
3952 tp->link_config.advertising = phydev->advertising;
3954 advertising = ADVERTISED_TP |
3956 ADVERTISED_Autoneg |
3957 ADVERTISED_10baseT_Half;
3959 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3960 if (tg3_flag(tp, WOL_SPEED_100MB))
3962 ADVERTISED_100baseT_Half |
3963 ADVERTISED_100baseT_Full |
3964 ADVERTISED_10baseT_Full;
3966 advertising |= ADVERTISED_10baseT_Full;
3969 phydev->advertising = advertising;
3971 phy_start_aneg(phydev);
3973 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3974 if (phyid != PHY_ID_BCMAC131) {
3975 phyid &= PHY_BCM_OUI_MASK;
3976 if (phyid == PHY_BCM_OUI_1 ||
3977 phyid == PHY_BCM_OUI_2 ||
3978 phyid == PHY_BCM_OUI_3)
3979 do_low_power = true;
3983 do_low_power = true;
3985 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3986 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3988 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3989 tg3_setup_phy(tp, false);
3992 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3995 val = tr32(GRC_VCPU_EXT_CTRL);
3996 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3997 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4001 for (i = 0; i < 200; i++) {
4002 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4003 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4008 if (tg3_flag(tp, WOL_CAP))
4009 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4010 WOL_DRV_STATE_SHUTDOWN |
4014 if (device_should_wake) {
4017 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4019 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4020 tg3_phy_auxctl_write(tp,
4021 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4022 MII_TG3_AUXCTL_PCTL_WOL_EN |
4023 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4024 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4028 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4029 mac_mode = MAC_MODE_PORT_MODE_GMII;
4030 else if (tp->phy_flags &
4031 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4032 if (tp->link_config.active_speed == SPEED_1000)
4033 mac_mode = MAC_MODE_PORT_MODE_GMII;
4035 mac_mode = MAC_MODE_PORT_MODE_MII;
4037 mac_mode = MAC_MODE_PORT_MODE_MII;
4039 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4040 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4041 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4042 SPEED_100 : SPEED_10;
4043 if (tg3_5700_link_polarity(tp, speed))
4044 mac_mode |= MAC_MODE_LINK_POLARITY;
4046 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4049 mac_mode = MAC_MODE_PORT_MODE_TBI;
4052 if (!tg3_flag(tp, 5750_PLUS))
4053 tw32(MAC_LED_CTRL, tp->led_ctrl);
4055 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4056 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4057 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4058 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4060 if (tg3_flag(tp, ENABLE_APE))
4061 mac_mode |= MAC_MODE_APE_TX_EN |
4062 MAC_MODE_APE_RX_EN |
4063 MAC_MODE_TDE_ENABLE;
4065 tw32_f(MAC_MODE, mac_mode);
4068 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4072 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4073 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4074 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4077 base_val = tp->pci_clock_ctrl;
4078 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4079 CLOCK_CTRL_TXCLK_DISABLE);
4081 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4082 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4083 } else if (tg3_flag(tp, 5780_CLASS) ||
4084 tg3_flag(tp, CPMU_PRESENT) ||
4085 tg3_asic_rev(tp) == ASIC_REV_5906) {
4087 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4088 u32 newbits1, newbits2;
4090 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4091 tg3_asic_rev(tp) == ASIC_REV_5701) {
4092 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4093 CLOCK_CTRL_TXCLK_DISABLE |
4095 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4096 } else if (tg3_flag(tp, 5705_PLUS)) {
4097 newbits1 = CLOCK_CTRL_625_CORE;
4098 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4100 newbits1 = CLOCK_CTRL_ALTCLK;
4101 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4104 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4107 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4110 if (!tg3_flag(tp, 5705_PLUS)) {
4113 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4114 tg3_asic_rev(tp) == ASIC_REV_5701) {
4115 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4116 CLOCK_CTRL_TXCLK_DISABLE |
4117 CLOCK_CTRL_44MHZ_CORE);
4119 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4122 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4123 tp->pci_clock_ctrl | newbits3, 40);
4127 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4128 tg3_power_down_phy(tp, do_low_power);
4130 tg3_frob_aux_power(tp, true);
4132 /* Workaround for unstable PLL clock */
4133 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4134 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4135 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4136 u32 val = tr32(0x7d00);
4138 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4140 if (!tg3_flag(tp, ENABLE_ASF)) {
4143 err = tg3_nvram_lock(tp);
4144 tg3_halt_cpu(tp, RX_CPU_BASE);
4146 tg3_nvram_unlock(tp);
4150 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4155 static void tg3_power_down(struct tg3 *tp)
4157 tg3_power_down_prepare(tp);
4159 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4160 pci_set_power_state(tp->pdev, PCI_D3hot);
4163 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4165 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4166 case MII_TG3_AUX_STAT_10HALF:
4168 *duplex = DUPLEX_HALF;
4171 case MII_TG3_AUX_STAT_10FULL:
4173 *duplex = DUPLEX_FULL;
4176 case MII_TG3_AUX_STAT_100HALF:
4178 *duplex = DUPLEX_HALF;
4181 case MII_TG3_AUX_STAT_100FULL:
4183 *duplex = DUPLEX_FULL;
4186 case MII_TG3_AUX_STAT_1000HALF:
4187 *speed = SPEED_1000;
4188 *duplex = DUPLEX_HALF;
4191 case MII_TG3_AUX_STAT_1000FULL:
4192 *speed = SPEED_1000;
4193 *duplex = DUPLEX_FULL;
4197 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4198 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4200 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4204 *speed = SPEED_UNKNOWN;
4205 *duplex = DUPLEX_UNKNOWN;
4210 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4215 new_adv = ADVERTISE_CSMA;
4216 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4217 new_adv |= mii_advertise_flowctrl(flowctrl);
4219 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4223 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4224 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4226 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4227 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4228 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4230 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4235 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4238 tw32(TG3_CPMU_EEE_MODE,
4239 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4241 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4246 /* Advertise 100-BaseTX EEE ability */
4247 if (advertise & ADVERTISED_100baseT_Full)
4248 val |= MDIO_AN_EEE_ADV_100TX;
4249 /* Advertise 1000-BaseT EEE ability */
4250 if (advertise & ADVERTISED_1000baseT_Full)
4251 val |= MDIO_AN_EEE_ADV_1000T;
4252 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4256 switch (tg3_asic_rev(tp)) {
4258 case ASIC_REV_57765:
4259 case ASIC_REV_57766:
4261 /* If we advertised any eee advertisements above... */
4263 val = MII_TG3_DSP_TAP26_ALNOKO |
4264 MII_TG3_DSP_TAP26_RMRXSTO |
4265 MII_TG3_DSP_TAP26_OPCSINPT;
4266 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4270 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4271 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4272 MII_TG3_DSP_CH34TP2_HIBW01);
4275 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
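/* Note: the EEE half of tg3_phy_autoneg_cfg() above advertises 100BASE-TX
 * and 1000BASE-T EEE ability through the clause-45 MDIO_AN_EEE_ADV register
 * (tg3_phy_cl45_write()), and for the ASIC revisions handled by the switch
 * above additionally programs DSP TAP26 (and CH34TP2 where applicable) while
 * the SMDSP clock is enabled.
 */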
4284 static void tg3_phy_copper_begin(struct tg3 *tp)
4286 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4287 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4290 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4291 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4292 adv = ADVERTISED_10baseT_Half |
4293 ADVERTISED_10baseT_Full;
4294 if (tg3_flag(tp, WOL_SPEED_100MB))
4295 adv |= ADVERTISED_100baseT_Half |
4296 ADVERTISED_100baseT_Full;
4297 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
4298 adv |= ADVERTISED_1000baseT_Half |
4299 ADVERTISED_1000baseT_Full;
4301 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4303 adv = tp->link_config.advertising;
4304 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4305 adv &= ~(ADVERTISED_1000baseT_Half |
4306 ADVERTISED_1000baseT_Full);
4308 fc = tp->link_config.flowctrl;
4311 tg3_phy_autoneg_cfg(tp, adv, fc);
4313 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4314 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4315 /* Normally during power down we want to autonegotiate
4316 * the lowest possible speed for WOL. However, to avoid
4317 * link flap, we leave it untouched.
4322 tg3_writephy(tp, MII_BMCR,
4323 BMCR_ANENABLE | BMCR_ANRESTART);
4326 u32 bmcr, orig_bmcr;
4328 tp->link_config.active_speed = tp->link_config.speed;
4329 tp->link_config.active_duplex = tp->link_config.duplex;
4331 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4332 /* With autoneg disabled, 5715 only links up when the
4333 * advertisement register has the configured speed enabled. */
4336 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4340 switch (tp->link_config.speed) {
4346 bmcr |= BMCR_SPEED100;
4350 bmcr |= BMCR_SPEED1000;
4354 if (tp->link_config.duplex == DUPLEX_FULL)
4355 bmcr |= BMCR_FULLDPLX;
4357 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4358 (bmcr != orig_bmcr)) {
4359 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4360 for (i = 0; i < 1500; i++) {
4364 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4365 tg3_readphy(tp, MII_BMSR, &tmp))
4367 if (!(tmp & BMSR_LSTATUS)) {
4372 tg3_writephy(tp, MII_BMCR, bmcr);
4378 static int tg3_phy_pull_config(struct tg3 *tp)
4383 err = tg3_readphy(tp, MII_BMCR, &val);
4387 if (!(val & BMCR_ANENABLE)) {
4388 tp->link_config.autoneg = AUTONEG_DISABLE;
4389 tp->link_config.advertising = 0;
4390 tg3_flag_clear(tp, PAUSE_AUTONEG);
4394 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4396 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4399 tp->link_config.speed = SPEED_10;
4402 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4405 tp->link_config.speed = SPEED_100;
4407 case BMCR_SPEED1000:
4408 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4409 tp->link_config.speed = SPEED_1000;
4417 if (val & BMCR_FULLDPLX)
4418 tp->link_config.duplex = DUPLEX_FULL;
4420 tp->link_config.duplex = DUPLEX_HALF;
4422 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4428 tp->link_config.autoneg = AUTONEG_ENABLE;
4429 tp->link_config.advertising = ADVERTISED_Autoneg;
4430 tg3_flag_set(tp, PAUSE_AUTONEG);
4432 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4435 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4439 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4440 tp->link_config.advertising |= adv | ADVERTISED_TP;
4442 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4444 tp->link_config.advertising |= ADVERTISED_FIBRE;
4447 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4450 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4451 err = tg3_readphy(tp, MII_CTRL1000, &val);
4455 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4457 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4461 adv = tg3_decode_flowctrl_1000X(val);
4462 tp->link_config.flowctrl = adv;
4464 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4465 adv = mii_adv_to_ethtool_adv_x(val);
4468 tp->link_config.advertising |= adv;
4475 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4479 /* Turn off tap power management. */
4480 /* Set Extended packet length bit */
4481 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4483 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4484 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4485 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4486 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4487 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4494 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4498 u32 advertising = tp->link_config.advertising;
4500 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4503 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
4506 val &= (MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T);
4509 if (advertising & ADVERTISED_100baseT_Full)
4510 tgtadv |= MDIO_AN_EEE_ADV_100TX;
4511 if (advertising & ADVERTISED_1000baseT_Full)
4512 tgtadv |= MDIO_AN_EEE_ADV_1000T;
4520 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4522 u32 advmsk, tgtadv, advertising;
4524 advertising = tp->link_config.advertising;
4525 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4527 advmsk = ADVERTISE_ALL;
4528 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4529 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4530 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4533 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4536 if ((*lcladv & advmsk) != tgtadv)
4539 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4542 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4544 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4548 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4549 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4550 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4551 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4552 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4554 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4557 if (tg3_ctrl != tgtadv)
4564 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4568 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4571 if (tg3_readphy(tp, MII_STAT1000, &val))
4574 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4577 if (tg3_readphy(tp, MII_LPA, rmtadv))
4580 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4581 tp->link_config.rmt_adv = lpeth;
4586 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4588 if (curr_link_up != tp->link_up) {
4590 netif_carrier_on(tp->dev);
4592 netif_carrier_off(tp->dev);
4593 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4594 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4597 tg3_link_report(tp);
4604 static void tg3_clear_mac_status(struct tg3 *tp)
4609 MAC_STATUS_SYNC_CHANGED |
4610 MAC_STATUS_CFG_CHANGED |
4611 MAC_STATUS_MI_COMPLETION |
4612 MAC_STATUS_LNKSTATE_CHANGED);
4616 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4618 bool current_link_up;
4620 u32 lcl_adv, rmt_adv;
4625 tg3_clear_mac_status(tp);
4627 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4629 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4633 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4635 /* Some third-party PHYs need to be reset on link going down. */
4638 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4639 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4640 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4642 tg3_readphy(tp, MII_BMSR, &bmsr);
4643 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4644 !(bmsr & BMSR_LSTATUS))
4650 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4651 tg3_readphy(tp, MII_BMSR, &bmsr);
4652 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4653 !tg3_flag(tp, INIT_COMPLETE))
4656 if (!(bmsr & BMSR_LSTATUS)) {
4657 err = tg3_init_5401phy_dsp(tp);
4661 tg3_readphy(tp, MII_BMSR, &bmsr);
4662 for (i = 0; i < 1000; i++) {
4664 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4665 (bmsr & BMSR_LSTATUS)) {
4671 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4672 TG3_PHY_REV_BCM5401_B0 &&
4673 !(bmsr & BMSR_LSTATUS) &&
4674 tp->link_config.active_speed == SPEED_1000) {
4675 err = tg3_phy_reset(tp);
4677 err = tg3_init_5401phy_dsp(tp);
4682 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4683 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4684 /* 5701 {A0,B0} CRC bug workaround */
4685 tg3_writephy(tp, 0x15, 0x0a75);
4686 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4687 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4688 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4691 /* Clear pending interrupts... */
4692 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4693 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4695 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4696 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4697 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4698 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4700 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4701 tg3_asic_rev(tp) == ASIC_REV_5701) {
4702 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4703 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4704 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4706 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4709 current_link_up = false;
4710 current_speed = SPEED_UNKNOWN;
4711 current_duplex = DUPLEX_UNKNOWN;
4712 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4713 tp->link_config.rmt_adv = 0;
4715 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4716 err = tg3_phy_auxctl_read(tp,
4717 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4719 if (!err && !(val & (1 << 10))) {
4720 tg3_phy_auxctl_write(tp,
4721 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4728 for (i = 0; i < 100; i++) {
4729 tg3_readphy(tp, MII_BMSR, &bmsr);
4730 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4731 (bmsr & BMSR_LSTATUS))
4736 if (bmsr & BMSR_LSTATUS) {
4739 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4740 for (i = 0; i < 2000; i++) {
4742 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4747 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4752 for (i = 0; i < 200; i++) {
4753 tg3_readphy(tp, MII_BMCR, &bmcr);
4754 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4756 if (bmcr && bmcr != 0x7fff)
4764 tp->link_config.active_speed = current_speed;
4765 tp->link_config.active_duplex = current_duplex;
4767 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4768 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4770 if ((bmcr & BMCR_ANENABLE) &&
4772 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4773 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4774 current_link_up = true;
4776 /* EEE settings changes take effect only after a phy
4777 * reset. If we have skipped a reset due to Link Flap
4778 * Avoidance being enabled, do it now.
4780 if (!eee_config_ok &&
4781 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4785 if (!(bmcr & BMCR_ANENABLE) &&
4786 tp->link_config.speed == current_speed &&
4787 tp->link_config.duplex == current_duplex) {
4788 current_link_up = true;
4792 if (current_link_up &&
4793 tp->link_config.active_duplex == DUPLEX_FULL) {
4796 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4797 reg = MII_TG3_FET_GEN_STAT;
4798 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4800 reg = MII_TG3_EXT_STAT;
4801 bit = MII_TG3_EXT_STAT_MDIX;
4804 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4805 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4807 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4812 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4813 tg3_phy_copper_begin(tp);
4815 if (tg3_flag(tp, ROBOSWITCH)) {
4816 current_link_up = true;
4817 /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4818 current_speed = SPEED_1000;
4819 current_duplex = DUPLEX_FULL;
4820 tp->link_config.active_speed = current_speed;
4821 tp->link_config.active_duplex = current_duplex;
4824 tg3_readphy(tp, MII_BMSR, &bmsr);
4825 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4826 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4827 current_link_up = true;
4830 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4831 if (current_link_up) {
4832 if (tp->link_config.active_speed == SPEED_100 ||
4833 tp->link_config.active_speed == SPEED_10)
4834 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4836 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4837 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4838 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4840 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4842 /* In order for the 5750 core in the BCM4785 chip to work properly
4843 * in RGMII mode, the LED Control Register must be set up.
4845 if (tg3_flag(tp, RGMII_MODE)) {
4846 u32 led_ctrl = tr32(MAC_LED_CTRL);
4847 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4849 if (tp->link_config.active_speed == SPEED_10)
4850 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4851 else if (tp->link_config.active_speed == SPEED_100)
4852 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4853 LED_CTRL_100MBPS_ON);
4854 else if (tp->link_config.active_speed == SPEED_1000)
4855 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4856 LED_CTRL_1000MBPS_ON);
4858 tw32(MAC_LED_CTRL, led_ctrl);
4862 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4863 if (tp->link_config.active_duplex == DUPLEX_HALF)
4864 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4866 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4867 if (current_link_up &&
4868 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4869 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4871 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4874 /* ??? Without this setting Netgear GA302T PHY does not
4875 * ??? send/receive packets...
4877 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4878 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
4879 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4880 tw32_f(MAC_MI_MODE, tp->mi_mode);
4884 tw32_f(MAC_MODE, tp->mac_mode);
4887 tg3_phy_eee_adjust(tp, current_link_up);
4889 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4890 /* Polled via timer. */
4891 tw32_f(MAC_EVENT, 0);
4893 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4897 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
4899 tp->link_config.active_speed == SPEED_1000 &&
4900 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4903 (MAC_STATUS_SYNC_CHANGED |
4904 MAC_STATUS_CFG_CHANGED));
4907 NIC_SRAM_FIRMWARE_MBOX,
4908 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4911 /* Prevent send BD corruption. */
4912 if (tg3_flag(tp, CLKREQ_BUG)) {
4913 if (tp->link_config.active_speed == SPEED_100 ||
4914 tp->link_config.active_speed == SPEED_10)
4915 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4916 PCI_EXP_LNKCTL_CLKREQ_EN);
4918 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4919 PCI_EXP_LNKCTL_CLKREQ_EN);
4922 tg3_test_and_report_link_chg(tp, current_link_up);
4927 struct tg3_fiber_aneginfo {
4929 #define ANEG_STATE_UNKNOWN 0
4930 #define ANEG_STATE_AN_ENABLE 1
4931 #define ANEG_STATE_RESTART_INIT 2
4932 #define ANEG_STATE_RESTART 3
4933 #define ANEG_STATE_DISABLE_LINK_OK 4
4934 #define ANEG_STATE_ABILITY_DETECT_INIT 5
4935 #define ANEG_STATE_ABILITY_DETECT 6
4936 #define ANEG_STATE_ACK_DETECT_INIT 7
4937 #define ANEG_STATE_ACK_DETECT 8
4938 #define ANEG_STATE_COMPLETE_ACK_INIT 9
4939 #define ANEG_STATE_COMPLETE_ACK 10
4940 #define ANEG_STATE_IDLE_DETECT_INIT 11
4941 #define ANEG_STATE_IDLE_DETECT 12
4942 #define ANEG_STATE_LINK_OK 13
4943 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
4944 #define ANEG_STATE_NEXT_PAGE_WAIT 15
4947 #define MR_AN_ENABLE 0x00000001
4948 #define MR_RESTART_AN 0x00000002
4949 #define MR_AN_COMPLETE 0x00000004
4950 #define MR_PAGE_RX 0x00000008
4951 #define MR_NP_LOADED 0x00000010
4952 #define MR_TOGGLE_TX 0x00000020
4953 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
4954 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
4955 #define MR_LP_ADV_SYM_PAUSE 0x00000100
4956 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
4957 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4958 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4959 #define MR_LP_ADV_NEXT_PAGE 0x00001000
4960 #define MR_TOGGLE_RX 0x00002000
4961 #define MR_NP_RX 0x00004000
4963 #define MR_LINK_OK 0x80000000
4965 unsigned long link_time, cur_time;
4967 u32 ability_match_cfg;
4968 int ability_match_count;
4970 char ability_match, idle_match, ack_match;
4972 u32 txconfig, rxconfig;
4973 #define ANEG_CFG_NP 0x00000080
4974 #define ANEG_CFG_ACK 0x00000040
4975 #define ANEG_CFG_RF2 0x00000020
4976 #define ANEG_CFG_RF1 0x00000010
4977 #define ANEG_CFG_PS2 0x00000001
4978 #define ANEG_CFG_PS1 0x00008000
4979 #define ANEG_CFG_HD 0x00004000
4980 #define ANEG_CFG_FD 0x00002000
4981 #define ANEG_CFG_INVAL 0x00001f06
4986 #define ANEG_TIMER_ENAB 2
4987 #define ANEG_FAILED -1
4989 #define ANEG_STATE_SETTLE_TIME 10000
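/* Rough happy-path progression of the software autoneg state machine
 * below, as derived from the case handling in tg3_fiber_aneg_smachine():
 *
 *   AN_ENABLE -> RESTART_INIT -> RESTART -> ABILITY_DETECT_INIT ->
 *   ABILITY_DETECT -> ACK_DETECT_INIT -> ACK_DETECT ->
 *   COMPLETE_ACK_INIT -> COMPLETE_ACK -> IDLE_DETECT_INIT ->
 *   IDLE_DETECT -> LINK_OK
 *
 * Losing the received config words in the ACK/COMPLETE/IDLE states
 * generally drops the machine back to AN_ENABLE.
 */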
4991 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4992 struct tg3_fiber_aneginfo *ap)
4995 unsigned long delta;
4999 if (ap->state == ANEG_STATE_UNKNOWN) {
5003 ap->ability_match_cfg = 0;
5004 ap->ability_match_count = 0;
5005 ap->ability_match = 0;
5011 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5012 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5014 if (rx_cfg_reg != ap->ability_match_cfg) {
5015 ap->ability_match_cfg = rx_cfg_reg;
5016 ap->ability_match = 0;
5017 ap->ability_match_count = 0;
5019 if (++ap->ability_match_count > 1) {
5020 ap->ability_match = 1;
5021 ap->ability_match_cfg = rx_cfg_reg;
5024 if (rx_cfg_reg & ANEG_CFG_ACK)
5032 ap->ability_match_cfg = 0;
5033 ap->ability_match_count = 0;
5034 ap->ability_match = 0;
5040 ap->rxconfig = rx_cfg_reg;
5043 switch (ap->state) {
5044 case ANEG_STATE_UNKNOWN:
5045 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5046 ap->state = ANEG_STATE_AN_ENABLE;
5049 case ANEG_STATE_AN_ENABLE:
5050 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5051 if (ap->flags & MR_AN_ENABLE) {
5054 ap->ability_match_cfg = 0;
5055 ap->ability_match_count = 0;
5056 ap->ability_match = 0;
5060 ap->state = ANEG_STATE_RESTART_INIT;
5062 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5066 case ANEG_STATE_RESTART_INIT:
5067 ap->link_time = ap->cur_time;
5068 ap->flags &= ~(MR_NP_LOADED);
5070 tw32(MAC_TX_AUTO_NEG, 0);
5071 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5072 tw32_f(MAC_MODE, tp->mac_mode);
5075 ret = ANEG_TIMER_ENAB;
5076 ap->state = ANEG_STATE_RESTART;
5079 case ANEG_STATE_RESTART:
5080 delta = ap->cur_time - ap->link_time;
5081 if (delta > ANEG_STATE_SETTLE_TIME)
5082 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5084 ret = ANEG_TIMER_ENAB;
5087 case ANEG_STATE_DISABLE_LINK_OK:
5091 case ANEG_STATE_ABILITY_DETECT_INIT:
5092 ap->flags &= ~(MR_TOGGLE_TX);
5093 ap->txconfig = ANEG_CFG_FD;
5094 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5095 if (flowctrl & ADVERTISE_1000XPAUSE)
5096 ap->txconfig |= ANEG_CFG_PS1;
5097 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5098 ap->txconfig |= ANEG_CFG_PS2;
5099 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5100 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5101 tw32_f(MAC_MODE, tp->mac_mode);
5104 ap->state = ANEG_STATE_ABILITY_DETECT;
5107 case ANEG_STATE_ABILITY_DETECT:
5108 if (ap->ability_match != 0 && ap->rxconfig != 0)
5109 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5112 case ANEG_STATE_ACK_DETECT_INIT:
5113 ap->txconfig |= ANEG_CFG_ACK;
5114 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5115 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5116 tw32_f(MAC_MODE, tp->mac_mode);
5119 ap->state = ANEG_STATE_ACK_DETECT;
5122 case ANEG_STATE_ACK_DETECT:
5123 if (ap->ack_match != 0) {
5124 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5125 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5126 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5128 ap->state = ANEG_STATE_AN_ENABLE;
5130 } else if (ap->ability_match != 0 &&
5131 ap->rxconfig == 0) {
5132 ap->state = ANEG_STATE_AN_ENABLE;
5136 case ANEG_STATE_COMPLETE_ACK_INIT:
5137 if (ap->rxconfig & ANEG_CFG_INVAL) {
5141 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5142 MR_LP_ADV_HALF_DUPLEX |
5143 MR_LP_ADV_SYM_PAUSE |
5144 MR_LP_ADV_ASYM_PAUSE |
5145 MR_LP_ADV_REMOTE_FAULT1 |
5146 MR_LP_ADV_REMOTE_FAULT2 |
5147 MR_LP_ADV_NEXT_PAGE |
5150 if (ap->rxconfig & ANEG_CFG_FD)
5151 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5152 if (ap->rxconfig & ANEG_CFG_HD)
5153 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5154 if (ap->rxconfig & ANEG_CFG_PS1)
5155 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5156 if (ap->rxconfig & ANEG_CFG_PS2)
5157 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5158 if (ap->rxconfig & ANEG_CFG_RF1)
5159 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5160 if (ap->rxconfig & ANEG_CFG_RF2)
5161 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5162 if (ap->rxconfig & ANEG_CFG_NP)
5163 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5165 ap->link_time = ap->cur_time;
5167 ap->flags ^= (MR_TOGGLE_TX);
5168 if (ap->rxconfig & 0x0008)
5169 ap->flags |= MR_TOGGLE_RX;
5170 if (ap->rxconfig & ANEG_CFG_NP)
5171 ap->flags |= MR_NP_RX;
5172 ap->flags |= MR_PAGE_RX;
5174 ap->state = ANEG_STATE_COMPLETE_ACK;
5175 ret = ANEG_TIMER_ENAB;
5178 case ANEG_STATE_COMPLETE_ACK:
5179 if (ap->ability_match != 0 &&
5180 ap->rxconfig == 0) {
5181 ap->state = ANEG_STATE_AN_ENABLE;
5184 delta = ap->cur_time - ap->link_time;
5185 if (delta > ANEG_STATE_SETTLE_TIME) {
5186 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5187 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5189 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5190 !(ap->flags & MR_NP_RX)) {
5191 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5199 case ANEG_STATE_IDLE_DETECT_INIT:
5200 ap->link_time = ap->cur_time;
5201 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5202 tw32_f(MAC_MODE, tp->mac_mode);
5205 ap->state = ANEG_STATE_IDLE_DETECT;
5206 ret = ANEG_TIMER_ENAB;
5209 case ANEG_STATE_IDLE_DETECT:
5210 if (ap->ability_match != 0 &&
5211 ap->rxconfig == 0) {
5212 ap->state = ANEG_STATE_AN_ENABLE;
5215 delta = ap->cur_time - ap->link_time;
5216 if (delta > ANEG_STATE_SETTLE_TIME) {
5217 /* XXX another gem from the Broadcom driver :( */
5218 ap->state = ANEG_STATE_LINK_OK;
5222 case ANEG_STATE_LINK_OK:
5223 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5227 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5228 /* ??? unimplemented */
5231 case ANEG_STATE_NEXT_PAGE_WAIT:
5232 /* ??? unimplemented */
5243 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5246 struct tg3_fiber_aneginfo aninfo;
5247 int status = ANEG_FAILED;
5251 tw32_f(MAC_TX_AUTO_NEG, 0);
5253 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5254 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5257 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5260 memset(&aninfo, 0, sizeof(aninfo));
5261 aninfo.flags |= MR_AN_ENABLE;
5262 aninfo.state = ANEG_STATE_UNKNOWN;
5263 aninfo.cur_time = 0;
5265 while (++tick < 195000) {
5266 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5267 if (status == ANEG_DONE || status == ANEG_FAILED)
5273 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5274 tw32_f(MAC_MODE, tp->mac_mode);
5277 *txflags = aninfo.txconfig;
5278 *rxflags = aninfo.flags;
5280 if (status == ANEG_DONE &&
5281 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5282 MR_LP_ADV_FULL_DUPLEX)))
5288 static void tg3_init_bcm8002(struct tg3 *tp)
5290 u32 mac_status = tr32(MAC_STATUS);
5293 /* Reset when initializing for the first time or when we have a link. */
5294 if (tg3_flag(tp, INIT_COMPLETE) &&
5295 !(mac_status & MAC_STATUS_PCS_SYNCED))
5298 /* Set PLL lock range. */
5299 tg3_writephy(tp, 0x16, 0x8007);
5302 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5304 /* Wait for reset to complete. */
5305 /* XXX schedule_timeout() ... */
5306 for (i = 0; i < 500; i++)
5309 /* Config mode; select PMA/Ch 1 regs. */
5310 tg3_writephy(tp, 0x10, 0x8411);
5312 /* Enable auto-lock and comdet, select txclk for tx. */
5313 tg3_writephy(tp, 0x11, 0x0a10);
5315 tg3_writephy(tp, 0x18, 0x00a0);
5316 tg3_writephy(tp, 0x16, 0x41ff);
5318 /* Assert and deassert POR. */
5319 tg3_writephy(tp, 0x13, 0x0400);
5321 tg3_writephy(tp, 0x13, 0x0000);
5323 tg3_writephy(tp, 0x11, 0x0a50);
5325 tg3_writephy(tp, 0x11, 0x0a10);
5327 /* Wait for signal to stabilize */
5328 /* XXX schedule_timeout() ... */
5329 for (i = 0; i < 15000; i++)
5332 /* Deselect the channel register so we can read the PHYID
5335 tg3_writephy(tp, 0x10, 0x8011);
5338 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5341 bool current_link_up;
5342 u32 sg_dig_ctrl, sg_dig_status;
5343 u32 serdes_cfg, expected_sg_dig_ctrl;
5344 int workaround, port_a;
5347 expected_sg_dig_ctrl = 0;
5350 current_link_up = false;
5352 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5353 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5355 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5358 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5359 /* preserve bits 20-23 for voltage regulator */
5360 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5363 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5365 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5366 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5368 u32 val = serdes_cfg;
5374 tw32_f(MAC_SERDES_CFG, val);
5377 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5379 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5380 tg3_setup_flow_control(tp, 0, 0);
5381 current_link_up = true;
5386 /* Want auto-negotiation. */
5387 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5389 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5390 if (flowctrl & ADVERTISE_1000XPAUSE)
5391 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5392 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5393 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5395 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5396 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5397 tp->serdes_counter &&
5398 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5399 MAC_STATUS_RCVD_CFG)) ==
5400 MAC_STATUS_PCS_SYNCED)) {
5401 tp->serdes_counter--;
5402 current_link_up = true;
5407 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5408 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5410 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5412 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5413 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5414 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5415 MAC_STATUS_SIGNAL_DET)) {
5416 sg_dig_status = tr32(SG_DIG_STATUS);
5417 mac_status = tr32(MAC_STATUS);
5419 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5420 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5421 u32 local_adv = 0, remote_adv = 0;
5423 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5424 local_adv |= ADVERTISE_1000XPAUSE;
5425 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5426 local_adv |= ADVERTISE_1000XPSE_ASYM;
5428 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5429 remote_adv |= LPA_1000XPAUSE;
5430 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5431 remote_adv |= LPA_1000XPAUSE_ASYM;
5433 tp->link_config.rmt_adv =
5434 mii_adv_to_ethtool_adv_x(remote_adv);
5436 tg3_setup_flow_control(tp, local_adv, remote_adv);
5437 current_link_up = true;
5438 tp->serdes_counter = 0;
5439 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5440 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5441 if (tp->serdes_counter)
5442 tp->serdes_counter--;
5445 u32 val = serdes_cfg;
5452 tw32_f(MAC_SERDES_CFG, val);
5455 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5458 /* Link parallel detection - link is up
5459 * only if we have PCS_SYNC and are not
5460 * receiving config code words */
5461 mac_status = tr32(MAC_STATUS);
5462 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5463 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5464 tg3_setup_flow_control(tp, 0, 0);
5465 current_link_up = true;
5467 TG3_PHYFLG_PARALLEL_DETECT;
5468 tp->serdes_counter =
5469 SERDES_PARALLEL_DET_TIMEOUT;
5471 goto restart_autoneg;
5475 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5476 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5480 return current_link_up;
5483 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5485 bool current_link_up = false;
5487 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5490 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5491 u32 txflags, rxflags;
5494 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5495 u32 local_adv = 0, remote_adv = 0;
5497 if (txflags & ANEG_CFG_PS1)
5498 local_adv |= ADVERTISE_1000XPAUSE;
5499 if (txflags & ANEG_CFG_PS2)
5500 local_adv |= ADVERTISE_1000XPSE_ASYM;
5502 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5503 remote_adv |= LPA_1000XPAUSE;
5504 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5505 remote_adv |= LPA_1000XPAUSE_ASYM;
5507 tp->link_config.rmt_adv =
5508 mii_adv_to_ethtool_adv_x(remote_adv);
5510 tg3_setup_flow_control(tp, local_adv, remote_adv);
5512 current_link_up = true;
5514 for (i = 0; i < 30; i++) {
5517 (MAC_STATUS_SYNC_CHANGED |
5518 MAC_STATUS_CFG_CHANGED));
5520 if ((tr32(MAC_STATUS) &
5521 (MAC_STATUS_SYNC_CHANGED |
5522 MAC_STATUS_CFG_CHANGED)) == 0)
5526 mac_status = tr32(MAC_STATUS);
5527 if (!current_link_up &&
5528 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5529 !(mac_status & MAC_STATUS_RCVD_CFG))
5530 current_link_up = true;
5532 tg3_setup_flow_control(tp, 0, 0);
5534 /* Forcing 1000FD link up. */
5535 current_link_up = true;
5537 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5540 tw32_f(MAC_MODE, tp->mac_mode);
5545 return current_link_up;
5548 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5551 u16 orig_active_speed;
5552 u8 orig_active_duplex;
5554 bool current_link_up;
5557 orig_pause_cfg = tp->link_config.active_flowctrl;
5558 orig_active_speed = tp->link_config.active_speed;
5559 orig_active_duplex = tp->link_config.active_duplex;
5561 if (!tg3_flag(tp, HW_AUTONEG) &&
5563 tg3_flag(tp, INIT_COMPLETE)) {
5564 mac_status = tr32(MAC_STATUS);
5565 mac_status &= (MAC_STATUS_PCS_SYNCED |
5566 MAC_STATUS_SIGNAL_DET |
5567 MAC_STATUS_CFG_CHANGED |
5568 MAC_STATUS_RCVD_CFG);
5569 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5570 MAC_STATUS_SIGNAL_DET)) {
5571 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5572 MAC_STATUS_CFG_CHANGED));
5577 tw32_f(MAC_TX_AUTO_NEG, 0);
5579 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5580 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5581 tw32_f(MAC_MODE, tp->mac_mode);
5584 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5585 tg3_init_bcm8002(tp);
5587 /* Enable link change event even when serdes polling. */
5588 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5591 current_link_up = false;
5592 tp->link_config.rmt_adv = 0;
5593 mac_status = tr32(MAC_STATUS);
5595 if (tg3_flag(tp, HW_AUTONEG))
5596 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5598 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5600 tp->napi[0].hw_status->status =
5601 (SD_STATUS_UPDATED |
5602 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5604 for (i = 0; i < 100; i++) {
5605 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5606 MAC_STATUS_CFG_CHANGED));
5608 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5609 MAC_STATUS_CFG_CHANGED |
5610 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5614 mac_status = tr32(MAC_STATUS);
5615 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5616 current_link_up = false;
5617 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5618 tp->serdes_counter == 0) {
5619 tw32_f(MAC_MODE, (tp->mac_mode |
5620 MAC_MODE_SEND_CONFIGS));
5622 tw32_f(MAC_MODE, tp->mac_mode);
5626 if (current_link_up) {
5627 tp->link_config.active_speed = SPEED_1000;
5628 tp->link_config.active_duplex = DUPLEX_FULL;
5629 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5630 LED_CTRL_LNKLED_OVERRIDE |
5631 LED_CTRL_1000MBPS_ON));
5633 tp->link_config.active_speed = SPEED_UNKNOWN;
5634 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5635 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5636 LED_CTRL_LNKLED_OVERRIDE |
5637 LED_CTRL_TRAFFIC_OVERRIDE));
5640 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5641 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5642 if (orig_pause_cfg != now_pause_cfg ||
5643 orig_active_speed != tp->link_config.active_speed ||
5644 orig_active_duplex != tp->link_config.active_duplex)
5645 tg3_link_report(tp);
5651 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5655 u16 current_speed = SPEED_UNKNOWN;
5656 u8 current_duplex = DUPLEX_UNKNOWN;
5657 bool current_link_up = false;
5658 u32 local_adv, remote_adv, sgsr;
5660 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5661 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5662 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5663 (sgsr & SERDES_TG3_SGMII_MODE)) {
5668 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5670 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5671 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5673 current_link_up = true;
5674 if (sgsr & SERDES_TG3_SPEED_1000) {
5675 current_speed = SPEED_1000;
5676 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5677 } else if (sgsr & SERDES_TG3_SPEED_100) {
5678 current_speed = SPEED_100;
5679 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5681 current_speed = SPEED_10;
5682 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5685 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5686 current_duplex = DUPLEX_FULL;
5688 current_duplex = DUPLEX_HALF;
5691 tw32_f(MAC_MODE, tp->mac_mode);
5694 tg3_clear_mac_status(tp);
5696 goto fiber_setup_done;
5699 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5700 tw32_f(MAC_MODE, tp->mac_mode);
5703 tg3_clear_mac_status(tp);
5708 tp->link_config.rmt_adv = 0;
5710 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5711 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5712 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5713 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5714 bmsr |= BMSR_LSTATUS;
5716 bmsr &= ~BMSR_LSTATUS;
5719 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5721 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5722 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5723 /* do nothing, just check for link up at the end */
5724 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5727 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5728 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5729 ADVERTISE_1000XPAUSE |
5730 ADVERTISE_1000XPSE_ASYM |
5733 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5734 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5736 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5737 tg3_writephy(tp, MII_ADVERTISE, newadv);
5738 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5739 tg3_writephy(tp, MII_BMCR, bmcr);
5741 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5742 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5743 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5750 bmcr &= ~BMCR_SPEED1000;
5751 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5753 if (tp->link_config.duplex == DUPLEX_FULL)
5754 new_bmcr |= BMCR_FULLDPLX;
5756 if (new_bmcr != bmcr) {
5757 /* BMCR_SPEED1000 is a reserved bit that needs
5758 * to be set on write.
5760 new_bmcr |= BMCR_SPEED1000;
5762 /* Force a linkdown */
5766 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5767 adv &= ~(ADVERTISE_1000XFULL |
5768 ADVERTISE_1000XHALF |
5770 tg3_writephy(tp, MII_ADVERTISE, adv);
5771 tg3_writephy(tp, MII_BMCR, bmcr |
5775 tg3_carrier_off(tp);
5777 tg3_writephy(tp, MII_BMCR, new_bmcr);
5779 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5780 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5781 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5782 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5783 bmsr |= BMSR_LSTATUS;
5785 bmsr &= ~BMSR_LSTATUS;
5787 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5791 if (bmsr & BMSR_LSTATUS) {
5792 current_speed = SPEED_1000;
5793 current_link_up = true;
5794 if (bmcr & BMCR_FULLDPLX)
5795 current_duplex = DUPLEX_FULL;
5797 current_duplex = DUPLEX_HALF;
5802 if (bmcr & BMCR_ANENABLE) {
5805 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5806 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5807 common = local_adv & remote_adv;
5808 if (common & (ADVERTISE_1000XHALF |
5809 ADVERTISE_1000XFULL)) {
5810 if (common & ADVERTISE_1000XFULL)
5811 current_duplex = DUPLEX_FULL;
5813 current_duplex = DUPLEX_HALF;
5815 tp->link_config.rmt_adv =
5816 mii_adv_to_ethtool_adv_x(remote_adv);
5817 } else if (!tg3_flag(tp, 5780_CLASS)) {
5818 /* Link is up via parallel detect */
5820 current_link_up = false;
5826 if (current_link_up && current_duplex == DUPLEX_FULL)
5827 tg3_setup_flow_control(tp, local_adv, remote_adv);
5829 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5830 if (tp->link_config.active_duplex == DUPLEX_HALF)
5831 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5833 tw32_f(MAC_MODE, tp->mac_mode);
5836 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5838 tp->link_config.active_speed = current_speed;
5839 tp->link_config.active_duplex = current_duplex;
5841 tg3_test_and_report_link_chg(tp, current_link_up);
5845 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5847 if (tp->serdes_counter) {
5848 /* Give autoneg time to complete. */
5849 tp->serdes_counter--;
5854 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5857 tg3_readphy(tp, MII_BMCR, &bmcr);
5858 if (bmcr & BMCR_ANENABLE) {
5861 /* Select shadow register 0x1f */
5862 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5863 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5865 /* Select expansion interrupt status register */
5866 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5867 MII_TG3_DSP_EXP1_INT_STAT);
5868 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5869 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5871 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5872 /* We have signal detect and not receiving
5873 * config code words, link is up by parallel
5877 bmcr &= ~BMCR_ANENABLE;
5878 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5879 tg3_writephy(tp, MII_BMCR, bmcr);
5880 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5883 } else if (tp->link_up &&
5884 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5885 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5888 /* Select expansion interrupt status register */
5889 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5890 MII_TG3_DSP_EXP1_INT_STAT);
5891 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5895 /* Config code words received, turn on autoneg. */
5896 tg3_readphy(tp, MII_BMCR, &bmcr);
5897 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5899 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5905 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
5910 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5911 err = tg3_setup_fiber_phy(tp, force_reset);
5912 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5913 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5915 err = tg3_setup_copper_phy(tp, force_reset);
5917 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
5920 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5921 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5923 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5928 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5929 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5930 tw32(GRC_MISC_CFG, val);
5933 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5934 (6 << TX_LENGTHS_IPG_SHIFT);
5935 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
5936 tg3_asic_rev(tp) == ASIC_REV_5762)
5937 val |= tr32(MAC_TX_LENGTHS) &
5938 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5939 TX_LENGTHS_CNT_DWN_VAL_MSK);
5941 if (tp->link_config.active_speed == SPEED_1000 &&
5942 tp->link_config.active_duplex == DUPLEX_HALF)
5943 tw32(MAC_TX_LENGTHS, val |
5944 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5946 tw32(MAC_TX_LENGTHS, val |
5947 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5949 if (!tg3_flag(tp, 5705_PLUS)) {
5951 tw32(HOSTCC_STAT_COAL_TICKS,
5952 tp->coal.stats_block_coalesce_usecs);
5954 tw32(HOSTCC_STAT_COAL_TICKS, 0);
5958 if (tg3_flag(tp, ASPM_WORKAROUND)) {
5959 val = tr32(PCIE_PWR_MGMT_THRESH);
5961 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5964 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5965 tw32(PCIE_PWR_MGMT_THRESH, val);
5971 /* tp->lock must be held */
5972 static u64 tg3_refclk_read(struct tg3 *tp)
5974 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
5975 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
5978 /* tp->lock must be held */
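/* The reference clock is stopped before the two 32-bit halves of the
 * counter are written, so the value cannot advance mid-update, and is
 * restarted afterwards via the RESUME bit.
 */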
5979 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
5981 tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
5982 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
5983 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
5984 tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
5987 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
5988 static inline void tg3_full_unlock(struct tg3 *tp);
5989 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
5991 struct tg3 *tp = netdev_priv(dev);
5993 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5994 SOF_TIMESTAMPING_RX_SOFTWARE |
5995 SOF_TIMESTAMPING_SOFTWARE |
5996 SOF_TIMESTAMPING_TX_HARDWARE |
5997 SOF_TIMESTAMPING_RX_HARDWARE |
5998 SOF_TIMESTAMPING_RAW_HARDWARE;
6001 info->phc_index = ptp_clock_index(tp->ptp_clock);
6003 info->phc_index = -1;
6005 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6007 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6008 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6009 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6010 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
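/* These capabilities are what userspace sees when it queries the
 * device's timestamping support, e.g. with "ethtool -T <iface>".
 */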
6014 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6016 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6017 bool neg_adj = false;
6025 /* Frequency adjustment is performed using hardware with a 24 bit
6026 * accumulator and a programmable correction value. On each clk, the
6027 * correction value gets added to the accumulator and when it
6028 * overflows, the time counter is incremented/decremented.
6030 * So conversion from ppb to correction value is
6031 * ppb * (1 << 24) / 1000000000
6033 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6034 TG3_EAV_REF_CLK_CORRECT_MASK;
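/* Worked example of the conversion above: a requested adjustment of
 * +1000000 ppb (1000 ppm) gives
 * correction = 1000000 * (1 << 24) / 1000000000 = 16777,
 * so the 24-bit accumulator overflows roughly once every 1000 clocks,
 * i.e. one extra counter tick per 1000 clocks = 1000 ppm.
 */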
6036 tg3_full_lock(tp, 0);
6039 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6040 TG3_EAV_REF_CLK_CORRECT_EN |
6041 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6043 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6045 tg3_full_unlock(tp);
6050 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6052 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6054 tg3_full_lock(tp, 0);
6055 tp->ptp_adjust += delta;
6056 tg3_full_unlock(tp);
6061 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
6065 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6067 tg3_full_lock(tp, 0);
6068 ns = tg3_refclk_read(tp);
6069 ns += tp->ptp_adjust;
6070 tg3_full_unlock(tp);
6072 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
6073 ts->tv_nsec = remainder;
6078 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6079 const struct timespec *ts)
6082 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6084 ns = timespec_to_ns(ts);
6086 tg3_full_lock(tp, 0);
6087 tg3_refclk_write(tp, ns);
6089 tg3_full_unlock(tp);
6094 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6095 struct ptp_clock_request *rq, int on)
6100 static const struct ptp_clock_info tg3_ptp_caps = {
6101 .owner = THIS_MODULE,
6102 .name = "tg3 clock",
6103 .max_adj = 250000000,
6108 .adjfreq = tg3_ptp_adjfreq,
6109 .adjtime = tg3_ptp_adjtime,
6110 .gettime = tg3_ptp_gettime,
6111 .settime = tg3_ptp_settime,
6112 .enable = tg3_ptp_enable,
6115 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6116 struct skb_shared_hwtstamps *timestamp)
6118 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6119 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6123 /* tp->lock must be held */
6124 static void tg3_ptp_init(struct tg3 *tp)
6126 if (!tg3_flag(tp, PTP_CAPABLE))
6129 /* Initialize the hardware clock to the system time. */
6130 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6132 tp->ptp_info = tg3_ptp_caps;
6135 /* tp->lock must be held */
6136 static void tg3_ptp_resume(struct tg3 *tp)
6138 if (!tg3_flag(tp, PTP_CAPABLE))
6141 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6145 static void tg3_ptp_fini(struct tg3 *tp)
6147 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6150 ptp_clock_unregister(tp->ptp_clock);
6151 tp->ptp_clock = NULL;
6155 static inline int tg3_irq_sync(struct tg3 *tp)
6157 return tp->irq_sync;
6160 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6164 dst = (u32 *)((u8 *)dst + off);
6165 for (i = 0; i < len; i += sizeof(u32))
6166 *dst++ = tr32(off + i);
6169 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6171 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6172 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6173 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6174 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6175 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6176 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6177 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6178 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6179 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6180 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6181 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6182 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6183 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6184 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6185 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6186 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6187 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6188 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6189 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6191 if (tg3_flag(tp, SUPPORT_MSIX))
6192 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6194 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6195 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6196 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6197 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6198 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6199 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6200 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6201 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6203 if (!tg3_flag(tp, 5705_PLUS)) {
6204 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6205 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6206 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6209 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6210 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6211 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6212 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6213 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6215 if (tg3_flag(tp, NVRAM))
6216 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6219 static void tg3_dump_state(struct tg3 *tp)
6224 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6228 if (tg3_flag(tp, PCI_EXPRESS)) {
6229 /* Read up to but not including private PCI registers */
6230 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6231 regs[i / sizeof(u32)] = tr32(i);
6233 tg3_dump_legacy_regs(tp, regs);
6235 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6236 if (!regs[i + 0] && !regs[i + 1] &&
6237 !regs[i + 2] && !regs[i + 3])
6240 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6242 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6247 for (i = 0; i < tp->irq_cnt; i++) {
6248 struct tg3_napi *tnapi = &tp->napi[i];
6250 /* SW status block */
6252 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6254 tnapi->hw_status->status,
6255 tnapi->hw_status->status_tag,
6256 tnapi->hw_status->rx_jumbo_consumer,
6257 tnapi->hw_status->rx_consumer,
6258 tnapi->hw_status->rx_mini_consumer,
6259 tnapi->hw_status->idx[0].rx_producer,
6260 tnapi->hw_status->idx[0].tx_consumer);
6263 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6265 tnapi->last_tag, tnapi->last_irq_tag,
6266 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6268 tnapi->prodring.rx_std_prod_idx,
6269 tnapi->prodring.rx_std_cons_idx,
6270 tnapi->prodring.rx_jmb_prod_idx,
6271 tnapi->prodring.rx_jmb_cons_idx);
6275 /* This is called whenever we suspect that the system chipset is re-
6276 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6277 * is bogus tx completions. We try to recover by setting the
6278 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6281 static void tg3_tx_recover(struct tg3 *tp)
6283 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6284 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6286 netdev_warn(tp->dev,
6287 "The system may be re-ordering memory-mapped I/O "
6288 "cycles to the network device, attempting to recover. "
6289 "Please report the problem to the driver maintainer "
6290 "and include system chipset information.\n");
6292 spin_lock(&tp->lock);
6293 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6294 spin_unlock(&tp->lock);
6297 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6299 /* Tell compiler to fetch tx indices from memory. */
6301 return tnapi->tx_pending -
6302 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
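/* Worked example of the ring arithmetic above, assuming a 512-entry
 * TX ring: with tx_prod = 5, tx_cons = 510 and tx_pending = 511, the
 * in-flight count is (5 - 510) & 511 = 7, leaving 511 - 7 = 504
 * descriptors available.
 */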
6305 /* Tigon3 never reports partial packet sends. So we do not
6306 * need special logic to handle SKBs that have not had all
6307 * of their frags sent yet, like SunGEM does.
6309 static void tg3_tx(struct tg3_napi *tnapi)
6311 struct tg3 *tp = tnapi->tp;
6312 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6313 u32 sw_idx = tnapi->tx_cons;
6314 struct netdev_queue *txq;
6315 int index = tnapi - tp->napi;
6316 unsigned int pkts_compl = 0, bytes_compl = 0;
6318 if (tg3_flag(tp, ENABLE_TSS))
6321 txq = netdev_get_tx_queue(tp->dev, index);
6323 while (sw_idx != hw_idx) {
6324 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6325 struct sk_buff *skb = ri->skb;
6328 if (unlikely(skb == NULL)) {
6333 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6334 struct skb_shared_hwtstamps timestamp;
6335 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6336 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6338 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6340 skb_tstamp_tx(skb, &timestamp);
6343 pci_unmap_single(tp->pdev,
6344 dma_unmap_addr(ri, mapping),
6350 while (ri->fragmented) {
6351 ri->fragmented = false;
6352 sw_idx = NEXT_TX(sw_idx);
6353 ri = &tnapi->tx_buffers[sw_idx];
6356 sw_idx = NEXT_TX(sw_idx);
6358 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6359 ri = &tnapi->tx_buffers[sw_idx];
6360 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6363 pci_unmap_page(tp->pdev,
6364 dma_unmap_addr(ri, mapping),
6365 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6368 while (ri->fragmented) {
6369 ri->fragmented = false;
6370 sw_idx = NEXT_TX(sw_idx);
6371 ri = &tnapi->tx_buffers[sw_idx];
6374 sw_idx = NEXT_TX(sw_idx);
6378 bytes_compl += skb->len;
6382 if (unlikely(tx_bug)) {
6388 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6390 tnapi->tx_cons = sw_idx;
6392 /* Need to make the tx_cons update visible to tg3_start_xmit()
6393 * before checking for netif_queue_stopped(). Without the
6394 * memory barrier, there is a small possibility that tg3_start_xmit()
6395 * will miss it and cause the queue to be stopped forever.
6399 if (unlikely(netif_tx_queue_stopped(txq) &&
6400 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6401 __netif_tx_lock(txq, smp_processor_id());
6402 if (netif_tx_queue_stopped(txq) &&
6403 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6404 netif_tx_wake_queue(txq);
6405 __netif_tx_unlock(txq);
6409 static void tg3_frag_free(bool is_frag, void *data)
6412 put_page(virt_to_head_page(data));
6417 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6419 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6420 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6425 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6426 map_sz, PCI_DMA_FROMDEVICE);
6427 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6432 /* Returns size of skb allocated or < 0 on error.
6434 * We only need to fill in the address because the other members
6435 * of the RX descriptor are invariant, see tg3_init_rings.
6437 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6438 * posting buffers we only dirty the first cache line of the RX
6439 * descriptor (containing the address). Whereas for the RX status
6440 * buffers the cpu only reads the last cacheline of the RX descriptor
6441 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6443 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6444 u32 opaque_key, u32 dest_idx_unmasked,
6445 unsigned int *frag_size)
6447 struct tg3_rx_buffer_desc *desc;
6448 struct ring_info *map;
6451 int skb_size, data_size, dest_idx;
6453 switch (opaque_key) {
6454 case RXD_OPAQUE_RING_STD:
6455 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6456 desc = &tpr->rx_std[dest_idx];
6457 map = &tpr->rx_std_buffers[dest_idx];
6458 data_size = tp->rx_pkt_map_sz;
6461 case RXD_OPAQUE_RING_JUMBO:
6462 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6463 desc = &tpr->rx_jmb[dest_idx].std;
6464 map = &tpr->rx_jmb_buffers[dest_idx];
6465 data_size = TG3_RX_JMB_MAP_SZ;
6472 /* Do not overwrite any of the map or rp information
6473 * until we are sure we can commit to a new buffer.
6475 * Callers depend upon this behavior and assume that
6476 * we leave everything unchanged if we fail.
6478 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6479 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
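/* Illustrative sizing (the exact constants vary by chip, MTU and
 * architecture): a standard ~1.5 KiB receive buffer plus the RX offset
 * and skb_shared_info overhead typically stays within PAGE_SIZE and is
 * served by the page-frag allocator below, while a ~9 KiB jumbo buffer
 * exceeds it and falls back to kmalloc().
 */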
6480 if (skb_size <= PAGE_SIZE) {
6481 data = netdev_alloc_frag(skb_size);
6482 *frag_size = skb_size;
6484 data = kmalloc(skb_size, GFP_ATOMIC);
6490 mapping = pci_map_single(tp->pdev,
6491 data + TG3_RX_OFFSET(tp),
6493 PCI_DMA_FROMDEVICE);
6494 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6495 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6500 dma_unmap_addr_set(map, mapping, mapping);
6502 desc->addr_hi = ((u64)mapping >> 32);
6503 desc->addr_lo = ((u64)mapping & 0xffffffff);
6508 /* We only need to move over in the address because the other
6509 * members of the RX descriptor are invariant. See notes above
6510 * tg3_alloc_rx_data for full details.
6512 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6513 struct tg3_rx_prodring_set *dpr,
6514 u32 opaque_key, int src_idx,
6515 u32 dest_idx_unmasked)
6517 struct tg3 *tp = tnapi->tp;
6518 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6519 struct ring_info *src_map, *dest_map;
6520 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6523 switch (opaque_key) {
6524 case RXD_OPAQUE_RING_STD:
6525 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6526 dest_desc = &dpr->rx_std[dest_idx];
6527 dest_map = &dpr->rx_std_buffers[dest_idx];
6528 src_desc = &spr->rx_std[src_idx];
6529 src_map = &spr->rx_std_buffers[src_idx];
6532 case RXD_OPAQUE_RING_JUMBO:
6533 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6534 dest_desc = &dpr->rx_jmb[dest_idx].std;
6535 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6536 src_desc = &spr->rx_jmb[src_idx].std;
6537 src_map = &spr->rx_jmb_buffers[src_idx];
6544 dest_map->data = src_map->data;
6545 dma_unmap_addr_set(dest_map, mapping,
6546 dma_unmap_addr(src_map, mapping));
6547 dest_desc->addr_hi = src_desc->addr_hi;
6548 dest_desc->addr_lo = src_desc->addr_lo;
6550 /* Ensure that the update to the skb happens after the physical
6551 * addresses have been transferred to the new BD location.
6555 src_map->data = NULL;
6558 /* The RX ring scheme is composed of multiple rings which post fresh
6559 * buffers to the chip, and one special ring the chip uses to report
6560 * status back to the host.
6562 * The special ring reports the status of received packets to the
6563 * host. The chip does not write into the original descriptor the
6564 * RX buffer was obtained from. The chip simply takes the original
6565 * descriptor as provided by the host, updates the status and length
6566 * field, then writes this into the next status ring entry.
6568 * Each ring the host uses to post buffers to the chip is described
6569 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
6570 * it is first placed into the on-chip ram. When the packet's length
6571 * is known, it walks down the TG3_BDINFO entries to select the ring.
6572 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6573 * which is within the range of the new packet's length is chosen.
6575 * The "separate ring for rx status" scheme may sound queer, but it makes
6576 * sense from a cache coherency perspective. If only the host writes
6577 * to the buffer post rings, and only the chip writes to the rx status
6578 * rings, then cache lines never move beyond shared-modified state.
6579 * If both the host and chip were to write into the same ring, cache line
6580 * eviction could occur since both entities want it in an exclusive state.
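 *
 * As an illustration of the MAXLEN selection above: with jumbo frames
 * enabled, a 1400-byte packet falls within the standard ring's MAXLEN
 * and is reported against a standard-ring buffer, while a much larger
 * frame exceeds it and is reported against a jumbo-ring buffer instead.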
6582 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6584 struct tg3 *tp = tnapi->tp;
6585 u32 work_mask, rx_std_posted = 0;
6586 u32 std_prod_idx, jmb_prod_idx;
6587 u32 sw_idx = tnapi->rx_rcb_ptr;
6590 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6592 hw_idx = *(tnapi->rx_rcb_prod_idx);
6594 * We need to order the read of hw_idx and the read of
6595 * the opaque cookie.
6600 std_prod_idx = tpr->rx_std_prod_idx;
6601 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6602 while (sw_idx != hw_idx && budget > 0) {
6603 struct ring_info *ri;
6604 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6606 struct sk_buff *skb;
6607 dma_addr_t dma_addr;
6608 u32 opaque_key, desc_idx, *post_ptr;
6612 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6613 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6614 if (opaque_key == RXD_OPAQUE_RING_STD) {
6615 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6616 dma_addr = dma_unmap_addr(ri, mapping);
6618 post_ptr = &std_prod_idx;
6620 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6621 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6622 dma_addr = dma_unmap_addr(ri, mapping);
6624 post_ptr = &jmb_prod_idx;
6626 goto next_pkt_nopost;
6628 work_mask |= opaque_key;
6630 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6631 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6633 tg3_recycle_rx(tnapi, tpr, opaque_key,
6634 desc_idx, *post_ptr);
6636 /* Other statistics are tracked by the card. */
6641 prefetch(data + TG3_RX_OFFSET(tp));
6642 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6645 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6646 RXD_FLAG_PTPSTAT_PTPV1 ||
6647 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6648 RXD_FLAG_PTPSTAT_PTPV2) {
6649 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6650 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6653 if (len > TG3_RX_COPY_THRESH(tp)) {
6655 unsigned int frag_size;
6657 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6658 *post_ptr, &frag_size);
6662 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6663 PCI_DMA_FROMDEVICE);
6665 skb = build_skb(data, frag_size);
6667 tg3_frag_free(frag_size != 0, data);
6668 goto drop_it_no_recycle;
6670 skb_reserve(skb, TG3_RX_OFFSET(tp));
6671 /* Ensure that the update to the data happens
6672 * after the usage of the old DMA mapping.
6679 tg3_recycle_rx(tnapi, tpr, opaque_key,
6680 desc_idx, *post_ptr);
6682 skb = netdev_alloc_skb(tp->dev,
6683 len + TG3_RAW_IP_ALIGN);
6685 goto drop_it_no_recycle;
6687 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6688 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6690 data + TG3_RX_OFFSET(tp),
6692 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6697 tg3_hwclock_to_timestamp(tp, tstamp,
6698 skb_hwtstamps(skb));
6700 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6701 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6702 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6703 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6704 skb->ip_summed = CHECKSUM_UNNECESSARY;
6706 skb_checksum_none_assert(skb);
6708 skb->protocol = eth_type_trans(skb, tp->dev);
6710 if (len > (tp->dev->mtu + ETH_HLEN) &&
6711 skb->protocol != htons(ETH_P_8021Q)) {
6713 goto drop_it_no_recycle;
6716 if (desc->type_flags & RXD_FLAG_VLAN &&
6717 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6718 __vlan_hwaccel_put_tag(skb,
6719 desc->err_vlan & RXD_VLAN_MASK);
6721 napi_gro_receive(&tnapi->napi, skb);
6729 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6730 tpr->rx_std_prod_idx = std_prod_idx &
6731 tp->rx_std_ring_mask;
6732 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6733 tpr->rx_std_prod_idx);
6734 work_mask &= ~RXD_OPAQUE_RING_STD;
6739 sw_idx &= tp->rx_ret_ring_mask;
6741 /* Refresh hw_idx to see if there is new work */
6742 if (sw_idx == hw_idx) {
6743 hw_idx = *(tnapi->rx_rcb_prod_idx);
6748 /* ACK the status ring. */
6749 tnapi->rx_rcb_ptr = sw_idx;
6750 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6752 /* Refill RX ring(s). */
6753 if (!tg3_flag(tp, ENABLE_RSS)) {
6754 /* Sync BD data before updating mailbox */
6757 if (work_mask & RXD_OPAQUE_RING_STD) {
6758 tpr->rx_std_prod_idx = std_prod_idx &
6759 tp->rx_std_ring_mask;
6760 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6761 tpr->rx_std_prod_idx);
6763 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6764 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6765 tp->rx_jmb_ring_mask;
6766 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6767 tpr->rx_jmb_prod_idx);
6770 } else if (work_mask) {
6771 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6772 * updated before the producer indices can be updated.
6776 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6777 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6779 if (tnapi != &tp->napi[1]) {
6780 tp->rx_refill = true;
6781 napi_schedule(&tp->napi[1].napi);
6788 static void tg3_poll_link(struct tg3 *tp)
6790 /* handle link change and other phy events */
6791 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6792 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6794 if (sblk->status & SD_STATUS_LINK_CHG) {
6795 sblk->status = SD_STATUS_UPDATED |
6796 (sblk->status & ~SD_STATUS_LINK_CHG);
6797 spin_lock(&tp->lock);
6798 if (tg3_flag(tp, USE_PHYLIB)) {
6800 (MAC_STATUS_SYNC_CHANGED |
6801 MAC_STATUS_CFG_CHANGED |
6802 MAC_STATUS_MI_COMPLETION |
6803 MAC_STATUS_LNKSTATE_CHANGED));
6806 tg3_setup_phy(tp, false);
6807 spin_unlock(&tp->lock);
6812 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6813 struct tg3_rx_prodring_set *dpr,
6814 struct tg3_rx_prodring_set *spr)
6816 u32 si, di, cpycnt, src_prod_idx;
6820 src_prod_idx = spr->rx_std_prod_idx;
6822 /* Make sure updates to the rx_std_buffers[] entries and the
6823 * standard producer index are seen in the correct order.
6827 if (spr->rx_std_cons_idx == src_prod_idx)
6830 if (spr->rx_std_cons_idx < src_prod_idx)
6831 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6833 cpycnt = tp->rx_std_ring_mask + 1 -
6834 spr->rx_std_cons_idx;
6836 cpycnt = min(cpycnt,
6837 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
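/* Worked example, assuming a 512-entry standard ring (mask 0x1ff):
 * with spr cons_idx = 500 and prod_idx = 10, cons is not below prod,
 * so this pass copies 512 - 500 = 12 entries up to the end of the
 * ring; the remaining 10 are handled on a subsequent pass.
 */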
6839 si = spr->rx_std_cons_idx;
6840 di = dpr->rx_std_prod_idx;
6842 for (i = di; i < di + cpycnt; i++) {
6843 if (dpr->rx_std_buffers[i].data) {
6853 /* Ensure that updates to the rx_std_buffers ring and the
6854 * shadowed hardware producer ring from tg3_recycle_skb() are
6855 * ordered correctly WRT the skb check above.
6859 memcpy(&dpr->rx_std_buffers[di],
6860 &spr->rx_std_buffers[si],
6861 cpycnt * sizeof(struct ring_info));
6863 for (i = 0; i < cpycnt; i++, di++, si++) {
6864 struct tg3_rx_buffer_desc *sbd, *dbd;
6865 sbd = &spr->rx_std[si];
6866 dbd = &dpr->rx_std[di];
6867 dbd->addr_hi = sbd->addr_hi;
6868 dbd->addr_lo = sbd->addr_lo;
6871 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6872 tp->rx_std_ring_mask;
6873 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6874 tp->rx_std_ring_mask;
6878 src_prod_idx = spr->rx_jmb_prod_idx;
6880 /* Make sure updates to the rx_jmb_buffers[] entries and
6881 * the jumbo producer index are seen in the correct order.
6885 if (spr->rx_jmb_cons_idx == src_prod_idx)
6888 if (spr->rx_jmb_cons_idx < src_prod_idx)
6889 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6891 cpycnt = tp->rx_jmb_ring_mask + 1 -
6892 spr->rx_jmb_cons_idx;
6894 cpycnt = min(cpycnt,
6895 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6897 si = spr->rx_jmb_cons_idx;
6898 di = dpr->rx_jmb_prod_idx;
6900 for (i = di; i < di + cpycnt; i++) {
6901 if (dpr->rx_jmb_buffers[i].data) {
6911 /* Ensure that updates to the rx_jmb_buffers ring and the
6912 * shadowed hardware producer ring from tg3_recycle_skb() are
6913 * ordered correctly WRT the skb check above.
6917 memcpy(&dpr->rx_jmb_buffers[di],
6918 &spr->rx_jmb_buffers[si],
6919 cpycnt * sizeof(struct ring_info));
6921 for (i = 0; i < cpycnt; i++, di++, si++) {
6922 struct tg3_rx_buffer_desc *sbd, *dbd;
6923 sbd = &spr->rx_jmb[si].std;
6924 dbd = &dpr->rx_jmb[di].std;
6925 dbd->addr_hi = sbd->addr_hi;
6926 dbd->addr_lo = sbd->addr_lo;
6929 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6930 tp->rx_jmb_ring_mask;
6931 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6932 tp->rx_jmb_ring_mask;
6938 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6940 struct tg3 *tp = tnapi->tp;
6942 /* run TX completion thread */
6943 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6945 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6949 if (!tnapi->rx_rcb_prod_idx)
6952 /* run RX thread, within the bounds set by NAPI.
6953 * All RX "locking" is done by ensuring outside
6954 * code synchronizes with tg3->napi.poll()
6956 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6957 work_done += tg3_rx(tnapi, budget - work_done);
6959 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6960 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6962 u32 std_prod_idx = dpr->rx_std_prod_idx;
6963 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6965 tp->rx_refill = false;
6966 for (i = 1; i <= tp->rxq_cnt; i++)
6967 err |= tg3_rx_prodring_xfer(tp, dpr,
6968 &tp->napi[i].prodring);
6972 if (std_prod_idx != dpr->rx_std_prod_idx)
6973 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6974 dpr->rx_std_prod_idx);
6976 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6977 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6978 dpr->rx_jmb_prod_idx);
6983 tw32_f(HOSTCC_MODE, tp->coal_now);
6989 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6991 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6992 schedule_work(&tp->reset_task);
6995 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6997 cancel_work_sync(&tp->reset_task);
6998 tg3_flag_clear(tp, RESET_TASK_PENDING);
6999 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7002 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7004 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7005 struct tg3 *tp = tnapi->tp;
7007 struct tg3_hw_status *sblk = tnapi->hw_status;
7010 work_done = tg3_poll_work(tnapi, work_done, budget);
7012 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7015 if (unlikely(work_done >= budget))
7018 /* tp->last_tag is used in tg3_int_reenable() below
7019 * to tell the hw how much work has been processed,
7020 * so we must read it before checking for more work.
7022 tnapi->last_tag = sblk->status_tag;
7023 tnapi->last_irq_tag = tnapi->last_tag;
7026 /* check for RX/TX work to do */
7027 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7028 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7030 /* This test is not race-free, but it reduces
7031 * the number of interrupts by looping again.
7033 if (tnapi == &tp->napi[1] && tp->rx_refill)
7036 napi_complete(napi);
7037 /* Reenable interrupts. */
7038 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7040 /* This test here is synchronized by napi_schedule()
7041 * and napi_complete() to close the race condition.
7043 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7044 tw32(HOSTCC_MODE, tp->coalesce_mode |
7045 HOSTCC_MODE_ENABLE |
7056 /* work_done is guaranteed to be less than budget. */
7057 napi_complete(napi);
7058 tg3_reset_task_schedule(tp);
7062 static void tg3_process_error(struct tg3 *tp)
7065 bool real_error = false;
7067 if (tg3_flag(tp, ERROR_PROCESSED))
7070 /* Check Flow Attention register */
7071 val = tr32(HOSTCC_FLOW_ATTN);
7072 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7073 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7077 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7078 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7082 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7083 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7092 tg3_flag_set(tp, ERROR_PROCESSED);
7093 tg3_reset_task_schedule(tp);
7096 static int tg3_poll(struct napi_struct *napi, int budget)
7098 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7099 struct tg3 *tp = tnapi->tp;
7101 struct tg3_hw_status *sblk = tnapi->hw_status;
7104 if (sblk->status & SD_STATUS_ERROR)
7105 tg3_process_error(tp);
7109 work_done = tg3_poll_work(tnapi, work_done, budget);
7111 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7114 if (unlikely(work_done >= budget))
7117 if (tg3_flag(tp, TAGGED_STATUS)) {
7118 /* tp->last_tag is used in tg3_int_reenable() below
7119 * to tell the hw how much work has been processed,
7120 * so we must read it before checking for more work.
7122 tnapi->last_tag = sblk->status_tag;
7123 tnapi->last_irq_tag = tnapi->last_tag;
7126 sblk->status &= ~SD_STATUS_UPDATED;
7128 if (likely(!tg3_has_work(tnapi))) {
7129 napi_complete(napi);
7130 tg3_int_reenable(tnapi);
7138 /* work_done is guaranteed to be less than budget. */
7139 napi_complete(napi);
7140 tg3_reset_task_schedule(tp);
7144 static void tg3_napi_disable(struct tg3 *tp)
7148 for (i = tp->irq_cnt - 1; i >= 0; i--)
7149 napi_disable(&tp->napi[i].napi);
7152 static void tg3_napi_enable(struct tg3 *tp)
7156 for (i = 0; i < tp->irq_cnt; i++)
7157 napi_enable(&tp->napi[i].napi);
7160 static void tg3_napi_init(struct tg3 *tp)
7164 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7165 for (i = 1; i < tp->irq_cnt; i++)
7166 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7169 static void tg3_napi_fini(struct tg3 *tp)
7173 for (i = 0; i < tp->irq_cnt; i++)
7174 netif_napi_del(&tp->napi[i].napi);
7177 static inline void tg3_netif_stop(struct tg3 *tp)
7179 tp->dev->trans_start = jiffies; /* prevent tx timeout */
7180 tg3_napi_disable(tp);
7181 netif_carrier_off(tp->dev);
7182 netif_tx_disable(tp->dev);
7185 /* tp->lock must be held */
7186 static inline void tg3_netif_start(struct tg3 *tp)
7190 /* NOTE: unconditional netif_tx_wake_all_queues is only
7191 * appropriate so long as all callers are assured to
7192 * have free tx slots (such as after tg3_init_hw)
7194 netif_tx_wake_all_queues(tp->dev);
7197 netif_carrier_on(tp->dev);
7199 tg3_napi_enable(tp);
7200 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7201 tg3_enable_ints(tp);
7204 static void tg3_irq_quiesce(struct tg3 *tp)
7208 BUG_ON(tp->irq_sync);
7213 for (i = 0; i < tp->irq_cnt; i++)
7214 synchronize_irq(tp->napi[i].irq_vec);
7217 /* Fully shut down all tg3 driver activity elsewhere in the system.
7218 * If irq_sync is non-zero, the IRQ handlers are quiesced and
7219 * synchronized as well. Most of the time this is not necessary, except
7220 * when shutting down the device.
7222 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7224 spin_lock_bh(&tp->lock);
7226 tg3_irq_quiesce(tp);
7229 static inline void tg3_full_unlock(struct tg3 *tp)
7231 spin_unlock_bh(&tp->lock);
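/* Locking helpers above, in short: tg3_full_lock(tp, 0) only takes
 * tp->lock (a BH-disabled spinlock); passing a non-zero irq_sync also runs
 * tg3_irq_quiesce(), which synchronize_irq()s every vector so in-flight
 * handlers have finished and new ones are expected to back off via
 * tg3_irq_sync().  A reconfiguration path therefore typically looks like:
 *
 *	tg3_full_lock(tp, 1);
 *	... reprogram the hardware / rings ...
 *	tg3_full_unlock(tp);
 */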
7234 /* One-shot MSI handler - Chip automatically disables interrupt
7235 * after sending MSI so driver doesn't have to do it.
7237 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7239 struct tg3_napi *tnapi = dev_id;
7240 struct tg3 *tp = tnapi->tp;
7242 prefetch(tnapi->hw_status);
7244 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7246 if (likely(!tg3_irq_sync(tp)))
7247 napi_schedule(&tnapi->napi);
7252 /* MSI ISR - No need to check for interrupt sharing and no need to
7253 * flush status block and interrupt mailbox. PCI ordering rules
7254 * guarantee that MSI will arrive after the status block.
7256 static irqreturn_t tg3_msi(int irq, void *dev_id)
7258 struct tg3_napi *tnapi = dev_id;
7259 struct tg3 *tp = tnapi->tp;
7261 prefetch(tnapi->hw_status);
7263 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7265 * Writing any value to intr-mbox-0 clears PCI INTA# and
7266 * chip-internal interrupt pending events.
7267 * Writing non-zero to intr-mbox-0 additionally tells the
7268 * NIC to stop sending us irqs, engaging "in-intr-handler"
7271 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7272 if (likely(!tg3_irq_sync(tp)))
7273 napi_schedule(&tnapi->napi);
7275 return IRQ_RETVAL(1);
7278 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7280 struct tg3_napi *tnapi = dev_id;
7281 struct tg3 *tp = tnapi->tp;
7282 struct tg3_hw_status *sblk = tnapi->hw_status;
7283 unsigned int handled = 1;
7285 /* In INTx mode, it is possible for the interrupt to arrive at
7286 * the CPU before the status block posted prior to the interrupt.
7287 * Reading the PCI State register will confirm whether the
7288 * interrupt is ours and will flush the status block.
7290 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7291 if (tg3_flag(tp, CHIP_RESETTING) ||
7292 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7299 * Writing any value to intr-mbox-0 clears PCI INTA# and
7300 * chip-internal interrupt pending events.
7301 * Writing non-zero to intr-mbox-0 additionally tells the
7302 * NIC to stop sending us irqs, engaging "in-intr-handler"
7305 * Flush the mailbox to de-assert the IRQ immediately to prevent
7306 * spurious interrupts. The flush impacts performance but
7307 * excessive spurious interrupts can be worse in some cases.
7309 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7310 if (tg3_irq_sync(tp))
7312 sblk->status &= ~SD_STATUS_UPDATED;
7313 if (likely(tg3_has_work(tnapi))) {
7314 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7315 napi_schedule(&tnapi->napi);
7317 /* No work, shared interrupt perhaps? re-enable
7318 * interrupts, and flush that PCI write
7320 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7324 return IRQ_RETVAL(handled);
7327 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7329 struct tg3_napi *tnapi = dev_id;
7330 struct tg3 *tp = tnapi->tp;
7331 struct tg3_hw_status *sblk = tnapi->hw_status;
7332 unsigned int handled = 1;
7334 /* In INTx mode, it is possible for the interrupt to arrive at
7335 * the CPU before the status block posted prior to the interrupt.
7336 * Reading the PCI State register will confirm whether the
7337 * interrupt is ours and will flush the status block.
7339 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7340 if (tg3_flag(tp, CHIP_RESETTING) ||
7341 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7348 * Writing any value to intr-mbox-0 clears PCI INTA# and
7349 * chip-internal interrupt pending events.
7350 * Writing non-zero to intr-mbox-0 additionally tells the
7351 * NIC to stop sending us irqs, engaging "in-intr-handler"
7354 * Flush the mailbox to de-assert the IRQ immediately to prevent
7355 * spurious interrupts. The flush impacts performance but
7356 * excessive spurious interrupts can be worse in some cases.
7358 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7361 * In a shared interrupt configuration, sometimes other devices'
7362 * interrupts will scream. We record the current status tag here
7363 * so that the above check can report that the screaming interrupts
7364 * are unhandled. Eventually they will be silenced.
7366 tnapi->last_irq_tag = sblk->status_tag;
7368 if (tg3_irq_sync(tp))
7371 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7373 napi_schedule(&tnapi->napi);
7376 return IRQ_RETVAL(handled);
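/* The two INTx handlers above differ mainly in how they decide whether the
 * interrupt is ours: tg3_interrupt() relies on SD_STATUS_UPDATED in the
 * status block, while tg3_interrupt_tagged() compares the status tag with
 * the last tag it acknowledged.  The tagged variant also records the tag
 * even when no work is found, so a screaming shared interrupt line shows up
 * as a stream of unhandled returns and the kernel can eventually silence it.
 * Both flush the mailbox write (tw32_mailbox_f) so INTA# is de-asserted
 * before the handler returns, at the cost of an extra read per interrupt.
 */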
7379 /* ISR for interrupt test */
7380 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7382 struct tg3_napi *tnapi = dev_id;
7383 struct tg3 *tp = tnapi->tp;
7384 struct tg3_hw_status *sblk = tnapi->hw_status;
7386 if ((sblk->status & SD_STATUS_UPDATED) ||
7387 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7388 tg3_disable_ints(tp);
7389 return IRQ_RETVAL(1);
7391 return IRQ_RETVAL(0);
7394 #ifdef CONFIG_NET_POLL_CONTROLLER
7395 static void tg3_poll_controller(struct net_device *dev)
7398 struct tg3 *tp = netdev_priv(dev);
7400 if (tg3_irq_sync(tp))
7403 for (i = 0; i < tp->irq_cnt; i++)
7404 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7408 static void tg3_tx_timeout(struct net_device *dev)
7410 struct tg3 *tp = netdev_priv(dev);
7412 if (netif_msg_tx_err(tp)) {
7413 netdev_err(dev, "transmit timed out, resetting\n");
7417 tg3_reset_task_schedule(tp);
7420 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7421 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7423 u32 base = (u32) mapping & 0xffffffff;
7425 return (base > 0xffffdcc0) && (base + len + 8 < base);
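/* Worked example for tg3_4g_overflow_test(): the affected chips cannot DMA a
 * buffer that crosses a 4GB boundary, so the test checks whether
 * base + len + 8 wraps around in 32 bits.  With mapping = 0x1fffff000 and
 * len = 0x2000:
 *
 *	base           = 0xfffff000
 *	base + len + 8 = 0x00001008	(wrapped, < base)  ->  hwbug path
 *
 * The base > 0xffffdcc0 pre-check just skips the arithmetic for buffers that
 * start more than roughly one maximum frame (~9KB) below the boundary and
 * therefore cannot wrap.
 */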
7428 /* Test for DMA addresses > 40-bit */
7429 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7432 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7433 if (tg3_flag(tp, 40BIT_DMA_BUG))
7434 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7441 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7442 dma_addr_t mapping, u32 len, u32 flags,
7445 txbd->addr_hi = ((u64) mapping >> 32);
7446 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7447 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7448 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7451 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7452 dma_addr_t map, u32 len, u32 flags,
7455 struct tg3 *tp = tnapi->tp;
7458 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7461 if (tg3_4g_overflow_test(map, len))
7464 if (tg3_40bit_overflow_test(tp, map, len))
7467 if (tp->dma_limit) {
7468 u32 prvidx = *entry;
7469 u32 tmp_flag = flags & ~TXD_FLAG_END;
7470 while (len > tp->dma_limit && *budget) {
7471 u32 frag_len = tp->dma_limit;
7472 len -= tp->dma_limit;
7474 /* Avoid the 8-byte DMA problem */
7476 len += tp->dma_limit / 2;
7477 frag_len = tp->dma_limit / 2;
7480 tnapi->tx_buffers[*entry].fragmented = true;
7482 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7483 frag_len, tmp_flag, mss, vlan);
7486 *entry = NEXT_TX(*entry);
7493 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7494 len, flags, mss, vlan);
7496 *entry = NEXT_TX(*entry);
7499 tnapi->tx_buffers[prvidx].fragmented = false;
7503 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7504 len, flags, mss, vlan);
7505 *entry = NEXT_TX(*entry);
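/* tg3_tx_frag_set() above maps one piece of an skb onto one or more transmit
 * BDs.  A true return means the buffer trips a DMA erratum (a <= 8 byte
 * transfer on SHORT_DMA_BUG chips, a 4GB-boundary crossing, a > 40-bit
 * address, or running out of BD budget) and the caller must fall back to
 * tigon3_dma_hwbug_workaround().  When tp->dma_limit is set, an oversized
 * buffer is carved into dma_limit-sized BDs that walk through the single
 * DMA mapping; the intermediate entries are flagged ->fragmented so that
 * tg3_tx_skb_unmap() later skips them, and the dma_limit/2 adjustment keeps
 * the final chunk from shrinking to 8 bytes or less, which would re-create
 * the short-DMA problem.
 */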
7511 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7514 struct sk_buff *skb;
7515 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7520 pci_unmap_single(tnapi->tp->pdev,
7521 dma_unmap_addr(txb, mapping),
7525 while (txb->fragmented) {
7526 txb->fragmented = false;
7527 entry = NEXT_TX(entry);
7528 txb = &tnapi->tx_buffers[entry];
7531 for (i = 0; i <= last; i++) {
7532 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7534 entry = NEXT_TX(entry);
7535 txb = &tnapi->tx_buffers[entry];
7537 pci_unmap_page(tnapi->tp->pdev,
7538 dma_unmap_addr(txb, mapping),
7539 skb_frag_size(frag), PCI_DMA_TODEVICE);
7541 while (txb->fragmented) {
7542 txb->fragmented = false;
7543 entry = NEXT_TX(entry);
7544 txb = &tnapi->tx_buffers[entry];
7549 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7550 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7551 struct sk_buff **pskb,
7552 u32 *entry, u32 *budget,
7553 u32 base_flags, u32 mss, u32 vlan)
7555 struct tg3 *tp = tnapi->tp;
7556 struct sk_buff *new_skb, *skb = *pskb;
7557 dma_addr_t new_addr = 0;
7560 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7561 new_skb = skb_copy(skb, GFP_ATOMIC);
7563 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7565 new_skb = skb_copy_expand(skb,
7566 skb_headroom(skb) + more_headroom,
7567 skb_tailroom(skb), GFP_ATOMIC);
7573 /* New SKB is guaranteed to be linear. */
7574 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7576 /* Make sure the mapping succeeded */
7577 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7578 dev_kfree_skb(new_skb);
7581 u32 save_entry = *entry;
7583 base_flags |= TXD_FLAG_END;
7585 tnapi->tx_buffers[*entry].skb = new_skb;
7586 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7589 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7590 new_skb->len, base_flags,
7592 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7593 dev_kfree_skb(new_skb);
7604 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7606 /* Use GSO to workaround a rare TSO bug that may be triggered when the
7607 * TSO header is greater than 80 bytes.
7609 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7611 struct sk_buff *segs, *nskb;
7612 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7614 /* Estimate the number of fragments in the worst case */
7615 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7616 netif_stop_queue(tp->dev);
7618 /* netif_tx_stop_queue() must be done before checking
7619 * tx index in tg3_tx_avail() below, because in
7620 * tg3_tx(), we update tx index before checking for
7621 * netif_tx_queue_stopped().
7624 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7625 return NETDEV_TX_BUSY;
7627 netif_wake_queue(tp->dev);
7630 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7632 goto tg3_tso_bug_end;
7638 tg3_start_xmit(nskb, tp->dev);
7644 return NETDEV_TX_OK;
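/* The TSO fallback above undoes segmentation offload in software:
 * skb_gso_segment() is called with TSO masked out of the feature set, which
 * yields a chain of ordinary MTU-sized packets, and each one is resubmitted
 * through tg3_start_xmit().  The frag_cnt_est check (roughly gso_segs * 3
 * descriptors) stops the queue first if the tx ring might not have room for
 * all of the resulting segments.
 */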
7647 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7648 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7650 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7652 struct tg3 *tp = netdev_priv(dev);
7653 u32 len, entry, base_flags, mss, vlan = 0;
7655 int i = -1, would_hit_hwbug;
7657 struct tg3_napi *tnapi;
7658 struct netdev_queue *txq;
7661 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7662 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7663 if (tg3_flag(tp, ENABLE_TSS))
7666 budget = tg3_tx_avail(tnapi);
7668 /* We are running in BH disabled context with netif_tx_lock
7669 * and TX reclaim runs via tp->napi.poll inside of a software
7670 * interrupt. Furthermore, IRQ processing runs lockless so we have
7671 * no IRQ context deadlocks to worry about either. Rejoice!
7673 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7674 if (!netif_tx_queue_stopped(txq)) {
7675 netif_tx_stop_queue(txq);
7677 /* This is a hard error, log it. */
7679 "BUG! Tx Ring full when queue awake!\n");
7681 return NETDEV_TX_BUSY;
7684 entry = tnapi->tx_prod;
7686 if (skb->ip_summed == CHECKSUM_PARTIAL)
7687 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7689 mss = skb_shinfo(skb)->gso_size;
7692 u32 tcp_opt_len, hdr_len;
7694 if (skb_header_cloned(skb) &&
7695 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7699 tcp_opt_len = tcp_optlen(skb);
7701 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7703 if (!skb_is_gso_v6(skb)) {
7705 iph->tot_len = htons(mss + hdr_len);
7708 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7709 tg3_flag(tp, TSO_BUG))
7710 return tg3_tso_bug(tp, skb);
7712 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7713 TXD_FLAG_CPU_POST_DMA);
7715 if (tg3_flag(tp, HW_TSO_1) ||
7716 tg3_flag(tp, HW_TSO_2) ||
7717 tg3_flag(tp, HW_TSO_3)) {
7718 tcp_hdr(skb)->check = 0;
7719 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7721 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7726 if (tg3_flag(tp, HW_TSO_3)) {
7727 mss |= (hdr_len & 0xc) << 12;
7729 base_flags |= 0x00000010;
7730 base_flags |= (hdr_len & 0x3e0) << 5;
7731 } else if (tg3_flag(tp, HW_TSO_2))
7732 mss |= hdr_len << 9;
7733 else if (tg3_flag(tp, HW_TSO_1) ||
7734 tg3_asic_rev(tp) == ASIC_REV_5705) {
7735 if (tcp_opt_len || iph->ihl > 5) {
7738 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7739 mss |= (tsflags << 11);
7742 if (tcp_opt_len || iph->ihl > 5) {
7745 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7746 base_flags |= tsflags << 12;
7751 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7752 !mss && skb->len > VLAN_ETH_FRAME_LEN)
7753 base_flags |= TXD_FLAG_JMB_PKT;
7755 if (vlan_tx_tag_present(skb)) {
7756 base_flags |= TXD_FLAG_VLAN;
7757 vlan = vlan_tx_tag_get(skb);
7760 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7761 tg3_flag(tp, TX_TSTAMP_EN)) {
7762 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7763 base_flags |= TXD_FLAG_HWTSTAMP;
7766 len = skb_headlen(skb);
7768 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7769 if (pci_dma_mapping_error(tp->pdev, mapping))
7773 tnapi->tx_buffers[entry].skb = skb;
7774 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7776 would_hit_hwbug = 0;
7778 if (tg3_flag(tp, 5701_DMA_BUG))
7779 would_hit_hwbug = 1;
7781 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7782 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7784 would_hit_hwbug = 1;
7785 } else if (skb_shinfo(skb)->nr_frags > 0) {
7788 if (!tg3_flag(tp, HW_TSO_1) &&
7789 !tg3_flag(tp, HW_TSO_2) &&
7790 !tg3_flag(tp, HW_TSO_3))
7793 /* Now loop through additional data
7794 * fragments, and queue them.
7796 last = skb_shinfo(skb)->nr_frags - 1;
7797 for (i = 0; i <= last; i++) {
7798 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7800 len = skb_frag_size(frag);
7801 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7802 len, DMA_TO_DEVICE);
7804 tnapi->tx_buffers[entry].skb = NULL;
7805 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7807 if (dma_mapping_error(&tp->pdev->dev, mapping))
7811 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7813 ((i == last) ? TXD_FLAG_END : 0),
7815 would_hit_hwbug = 1;
7821 if (would_hit_hwbug) {
7822 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7824 /* If the workaround fails due to memory/mapping
7825 * failure, silently drop this packet.
7827 entry = tnapi->tx_prod;
7828 budget = tg3_tx_avail(tnapi);
7829 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7830 base_flags, mss, vlan))
7834 skb_tx_timestamp(skb);
7835 netdev_tx_sent_queue(txq, skb->len);
7837 /* Sync BD data before updating mailbox */
7840 /* Packets are ready, update Tx producer idx local and on card. */
7841 tw32_tx_mbox(tnapi->prodmbox, entry);
7843 tnapi->tx_prod = entry;
7844 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7845 netif_tx_stop_queue(txq);
7847 /* netif_tx_stop_queue() must be done before checking
7848 * tx index in tg3_tx_avail() below, because in
7849 * tg3_tx(), we update tx index before checking for
7850 * netif_tx_queue_stopped().
7853 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7854 netif_tx_wake_queue(txq);
7858 return NETDEV_TX_OK;
7861 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7862 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7867 return NETDEV_TX_OK;
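/* End of the main transmit path.  In outline: check ring space, build
 * base_flags (checksum, TSO, VLAN tag, hardware timestamp), DMA-map the
 * linear part and each page fragment through tg3_tx_frag_set(), and only
 * then publish the new producer index with tw32_tx_mbox().  If any mapping
 * trips a hardware DMA bug, would_hit_hwbug causes everything mapped so far
 * to be unwound and the packet to be retried as a single linear bounce copy
 * via tigon3_dma_hwbug_workaround(); mapping failures silently drop the
 * packet (returning NETDEV_TX_OK) rather than reporting NETDEV_TX_BUSY.
 */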
7870 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7873 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7874 MAC_MODE_PORT_MODE_MASK);
7876 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7878 if (!tg3_flag(tp, 5705_PLUS))
7879 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7881 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7882 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7884 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7886 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7888 if (tg3_flag(tp, 5705_PLUS) ||
7889 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7890 tg3_asic_rev(tp) == ASIC_REV_5700)
7891 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7894 tw32(MAC_MODE, tp->mac_mode);
7898 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7900 u32 val, bmcr, mac_mode, ptest = 0;
7902 tg3_phy_toggle_apd(tp, false);
7903 tg3_phy_toggle_automdix(tp, false);
7905 if (extlpbk && tg3_phy_set_extloopbk(tp))
7908 bmcr = BMCR_FULLDPLX;
7913 bmcr |= BMCR_SPEED100;
7917 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7919 bmcr |= BMCR_SPEED100;
7922 bmcr |= BMCR_SPEED1000;
7927 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7928 tg3_readphy(tp, MII_CTRL1000, &val);
7929 val |= CTL1000_AS_MASTER |
7930 CTL1000_ENABLE_MASTER;
7931 tg3_writephy(tp, MII_CTRL1000, val);
7933 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7934 MII_TG3_FET_PTEST_TRIM_2;
7935 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7938 bmcr |= BMCR_LOOPBACK;
7940 tg3_writephy(tp, MII_BMCR, bmcr);
7942 /* The write needs to be flushed for the FETs */
7943 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7944 tg3_readphy(tp, MII_BMCR, &bmcr);
7948 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7949 tg3_asic_rev(tp) == ASIC_REV_5785) {
7950 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7951 MII_TG3_FET_PTEST_FRC_TX_LINK |
7952 MII_TG3_FET_PTEST_FRC_TX_LOCK);
7954 /* The write needs to be flushed for the AC131 */
7955 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7958 /* Reset to prevent losing 1st rx packet intermittently */
7959 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7960 tg3_flag(tp, 5780_CLASS)) {
7961 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7963 tw32_f(MAC_RX_MODE, tp->rx_mode);
7966 mac_mode = tp->mac_mode &
7967 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7968 if (speed == SPEED_1000)
7969 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7971 mac_mode |= MAC_MODE_PORT_MODE_MII;
7973 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
7974 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7976 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7977 mac_mode &= ~MAC_MODE_LINK_POLARITY;
7978 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7979 mac_mode |= MAC_MODE_LINK_POLARITY;
7981 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7982 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7985 tw32(MAC_MODE, mac_mode);
7991 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7993 struct tg3 *tp = netdev_priv(dev);
7995 if (features & NETIF_F_LOOPBACK) {
7996 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7999 spin_lock_bh(&tp->lock);
8000 tg3_mac_loopback(tp, true);
8001 netif_carrier_on(tp->dev);
8002 spin_unlock_bh(&tp->lock);
8003 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8005 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8008 spin_lock_bh(&tp->lock);
8009 tg3_mac_loopback(tp, false);
8010 /* Force link status check */
8011 tg3_setup_phy(tp, true);
8012 spin_unlock_bh(&tp->lock);
8013 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8017 static netdev_features_t tg3_fix_features(struct net_device *dev,
8018 netdev_features_t features)
8020 struct tg3 *tp = netdev_priv(dev);
8022 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8023 features &= ~NETIF_F_ALL_TSO;
8028 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8030 netdev_features_t changed = dev->features ^ features;
8032 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8033 tg3_set_loopback(dev, features);
8038 static void tg3_rx_prodring_free(struct tg3 *tp,
8039 struct tg3_rx_prodring_set *tpr)
8043 if (tpr != &tp->napi[0].prodring) {
8044 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8045 i = (i + 1) & tp->rx_std_ring_mask)
8046 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8049 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8050 for (i = tpr->rx_jmb_cons_idx;
8051 i != tpr->rx_jmb_prod_idx;
8052 i = (i + 1) & tp->rx_jmb_ring_mask) {
8053 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8061 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8062 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8065 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8066 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8067 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8072 /* Initialize rx rings for packet processing.
8074 * The chip has been shut down and the driver detached from
8075 * the networking, so no interrupts or new tx packets will
8076 * end up in the driver. tp->{tx,}lock are held and thus we may not sleep.
8079 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8080 struct tg3_rx_prodring_set *tpr)
8082 u32 i, rx_pkt_dma_sz;
8084 tpr->rx_std_cons_idx = 0;
8085 tpr->rx_std_prod_idx = 0;
8086 tpr->rx_jmb_cons_idx = 0;
8087 tpr->rx_jmb_prod_idx = 0;
8089 if (tpr != &tp->napi[0].prodring) {
8090 memset(&tpr->rx_std_buffers[0], 0,
8091 TG3_RX_STD_BUFF_RING_SIZE(tp));
8092 if (tpr->rx_jmb_buffers)
8093 memset(&tpr->rx_jmb_buffers[0], 0,
8094 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8098 /* Zero out all descriptors. */
8099 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8101 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8102 if (tg3_flag(tp, 5780_CLASS) &&
8103 tp->dev->mtu > ETH_DATA_LEN)
8104 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8105 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8107 /* Initialize invariants of the rings; we only set this
8108 * stuff once. This works because the card does not
8109 * write into the rx buffer posting rings.
8111 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8112 struct tg3_rx_buffer_desc *rxd;
8114 rxd = &tpr->rx_std[i];
8115 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8116 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8117 rxd->opaque = (RXD_OPAQUE_RING_STD |
8118 (i << RXD_OPAQUE_INDEX_SHIFT));
8121 /* Now allocate fresh SKBs for each rx ring. */
8122 for (i = 0; i < tp->rx_pending; i++) {
8123 unsigned int frag_size;
8125 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8127 netdev_warn(tp->dev,
8128 "Using a smaller RX standard ring. Only "
8129 "%d out of %d buffers were allocated "
8130 "successfully\n", i, tp->rx_pending);
8138 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8141 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8143 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8146 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8147 struct tg3_rx_buffer_desc *rxd;
8149 rxd = &tpr->rx_jmb[i].std;
8150 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8151 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8153 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8154 (i << RXD_OPAQUE_INDEX_SHIFT));
8157 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8158 unsigned int frag_size;
8160 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8162 netdev_warn(tp->dev,
8163 "Using a smaller RX jumbo ring. Only %d "
8164 "out of %d buffers were allocated "
8165 "successfully\n", i, tp->rx_jumbo_pending);
8168 tp->rx_jumbo_pending = i;
8177 tg3_rx_prodring_free(tp, tpr);
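/* Allocation policy for tg3_rx_prodring_alloc() above: descriptor
 * invariants (DMA length, END flag, the opaque ring/index cookie) are
 * written once, since the hardware never writes into the producer rings,
 * and then data buffers are attached for rx_pending / rx_jumbo_pending
 * entries.  Partial allocation failure is tolerated by trimming the pending
 * count to what was actually allocated (with a warning); only a complete
 * failure unwinds everything through tg3_rx_prodring_free().
 */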
8181 static void tg3_rx_prodring_fini(struct tg3 *tp,
8182 struct tg3_rx_prodring_set *tpr)
8184 kfree(tpr->rx_std_buffers);
8185 tpr->rx_std_buffers = NULL;
8186 kfree(tpr->rx_jmb_buffers);
8187 tpr->rx_jmb_buffers = NULL;
8189 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8190 tpr->rx_std, tpr->rx_std_mapping);
8194 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8195 tpr->rx_jmb, tpr->rx_jmb_mapping);
8200 static int tg3_rx_prodring_init(struct tg3 *tp,
8201 struct tg3_rx_prodring_set *tpr)
8203 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8205 if (!tpr->rx_std_buffers)
8208 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8209 TG3_RX_STD_RING_BYTES(tp),
8210 &tpr->rx_std_mapping,
8215 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8216 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8218 if (!tpr->rx_jmb_buffers)
8221 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8222 TG3_RX_JMB_RING_BYTES(tp),
8223 &tpr->rx_jmb_mapping,
8232 tg3_rx_prodring_fini(tp, tpr);
8236 /* Free up pending packets in all rx/tx rings.
8238 * The chip has been shut down and the driver detached from
8239 * the networking, so no interrupts or new tx packets will
8240 * end up in the driver. tp->{tx,}lock is not held and we are not
8241 * in an interrupt context and thus may sleep.
8243 static void tg3_free_rings(struct tg3 *tp)
8247 for (j = 0; j < tp->irq_cnt; j++) {
8248 struct tg3_napi *tnapi = &tp->napi[j];
8250 tg3_rx_prodring_free(tp, &tnapi->prodring);
8252 if (!tnapi->tx_buffers)
8255 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8256 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8261 tg3_tx_skb_unmap(tnapi, i,
8262 skb_shinfo(skb)->nr_frags - 1);
8264 dev_kfree_skb_any(skb);
8266 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8270 /* Initialize tx/rx rings for packet processing.
8272 * The chip has been shut down and the driver detached from
8273 * the networking, so no interrupts or new tx packets will
8275 * end up in the driver. tp->{tx,}lock are held and thus we may not sleep.
8277 static int tg3_init_rings(struct tg3 *tp)
8281 /* Free up all the SKBs. */
8284 for (i = 0; i < tp->irq_cnt; i++) {
8285 struct tg3_napi *tnapi = &tp->napi[i];
8287 tnapi->last_tag = 0;
8288 tnapi->last_irq_tag = 0;
8289 tnapi->hw_status->status = 0;
8290 tnapi->hw_status->status_tag = 0;
8291 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8296 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8298 tnapi->rx_rcb_ptr = 0;
8300 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8302 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8311 static void tg3_mem_tx_release(struct tg3 *tp)
8315 for (i = 0; i < tp->irq_max; i++) {
8316 struct tg3_napi *tnapi = &tp->napi[i];
8318 if (tnapi->tx_ring) {
8319 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8320 tnapi->tx_ring, tnapi->tx_desc_mapping);
8321 tnapi->tx_ring = NULL;
8324 kfree(tnapi->tx_buffers);
8325 tnapi->tx_buffers = NULL;
8329 static int tg3_mem_tx_acquire(struct tg3 *tp)
8332 struct tg3_napi *tnapi = &tp->napi[0];
8334 /* If multivector TSS is enabled, vector 0 does not handle
8335 * tx interrupts. Don't allocate any resources for it.
8337 if (tg3_flag(tp, ENABLE_TSS))
8340 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8341 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8342 TG3_TX_RING_SIZE, GFP_KERNEL);
8343 if (!tnapi->tx_buffers)
8346 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8348 &tnapi->tx_desc_mapping,
8350 if (!tnapi->tx_ring)
8357 tg3_mem_tx_release(tp);
8361 static void tg3_mem_rx_release(struct tg3 *tp)
8365 for (i = 0; i < tp->irq_max; i++) {
8366 struct tg3_napi *tnapi = &tp->napi[i];
8368 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8373 dma_free_coherent(&tp->pdev->dev,
8374 TG3_RX_RCB_RING_BYTES(tp),
8376 tnapi->rx_rcb_mapping);
8377 tnapi->rx_rcb = NULL;
8381 static int tg3_mem_rx_acquire(struct tg3 *tp)
8383 unsigned int i, limit;
8385 limit = tp->rxq_cnt;
8387 /* If RSS is enabled, we need a (dummy) producer ring
8388 * set on vector zero. This is the true hw prodring.
8390 if (tg3_flag(tp, ENABLE_RSS))
8393 for (i = 0; i < limit; i++) {
8394 struct tg3_napi *tnapi = &tp->napi[i];
8396 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8399 /* If multivector RSS is enabled, vector 0
8400 * does not handle rx or tx interrupts.
8401 * Don't allocate any resources for it.
8403 if (!i && tg3_flag(tp, ENABLE_RSS))
8406 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8407 TG3_RX_RCB_RING_BYTES(tp),
8408 &tnapi->rx_rcb_mapping,
8409 GFP_KERNEL | __GFP_ZERO);
8417 tg3_mem_rx_release(tp);
8422 * Must not be invoked with interrupt sources disabled and
8423 * the hardware shut down.
8425 static void tg3_free_consistent(struct tg3 *tp)
8429 for (i = 0; i < tp->irq_cnt; i++) {
8430 struct tg3_napi *tnapi = &tp->napi[i];
8432 if (tnapi->hw_status) {
8433 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8435 tnapi->status_mapping);
8436 tnapi->hw_status = NULL;
8440 tg3_mem_rx_release(tp);
8441 tg3_mem_tx_release(tp);
8444 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8445 tp->hw_stats, tp->stats_mapping);
8446 tp->hw_stats = NULL;
8451 * Must not be invoked with interrupt sources disabled and
8452 * the hardware shut down. Can sleep.
8454 static int tg3_alloc_consistent(struct tg3 *tp)
8458 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8459 sizeof(struct tg3_hw_stats),
8461 GFP_KERNEL | __GFP_ZERO);
8465 for (i = 0; i < tp->irq_cnt; i++) {
8466 struct tg3_napi *tnapi = &tp->napi[i];
8467 struct tg3_hw_status *sblk;
8469 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8471 &tnapi->status_mapping,
8472 GFP_KERNEL | __GFP_ZERO);
8473 if (!tnapi->hw_status)
8476 sblk = tnapi->hw_status;
8478 if (tg3_flag(tp, ENABLE_RSS)) {
8479 u16 *prodptr = NULL;
8482 * When RSS is enabled, the status block format changes
8483 * slightly. The "rx_jumbo_consumer", "reserved",
8484 * and "rx_mini_consumer" members get mapped to the
8485 * other three rx return ring producer indexes.
8489 prodptr = &sblk->idx[0].rx_producer;
8492 prodptr = &sblk->rx_jumbo_consumer;
8495 prodptr = &sblk->reserved;
8498 prodptr = &sblk->rx_mini_consumer;
8501 tnapi->rx_rcb_prod_idx = prodptr;
8503 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8507 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8513 tg3_free_consistent(tp);
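/* Status block note for the RSS case above: the chip reuses three
 * otherwise-idle fields of struct tg3_hw_status (rx_jumbo_consumer,
 * reserved, rx_mini_consumer) as the producer indices of the additional rx
 * return rings, so each rx vector's rx_rcb_prod_idx is pointed at the field
 * that corresponds to its ring; the first rx vector keeps the conventional
 * idx[0].rx_producer slot, and without RSS every vector uses that slot.
 */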
8517 #define MAX_WAIT_CNT 1000
8519 /* To stop a block, clear the enable bit and poll till it
8520 * clears. tp->lock is held.
8522 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8527 if (tg3_flag(tp, 5705_PLUS)) {
8534 /* We can't enable/disable these bits of the
8535 * 5705/5750, so just say success.
8548 for (i = 0; i < MAX_WAIT_CNT; i++) {
8551 if ((val & enable_bit) == 0)
8555 if (i == MAX_WAIT_CNT && !silent) {
8556 dev_err(&tp->pdev->dev,
8557 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8565 /* tp->lock is held. */
8566 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8570 tg3_disable_ints(tp);
8572 tp->rx_mode &= ~RX_MODE_ENABLE;
8573 tw32_f(MAC_RX_MODE, tp->rx_mode);
8576 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8577 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8578 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8579 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8580 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8581 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8583 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8584 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8585 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8586 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8587 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8588 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8589 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8591 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8592 tw32_f(MAC_MODE, tp->mac_mode);
8595 tp->tx_mode &= ~TX_MODE_ENABLE;
8596 tw32_f(MAC_TX_MODE, tp->tx_mode);
8598 for (i = 0; i < MAX_WAIT_CNT; i++) {
8600 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8603 if (i >= MAX_WAIT_CNT) {
8604 dev_err(&tp->pdev->dev,
8605 "%s timed out, TX_MODE_ENABLE will not clear "
8606 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8610 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8611 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8612 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8614 tw32(FTQ_RESET, 0xffffffff);
8615 tw32(FTQ_RESET, 0x00000000);
8617 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8618 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8620 for (i = 0; i < tp->irq_cnt; i++) {
8621 struct tg3_napi *tnapi = &tp->napi[i];
8622 if (tnapi->hw_status)
8623 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
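/* Shutdown ordering in tg3_abort_hw() above follows the data path: the MAC
 * receiver and the receive-side engines are stopped first so no new frames
 * enter the chip, then the send-side engines and the read DMA engine, then
 * the MAC transmitter is disabled and polled until TX_MODE_ENABLE clears,
 * and finally host coalescing, write DMA, the FTQs and the buffer/memory
 * managers are halted and every vector's status block is wiped.  Each engine
 * is stopped through tg3_stop_block(), which clears its ENABLE bit and polls
 * up to MAX_WAIT_CNT times for the bit to latch clear.
 */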
8629 /* Save PCI command register before chip reset */
8630 static void tg3_save_pci_state(struct tg3 *tp)
8632 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8635 /* Restore PCI state after chip reset */
8636 static void tg3_restore_pci_state(struct tg3 *tp)
8640 /* Re-enable indirect register accesses. */
8641 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8642 tp->misc_host_ctrl);
8644 /* Set MAX PCI retry to zero. */
8645 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8646 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8647 tg3_flag(tp, PCIX_MODE))
8648 val |= PCISTATE_RETRY_SAME_DMA;
8649 /* Allow reads and writes to the APE register and memory space. */
8650 if (tg3_flag(tp, ENABLE_APE))
8651 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8652 PCISTATE_ALLOW_APE_SHMEM_WR |
8653 PCISTATE_ALLOW_APE_PSPACE_WR;
8654 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8656 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8658 if (!tg3_flag(tp, PCI_EXPRESS)) {
8659 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8660 tp->pci_cacheline_sz);
8661 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8665 /* Make sure PCI-X relaxed ordering bit is clear. */
8666 if (tg3_flag(tp, PCIX_MODE)) {
8669 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8671 pcix_cmd &= ~PCI_X_CMD_ERO;
8672 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8676 if (tg3_flag(tp, 5780_CLASS)) {
8678 /* Chip reset on 5780 will reset MSI enable bit,
8679 * so we need to restore it.
8681 if (tg3_flag(tp, USING_MSI)) {
8684 pci_read_config_word(tp->pdev,
8685 tp->msi_cap + PCI_MSI_FLAGS,
8687 pci_write_config_word(tp->pdev,
8688 tp->msi_cap + PCI_MSI_FLAGS,
8689 ctrl | PCI_MSI_FLAGS_ENABLE);
8690 val = tr32(MSGINT_MODE);
8691 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8696 /* tp->lock is held. */
8697 static int tg3_chip_reset(struct tg3 *tp)
8700 void (*write_op)(struct tg3 *, u32, u32);
8705 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8707 /* No matching tg3_nvram_unlock() after this because
8708 * chip reset below will undo the nvram lock.
8710 tp->nvram_lock_cnt = 0;
8712 /* GRC_MISC_CFG core clock reset will clear the memory
8713 * enable bit in PCI register 4 and the MSI enable bit
8714 * on some chips, so we save relevant registers here.
8716 tg3_save_pci_state(tp);
8718 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8719 tg3_flag(tp, 5755_PLUS))
8720 tw32(GRC_FASTBOOT_PC, 0);
8723 * We must avoid the readl() that normally takes place.
8724 * It locks machines, causes machine checks, and other
8725 * fun things. So, temporarily disable the 5701
8726 * hardware workaround, while we do the reset.
8728 write_op = tp->write32;
8729 if (write_op == tg3_write_flush_reg32)
8730 tp->write32 = tg3_write32;
8732 /* Prevent the irq handler from reading or writing PCI registers
8733 * during chip reset when the memory enable bit in the PCI command
8734 * register may be cleared. The chip does not generate interrupt
8735 * at this time, but the irq handler may still be called due to irq
8736 * sharing or irqpoll.
8738 tg3_flag_set(tp, CHIP_RESETTING);
8739 for (i = 0; i < tp->irq_cnt; i++) {
8740 struct tg3_napi *tnapi = &tp->napi[i];
8741 if (tnapi->hw_status) {
8742 tnapi->hw_status->status = 0;
8743 tnapi->hw_status->status_tag = 0;
8745 tnapi->last_tag = 0;
8746 tnapi->last_irq_tag = 0;
8750 for (i = 0; i < tp->irq_cnt; i++)
8751 synchronize_irq(tp->napi[i].irq_vec);
8753 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8754 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8755 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8759 val = GRC_MISC_CFG_CORECLK_RESET;
8761 if (tg3_flag(tp, PCI_EXPRESS)) {
8762 /* Force PCIe 1.0a mode */
8763 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8764 !tg3_flag(tp, 57765_PLUS) &&
8765 tr32(TG3_PCIE_PHY_TSTCTL) ==
8766 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8767 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8769 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
8770 tw32(GRC_MISC_CFG, (1 << 29));
8775 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
8776 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8777 tw32(GRC_VCPU_EXT_CTRL,
8778 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8781 /* Manage gphy power for all CPMU absent PCIe devices. */
8782 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8783 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8785 tw32(GRC_MISC_CFG, val);
8787 /* restore 5701 hardware bug workaround write method */
8788 tp->write32 = write_op;
8790 /* Unfortunately, we have to delay before the PCI read back.
8791 * Some 575X chips even will not respond to a PCI cfg access
8792 * when the reset command is given to the chip.
8794 * How do these hardware designers expect things to work
8795 * properly if the PCI write is posted for a long period
8796 * of time? It is always necessary to have some method by
8797 * which a register read back can occur to push out the
8798 * write that performs the reset.
8800 * For most tg3 variants the trick below has worked.
8805 /* Flush PCI posted writes. The normal MMIO registers
8806 * are inaccessible at this time so this is the only
8807 * way to do this reliably (actually, this is no longer
8808 * the case, see above). I tried to use indirect
8809 * register read/write but this upset some 5701 variants.
8811 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8815 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8818 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
8822 /* Wait for link training to complete. */
8823 for (j = 0; j < 5000; j++)
8826 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8827 pci_write_config_dword(tp->pdev, 0xc4,
8828 cfg_val | (1 << 15));
8831 /* Clear the "no snoop" and "relaxed ordering" bits. */
8832 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8834 * Older PCIe devices only support the 128 byte
8835 * MPS setting. Enforce the restriction.
8837 if (!tg3_flag(tp, CPMU_PRESENT))
8838 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8839 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8841 /* Clear error status */
8842 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8843 PCI_EXP_DEVSTA_CED |
8844 PCI_EXP_DEVSTA_NFED |
8845 PCI_EXP_DEVSTA_FED |
8846 PCI_EXP_DEVSTA_URD);
8849 tg3_restore_pci_state(tp);
8851 tg3_flag_clear(tp, CHIP_RESETTING);
8852 tg3_flag_clear(tp, ERROR_PROCESSED);
8855 if (tg3_flag(tp, 5780_CLASS))
8856 val = tr32(MEMARB_MODE);
8857 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8859 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
8861 tw32(0x5000, 0x400);
8864 if (tg3_flag(tp, IS_SSB_CORE)) {
8866 * BCM4785: In order to avoid repercussions from using
8867 * potentially defective internal ROM, stop the Rx RISC CPU,
8868 * which is not required for normal operation.
8871 tg3_halt_cpu(tp, RX_CPU_BASE);
8874 tw32(GRC_MODE, tp->grc_mode);
8876 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
8879 tw32(0xc4, val | (1 << 15));
8882 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8883 tg3_asic_rev(tp) == ASIC_REV_5705) {
8884 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8885 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
8886 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8887 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8890 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8891 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8893 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8894 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8899 tw32_f(MAC_MODE, val);
8902 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8904 err = tg3_poll_fw(tp);
8910 if (tg3_flag(tp, PCI_EXPRESS) &&
8911 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
8912 tg3_asic_rev(tp) != ASIC_REV_5785 &&
8913 !tg3_flag(tp, 57765_PLUS)) {
8916 tw32(0x7c00, val | (1 << 25));
8919 if (tg3_asic_rev(tp) == ASIC_REV_5720) {
8920 val = tr32(TG3_CPMU_CLCK_ORIDE);
8921 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8924 /* Reprobe ASF enable state. */
8925 tg3_flag_clear(tp, ENABLE_ASF);
8926 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
8927 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
8929 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8930 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8931 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8934 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8935 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8936 tg3_flag_set(tp, ENABLE_ASF);
8937 tp->last_event_jiffies = jiffies;
8938 if (tg3_flag(tp, 5750_PLUS))
8939 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8941 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
8942 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
8943 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
8944 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
8945 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
8952 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8953 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8955 /* tp->lock is held. */
8956 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
8962 tg3_write_sig_pre_reset(tp, kind);
8964 tg3_abort_hw(tp, silent);
8965 err = tg3_chip_reset(tp);
8967 __tg3_set_mac_addr(tp, false);
8969 tg3_write_sig_legacy(tp, kind);
8970 tg3_write_sig_post_reset(tp, kind);
8973 /* Save the stats across chip resets... */
8974 tg3_get_nstats(tp, &tp->net_stats_prev);
8975 tg3_get_estats(tp, &tp->estats_prev);
8977 /* And make sure the next sample is new data */
8978 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8987 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8989 struct tg3 *tp = netdev_priv(dev);
8990 struct sockaddr *addr = p;
8992 bool skip_mac_1 = false;
8994 if (!is_valid_ether_addr(addr->sa_data))
8995 return -EADDRNOTAVAIL;
8997 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8999 if (!netif_running(dev))
9002 if (tg3_flag(tp, ENABLE_ASF)) {
9003 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9005 addr0_high = tr32(MAC_ADDR_0_HIGH);
9006 addr0_low = tr32(MAC_ADDR_0_LOW);
9007 addr1_high = tr32(MAC_ADDR_1_HIGH);
9008 addr1_low = tr32(MAC_ADDR_1_LOW);
9010 /* Skip MAC addr 1 if ASF is using it. */
9011 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9012 !(addr1_high == 0 && addr1_low == 0))
9015 spin_lock_bh(&tp->lock);
9016 __tg3_set_mac_addr(tp, skip_mac_1);
9017 spin_unlock_bh(&tp->lock);
9022 /* tp->lock is held. */
9023 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9024 dma_addr_t mapping, u32 maxlen_flags,
9028 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9029 ((u64) mapping >> 32));
9031 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9032 ((u64) mapping & 0xffffffff));
9034 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9037 if (!tg3_flag(tp, 5705_PLUS))
9039 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
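/* tg3_set_bdinfo() above programs one ring control block in NIC SRAM: the
 * 64-bit host address of the ring (high word, then low word), a combined
 * max-length/flags word, and, only on pre-5705 chips, the ring's address in
 * NIC-local memory.  tg3_rings_reset() below uses it for both the send ring
 * and the rx return rings of every vector.
 */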
9044 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9048 if (!tg3_flag(tp, ENABLE_TSS)) {
9049 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9050 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9051 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9053 tw32(HOSTCC_TXCOL_TICKS, 0);
9054 tw32(HOSTCC_TXMAX_FRAMES, 0);
9055 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9057 for (; i < tp->txq_cnt; i++) {
9060 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9061 tw32(reg, ec->tx_coalesce_usecs);
9062 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9063 tw32(reg, ec->tx_max_coalesced_frames);
9064 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9065 tw32(reg, ec->tx_max_coalesced_frames_irq);
9069 for (; i < tp->irq_max - 1; i++) {
9070 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9071 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9072 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9076 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9079 u32 limit = tp->rxq_cnt;
9081 if (!tg3_flag(tp, ENABLE_RSS)) {
9082 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9083 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9084 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9087 tw32(HOSTCC_RXCOL_TICKS, 0);
9088 tw32(HOSTCC_RXMAX_FRAMES, 0);
9089 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9092 for (; i < limit; i++) {
9095 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9096 tw32(reg, ec->rx_coalesce_usecs);
9097 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9098 tw32(reg, ec->rx_max_coalesced_frames);
9099 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9100 tw32(reg, ec->rx_max_coalesced_frames_irq);
9103 for (; i < tp->irq_max - 1; i++) {
9104 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9105 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9106 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
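/* Host coalescing register layout used by the two helpers above: vector 0
 * owns the legacy HOSTCC_{TX,RX}COL_TICKS / *_MAX_FRAMES / *COAL_MAXF_INT
 * registers, and every additional MSI-X vector has its own copy of the three
 * parameters starting at the *_VEC1 addresses, spaced 0x18 bytes apart.
 * Slots beyond the configured queue count are explicitly zeroed so stale
 * values are not left behind, and when TSS/RSS steers all tx/rx work away
 * from vector 0 its legacy registers are zeroed as well.
 */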
9110 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9112 tg3_coal_tx_init(tp, ec);
9113 tg3_coal_rx_init(tp, ec);
9115 if (!tg3_flag(tp, 5705_PLUS)) {
9116 u32 val = ec->stats_block_coalesce_usecs;
9118 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9119 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9124 tw32(HOSTCC_STAT_COAL_TICKS, val);
9128 /* tp->lock is held. */
9129 static void tg3_rings_reset(struct tg3 *tp)
9132 u32 stblk, txrcb, rxrcb, limit;
9133 struct tg3_napi *tnapi = &tp->napi[0];
9135 /* Disable all transmit rings but the first. */
9136 if (!tg3_flag(tp, 5705_PLUS))
9137 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9138 else if (tg3_flag(tp, 5717_PLUS))
9139 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9140 else if (tg3_flag(tp, 57765_CLASS) ||
9141 tg3_asic_rev(tp) == ASIC_REV_5762)
9142 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9144 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9146 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9147 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9148 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9149 BDINFO_FLAGS_DISABLED);
9152 /* Disable all receive return rings but the first. */
9153 if (tg3_flag(tp, 5717_PLUS))
9154 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9155 else if (!tg3_flag(tp, 5705_PLUS))
9156 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9157 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9158 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9159 tg3_flag(tp, 57765_CLASS))
9160 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9162 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9164 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9165 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9166 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9167 BDINFO_FLAGS_DISABLED);
9169 /* Disable interrupts */
9170 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9171 tp->napi[0].chk_msi_cnt = 0;
9172 tp->napi[0].last_rx_cons = 0;
9173 tp->napi[0].last_tx_cons = 0;
9175 /* Zero mailbox registers. */
9176 if (tg3_flag(tp, SUPPORT_MSIX)) {
9177 for (i = 1; i < tp->irq_max; i++) {
9178 tp->napi[i].tx_prod = 0;
9179 tp->napi[i].tx_cons = 0;
9180 if (tg3_flag(tp, ENABLE_TSS))
9181 tw32_mailbox(tp->napi[i].prodmbox, 0);
9182 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9183 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9184 tp->napi[i].chk_msi_cnt = 0;
9185 tp->napi[i].last_rx_cons = 0;
9186 tp->napi[i].last_tx_cons = 0;
9188 if (!tg3_flag(tp, ENABLE_TSS))
9189 tw32_mailbox(tp->napi[0].prodmbox, 0);
9191 tp->napi[0].tx_prod = 0;
9192 tp->napi[0].tx_cons = 0;
9193 tw32_mailbox(tp->napi[0].prodmbox, 0);
9194 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9197 /* Make sure the NIC-based send BD rings are disabled. */
9198 if (!tg3_flag(tp, 5705_PLUS)) {
9199 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9200 for (i = 0; i < 16; i++)
9201 tw32_tx_mbox(mbox + i * 8, 0);
9204 txrcb = NIC_SRAM_SEND_RCB;
9205 rxrcb = NIC_SRAM_RCV_RET_RCB;
9207 /* Clear status block in ram. */
9208 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9210 /* Set status block DMA address */
9211 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9212 ((u64) tnapi->status_mapping >> 32));
9213 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9214 ((u64) tnapi->status_mapping & 0xffffffff));
9216 if (tnapi->tx_ring) {
9217 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9218 (TG3_TX_RING_SIZE <<
9219 BDINFO_FLAGS_MAXLEN_SHIFT),
9220 NIC_SRAM_TX_BUFFER_DESC);
9221 txrcb += TG3_BDINFO_SIZE;
9224 if (tnapi->rx_rcb) {
9225 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9226 (tp->rx_ret_ring_mask + 1) <<
9227 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9228 rxrcb += TG3_BDINFO_SIZE;
9231 stblk = HOSTCC_STATBLCK_RING1;
9233 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9234 u64 mapping = (u64)tnapi->status_mapping;
9235 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9236 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9238 /* Clear status block in ram. */
9239 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9241 if (tnapi->tx_ring) {
9242 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9243 (TG3_TX_RING_SIZE <<
9244 BDINFO_FLAGS_MAXLEN_SHIFT),
9245 NIC_SRAM_TX_BUFFER_DESC);
9246 txrcb += TG3_BDINFO_SIZE;
9249 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9250 ((tp->rx_ret_ring_mask + 1) <<
9251 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
9254 rxrcb += TG3_BDINFO_SIZE;
9258 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9260 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9262 if (!tg3_flag(tp, 5750_PLUS) ||
9263 tg3_flag(tp, 5780_CLASS) ||
9264 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9265 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9266 tg3_flag(tp, 57765_PLUS))
9267 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9268 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9269 tg3_asic_rev(tp) == ASIC_REV_5787)
9270 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9272 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9274 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9275 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9277 val = min(nic_rep_thresh, host_rep_thresh);
9278 tw32(RCVBDI_STD_THRESH, val);
9280 if (tg3_flag(tp, 57765_PLUS))
9281 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9283 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9286 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9288 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9290 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9291 tw32(RCVBDI_JUMBO_THRESH, val);
9293 if (tg3_flag(tp, 57765_PLUS))
9294 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9297 static inline u32 calc_crc(unsigned char *buf, int len)
9305 for (j = 0; j < len; j++) {
9308 for (k = 0; k < 8; k++) {
9321 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9323 /* accept or reject all multicast frames */
9324 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9325 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9326 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9327 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9330 static void __tg3_set_rx_mode(struct net_device *dev)
9332 struct tg3 *tp = netdev_priv(dev);
9335 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9336 RX_MODE_KEEP_VLAN_TAG);
9338 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9339 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG flag clear. */
9342 if (!tg3_flag(tp, ENABLE_ASF))
9343 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9346 if (dev->flags & IFF_PROMISC) {
9347 /* Promiscuous mode. */
9348 rx_mode |= RX_MODE_PROMISC;
9349 } else if (dev->flags & IFF_ALLMULTI) {
9350 /* Accept all multicast. */
9351 tg3_set_multi(tp, 1);
9352 } else if (netdev_mc_empty(dev)) {
9353 /* Reject all multicast. */
9354 tg3_set_multi(tp, 0);
9356 /* Accept one or more multicast(s). */
9357 struct netdev_hw_addr *ha;
9358 u32 mc_filter[4] = { 0, };
9363 netdev_for_each_mc_addr(ha, dev) {
9364 crc = calc_crc(ha->addr, ETH_ALEN);
9366 regidx = (bit & 0x60) >> 5;
9368 mc_filter[regidx] |= (1 << bit);
9371 tw32(MAC_HASH_REG_0, mc_filter[0]);
9372 tw32(MAC_HASH_REG_1, mc_filter[1]);
9373 tw32(MAC_HASH_REG_2, mc_filter[2]);
9374 tw32(MAC_HASH_REG_3, mc_filter[3]);
9377 if (rx_mode != tp->rx_mode) {
9378 tp->rx_mode = rx_mode;
9379 tw32_f(MAC_RX_MODE, rx_mode);
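/* Multicast filtering above: promiscuous and all-multicast modes simply
 * saturate (or clear) the four 32-bit MAC_HASH_REG registers via
 * tg3_set_multi().  For an explicit multicast list, each address is run
 * through calc_crc() and, effectively, a 7-bit index derived from the CRC
 * selects one of the 128 hash bits: bits 6:5 pick the register (regidx) and
 * the remaining low bits pick the position within it.  Any frame whose
 * destination hashes to a set bit is accepted, so the filter is approximate
 * and may let through extra multicast traffic.
 */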
9384 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9388 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9389 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9392 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9396 if (!tg3_flag(tp, SUPPORT_MSIX))
9399 if (tp->rxq_cnt == 1) {
9400 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9404 /* Validate table against current IRQ count */
9405 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9406 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9410 if (i != TG3_RSS_INDIR_TBL_SIZE)
9411 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9414 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9417 u32 reg = MAC_RSS_INDIR_TBL_0;
9419 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9420 u32 val = tp->rss_ind_tbl[i];
9422 for (; i % 8; i++) {
9424 val |= tp->rss_ind_tbl[i];
9431 /* tp->lock is held. */
9432 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9434 u32 val, rdmac_mode;
9436 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9438 tg3_disable_ints(tp);
9442 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9444 if (tg3_flag(tp, INIT_COMPLETE))
9445 tg3_abort_hw(tp, 1);
9447 /* Enable MAC control of LPI */
9448 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
9449 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
9450 TG3_CPMU_EEE_LNKIDL_UART_IDL;
9451 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9452 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
9454 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
9456 tw32_f(TG3_CPMU_EEE_CTRL,
9457 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9459 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9460 TG3_CPMU_EEEMD_LPI_IN_TX |
9461 TG3_CPMU_EEEMD_LPI_IN_RX |
9462 TG3_CPMU_EEEMD_EEE_ENABLE;
9464 if (tg3_asic_rev(tp) != ASIC_REV_5717)
9465 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9467 if (tg3_flag(tp, ENABLE_APE))
9468 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9470 tw32_f(TG3_CPMU_EEE_MODE, val);
9472 tw32_f(TG3_CPMU_EEE_DBTMR1,
9473 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9474 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9476 tw32_f(TG3_CPMU_EEE_DBTMR2,
9477 TG3_CPMU_DBTMR2_APE_TX_2047US |
9478 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9481 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9482 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9483 tg3_phy_pull_config(tp);
9484 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9490 err = tg3_chip_reset(tp);
9494 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9496 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9497 val = tr32(TG3_CPMU_CTRL);
9498 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9499 tw32(TG3_CPMU_CTRL, val);
9501 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9502 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9503 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9504 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9506 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9507 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9508 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9509 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9511 val = tr32(TG3_CPMU_HST_ACC);
9512 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9513 val |= CPMU_HST_ACC_MACCLK_6_25;
9514 tw32(TG3_CPMU_HST_ACC, val);
9517 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9518 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9519 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9520 PCIE_PWR_MGMT_L1_THRESH_4MS;
9521 tw32(PCIE_PWR_MGMT_THRESH, val);
9523 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9524 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9526 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9528 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9529 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9532 if (tg3_flag(tp, L1PLLPD_EN)) {
9533 u32 grc_mode = tr32(GRC_MODE);
9535 /* Access the lower 1K of PL PCIE block registers. */
9536 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9537 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9539 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9540 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9541 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9543 tw32(GRC_MODE, grc_mode);
9546 if (tg3_flag(tp, 57765_CLASS)) {
9547 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9548 u32 grc_mode = tr32(GRC_MODE);
9550 /* Access the lower 1K of PL PCIE block registers. */
9551 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9552 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9554 val = tr32(TG3_PCIE_TLDLPL_PORT +
9555 TG3_PCIE_PL_LO_PHYCTL5);
9556 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9557 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9559 tw32(GRC_MODE, grc_mode);
9562 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9565 /* Fix transmit hangs */
9566 val = tr32(TG3_CPMU_PADRNG_CTL);
9567 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9568 tw32(TG3_CPMU_PADRNG_CTL, val);
9570 grc_mode = tr32(GRC_MODE);
9572 /* Access the lower 1K of DL PCIE block registers. */
9573 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9574 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9576 val = tr32(TG3_PCIE_TLDLPL_PORT +
9577 TG3_PCIE_DL_LO_FTSMAX);
9578 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9579 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9580 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9582 tw32(GRC_MODE, grc_mode);
9585 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9586 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9587 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9588 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9591 /* This works around an issue with Athlon chipsets on
9592 * B3 tigon3 silicon. This bit has no effect on any
9593 * other revision. But do not set this on PCI Express
9594 * chips and don't even touch the clocks if the CPMU is present.
9596 if (!tg3_flag(tp, CPMU_PRESENT)) {
9597 if (!tg3_flag(tp, PCI_EXPRESS))
9598 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9599 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9602 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9603 tg3_flag(tp, PCIX_MODE)) {
9604 val = tr32(TG3PCI_PCISTATE);
9605 val |= PCISTATE_RETRY_SAME_DMA;
9606 tw32(TG3PCI_PCISTATE, val);
9609 if (tg3_flag(tp, ENABLE_APE)) {
9610 /* Allow reads and writes to the
9611 * APE register and memory space.
9613 val = tr32(TG3PCI_PCISTATE);
9614 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9615 PCISTATE_ALLOW_APE_SHMEM_WR |
9616 PCISTATE_ALLOW_APE_PSPACE_WR;
9617 tw32(TG3PCI_PCISTATE, val);
9620 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9621 /* Enable some hw fixes. */
9622 val = tr32(TG3PCI_MSI_DATA);
9623 val |= (1 << 26) | (1 << 28) | (1 << 29);
9624 tw32(TG3PCI_MSI_DATA, val);
9627 /* Descriptor ring init may make accesses to the
9628 * NIC SRAM area to set up the TX descriptors, so we
9629 * can only do this after the hardware has been
9630 * successfully reset.
9632 err = tg3_init_rings(tp);
9636 if (tg3_flag(tp, 57765_PLUS)) {
9637 val = tr32(TG3PCI_DMA_RW_CTRL) &
9638 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9639 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9640 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9641 if (!tg3_flag(tp, 57765_CLASS) &&
9642 tg3_asic_rev(tp) != ASIC_REV_5717 &&
9643 tg3_asic_rev(tp) != ASIC_REV_5762)
9644 val |= DMA_RWCTRL_TAGGED_STAT_WA;
9645 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9646 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9647 tg3_asic_rev(tp) != ASIC_REV_5761) {
9648 /* This value is determined during the probe time DMA
9649 * engine test, tg3_test_dma.
9651 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9654 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9655 GRC_MODE_4X_NIC_SEND_RINGS |
9656 GRC_MODE_NO_TX_PHDR_CSUM |
9657 GRC_MODE_NO_RX_PHDR_CSUM);
9658 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9660 /* Pseudo-header checksum is done by hardware logic and not
9661 the offload processors, so make the chip do the pseudo-
9662 * header checksums on receive. For transmit it is more
9663 * convenient to do the pseudo-header checksum in software
9664 * as Linux does that on transmit for us in all cases.
9666 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9668 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9670 tw32(TG3_RX_PTP_CTL,
9671 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9673 if (tg3_flag(tp, PTP_CAPABLE))
9674 val |= GRC_MODE_TIME_SYNC_ENABLE;
9676 tw32(GRC_MODE, tp->grc_mode | val);
9678 /* Set up the timer prescaler register. Clock is always 66 MHz. */
9679 val = tr32(GRC_MISC_CFG);
9681 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9682 tw32(GRC_MISC_CFG, val);
9684 /* Initialize MBUF/DESC pool. */
9685 if (tg3_flag(tp, 5750_PLUS)) {
9687 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9688 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9689 if (tg3_asic_rev(tp) == ASIC_REV_5704)
9690 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9692 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9693 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9694 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9695 } else if (tg3_flag(tp, TSO_CAPABLE)) {
9698 fw_len = tp->fw_len;
9699 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9700 tw32(BUFMGR_MB_POOL_ADDR,
9701 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9702 tw32(BUFMGR_MB_POOL_SIZE,
9703 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9706 if (tp->dev->mtu <= ETH_DATA_LEN) {
9707 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9708 tp->bufmgr_config.mbuf_read_dma_low_water);
9709 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9710 tp->bufmgr_config.mbuf_mac_rx_low_water);
9711 tw32(BUFMGR_MB_HIGH_WATER,
9712 tp->bufmgr_config.mbuf_high_water);
9714 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9715 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9716 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9717 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9718 tw32(BUFMGR_MB_HIGH_WATER,
9719 tp->bufmgr_config.mbuf_high_water_jumbo);
9721 tw32(BUFMGR_DMA_LOW_WATER,
9722 tp->bufmgr_config.dma_low_water);
9723 tw32(BUFMGR_DMA_HIGH_WATER,
9724 tp->bufmgr_config.dma_high_water);
9726 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9727 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9728 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9729 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9730 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9731 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9732 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9733 tw32(BUFMGR_MODE, val);
9734 for (i = 0; i < 2000; i++) {
9735 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9740 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9744 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9745 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9747 tg3_setup_rxbd_thresholds(tp);
9749 /* Initialize TG3_BDINFO's at:
9750 * RCVDBDI_STD_BD: standard eth size rx ring
9751 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
9752 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
9755 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
9756 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
9757 * ring attribute flags
9758 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
9760 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9761 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9763 * The size of each ring is fixed in the firmware, but the location is configurable.
9766 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9767 ((u64) tpr->rx_std_mapping >> 32));
9768 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9769 ((u64) tpr->rx_std_mapping & 0xffffffff));
9770 if (!tg3_flag(tp, 5717_PLUS))
9771 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9772 NIC_SRAM_RX_BUFFER_DESC);
9774 /* Disable the mini ring */
9775 if (!tg3_flag(tp, 5705_PLUS))
9776 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9777 BDINFO_FLAGS_DISABLED);
9779 /* Program the jumbo buffer descriptor ring control
9780 * blocks on those devices that have them.
9782 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9783 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9785 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9786 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9787 ((u64) tpr->rx_jmb_mapping >> 32));
9788 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9789 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9790 val = TG3_RX_JMB_RING_SIZE(tp) <<
9791 BDINFO_FLAGS_MAXLEN_SHIFT;
9792 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9793 val | BDINFO_FLAGS_USE_EXT_RECV);
9794 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9795 tg3_flag(tp, 57765_CLASS) ||
9796 tg3_asic_rev(tp) == ASIC_REV_5762)
9797 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9798 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9800 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9801 BDINFO_FLAGS_DISABLED);
9804 if (tg3_flag(tp, 57765_PLUS)) {
9805 val = TG3_RX_STD_RING_SIZE(tp);
9806 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9807 val |= (TG3_RX_STD_DMA_SZ << 2);
9809 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9811 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9813 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9815 tpr->rx_std_prod_idx = tp->rx_pending;
9816 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9818 tpr->rx_jmb_prod_idx =
9819 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9820 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9822 tg3_rings_reset(tp);
9824 /* Initialize MAC address and backoff seed. */
9825 __tg3_set_mac_addr(tp, false);
9827 /* MTU + ethernet header + FCS + optional VLAN tag */
9828 tw32(MAC_RX_MTU_SIZE,
9829 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9831 /* The slot time is changed by tg3_setup_phy if we
9832 * run at gigabit with half duplex.
9834 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9835 (6 << TX_LENGTHS_IPG_SHIFT) |
9836 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9838 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9839 tg3_asic_rev(tp) == ASIC_REV_5762)
9840 val |= tr32(MAC_TX_LENGTHS) &
9841 (TX_LENGTHS_JMB_FRM_LEN_MSK |
9842 TX_LENGTHS_CNT_DWN_VAL_MSK);
9844 tw32(MAC_TX_LENGTHS, val);
9846 /* Receive rules. */
9847 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9848 tw32(RCVLPC_CONFIG, 0x0181);
9850 /* Calculate RDMAC_MODE setting early; we need it to determine
9851 * the RCVLPC_STATE_ENABLE mask.
9853 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9854 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9855 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9856 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9857 RDMAC_MODE_LNGREAD_ENAB);
9859 if (tg3_asic_rev(tp) == ASIC_REV_5717)
9860 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9862 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
9863 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9864 tg3_asic_rev(tp) == ASIC_REV_57780)
9865 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9866 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9867 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9869 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9870 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9871 if (tg3_flag(tp, TSO_CAPABLE) &&
9872 tg3_asic_rev(tp) == ASIC_REV_5705) {
9873 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9874 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9875 !tg3_flag(tp, IS_5788)) {
9876 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9880 if (tg3_flag(tp, PCI_EXPRESS))
9881 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9883 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9885 if (tp->dev->mtu <= ETH_DATA_LEN) {
9886 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
9887 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
9891 if (tg3_flag(tp, HW_TSO_1) ||
9892 tg3_flag(tp, HW_TSO_2) ||
9893 tg3_flag(tp, HW_TSO_3))
9894 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9896 if (tg3_flag(tp, 57765_PLUS) ||
9897 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9898 tg3_asic_rev(tp) == ASIC_REV_57780)
9899 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9901 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9902 tg3_asic_rev(tp) == ASIC_REV_5762)
9903 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9905 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
9906 tg3_asic_rev(tp) == ASIC_REV_5784 ||
9907 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9908 tg3_asic_rev(tp) == ASIC_REV_57780 ||
9909 tg3_flag(tp, 57765_PLUS)) {
9912 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9913 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
9915 tgtreg = TG3_RDMA_RSRVCTRL_REG;
9918 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9919 tg3_asic_rev(tp) == ASIC_REV_5762) {
9920 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9921 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9922 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9923 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9924 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9925 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9927 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9930 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
9931 tg3_asic_rev(tp) == ASIC_REV_5720 ||
9932 tg3_asic_rev(tp) == ASIC_REV_5762) {
9935 if (tg3_asic_rev(tp) == ASIC_REV_5762)
9936 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
9938 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
9942 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9943 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9946 /* Receive/send statistics. */
9947 if (tg3_flag(tp, 5750_PLUS)) {
9948 val = tr32(RCVLPC_STATS_ENABLE);
9949 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9950 tw32(RCVLPC_STATS_ENABLE, val);
9951 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9952 tg3_flag(tp, TSO_CAPABLE)) {
9953 val = tr32(RCVLPC_STATS_ENABLE);
9954 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9955 tw32(RCVLPC_STATS_ENABLE, val);
9957 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9959 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9960 tw32(SNDDATAI_STATSENAB, 0xffffff);
9961 tw32(SNDDATAI_STATSCTRL,
9962 (SNDDATAI_SCTRL_ENABLE |
9963 SNDDATAI_SCTRL_FASTUPD));
9965 /* Setup host coalescing engine. */
9966 tw32(HOSTCC_MODE, 0);
9967 for (i = 0; i < 2000; i++) {
9968 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9973 __tg3_set_coalesce(tp, &tp->coal);
9975 if (!tg3_flag(tp, 5705_PLUS)) {
9976 /* Status/statistics block address. See tg3_timer,
9977 * the tg3_periodic_fetch_stats call there, and
9978 * tg3_get_stats to see how this works for 5705/5750 chips.
9980 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9981 ((u64) tp->stats_mapping >> 32));
9982 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9983 ((u64) tp->stats_mapping & 0xffffffff));
9984 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9986 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9988 /* Clear statistics and status block memory areas */
9989 for (i = NIC_SRAM_STATS_BLK;
9990 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9992 tg3_write_mem(tp, i, 0);
9997 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9999 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10000 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10001 if (!tg3_flag(tp, 5705_PLUS))
10002 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10004 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10005 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10006 /* reset to prevent losing 1st rx packet intermittently */
10007 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10011 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10012 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10013 MAC_MODE_FHDE_ENABLE;
10014 if (tg3_flag(tp, ENABLE_APE))
10015 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10016 if (!tg3_flag(tp, 5705_PLUS) &&
10017 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10018 tg3_asic_rev(tp) != ASIC_REV_5700)
10019 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10020 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10023 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10024 * If TG3_FLAG_IS_NIC is zero, we should read the
10025 * register to preserve the GPIO settings for LOMs. The GPIOs,
10026 * whether used as inputs or outputs, are set by boot code after reset.
10029 if (!tg3_flag(tp, IS_NIC)) {
10032 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10033 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10034 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10036 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10037 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10038 GRC_LCLCTRL_GPIO_OUTPUT3;
10040 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10041 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10043 tp->grc_local_ctrl &= ~gpio_mask;
10044 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10046 /* GPIO1 must be driven high for eeprom write protect */
10047 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10048 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10049 GRC_LCLCTRL_GPIO_OUTPUT1);
10051 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10054 if (tg3_flag(tp, USING_MSIX)) {
10055 val = tr32(MSGINT_MODE);
10056 val |= MSGINT_MODE_ENABLE;
10057 if (tp->irq_cnt > 1)
10058 val |= MSGINT_MODE_MULTIVEC_EN;
10059 if (!tg3_flag(tp, 1SHOT_MSI))
10060 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10061 tw32(MSGINT_MODE, val);
10064 if (!tg3_flag(tp, 5705_PLUS)) {
10065 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10069 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10070 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10071 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10072 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10073 WDMAC_MODE_LNGREAD_ENAB);
10075 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10076 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10077 if (tg3_flag(tp, TSO_CAPABLE) &&
10078 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10079 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10081 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10082 !tg3_flag(tp, IS_5788)) {
10083 val |= WDMAC_MODE_RX_ACCEL;
10087 /* Enable host coalescing bug fix */
10088 if (tg3_flag(tp, 5755_PLUS))
10089 val |= WDMAC_MODE_STATUS_TAG_FIX;
10091 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10092 val |= WDMAC_MODE_BURST_ALL_DATA;
10094 tw32_f(WDMAC_MODE, val);
10097 if (tg3_flag(tp, PCIX_MODE)) {
10100 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10102 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10103 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10104 pcix_cmd |= PCI_X_CMD_READ_2K;
10105 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10106 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10107 pcix_cmd |= PCI_X_CMD_READ_2K;
10109 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10113 tw32_f(RDMAC_MODE, rdmac_mode);
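/* 5719 workaround, as set up in the block below: if any of the
 * TG3_NUM_RDMA_CHANNELS length registers already exceeds the current MTU,
 * TG3_LSO_RD_DMA_TX_LENGTH_WA is enabled and remembered via the
 * 5719_RDMA_BUG flag; tg3_periodic_fetch_stats() later clears the
 * workaround once enough frames have been transmitted.
 */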
10116 if (tg3_asic_rev(tp) == ASIC_REV_5719) {
10117 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10118 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10121 if (i < TG3_NUM_RDMA_CHANNELS) {
10122 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10123 val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
10124 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10125 tg3_flag_set(tp, 5719_RDMA_BUG);
10129 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10130 if (!tg3_flag(tp, 5705_PLUS))
10131 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10133 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10134 tw32(SNDDATAC_MODE,
10135 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10137 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10139 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10140 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10141 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10142 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10143 val |= RCVDBDI_MODE_LRG_RING_SZ;
10144 tw32(RCVDBDI_MODE, val);
10145 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10146 if (tg3_flag(tp, HW_TSO_1) ||
10147 tg3_flag(tp, HW_TSO_2) ||
10148 tg3_flag(tp, HW_TSO_3))
10149 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10150 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10151 if (tg3_flag(tp, ENABLE_TSS))
10152 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10153 tw32(SNDBDI_MODE, val);
10154 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10156 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10157 err = tg3_load_5701_a0_firmware_fix(tp);
10162 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10163 /* Ignore any errors for the firmware download. If download
10164 * fails, the device will operate with EEE disabled
10166 tg3_load_57766_firmware(tp);
10169 if (tg3_flag(tp, TSO_CAPABLE)) {
10170 err = tg3_load_tso_firmware(tp);
10175 tp->tx_mode = TX_MODE_ENABLE;
10177 if (tg3_flag(tp, 5755_PLUS) ||
10178 tg3_asic_rev(tp) == ASIC_REV_5906)
10179 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10181 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10182 tg3_asic_rev(tp) == ASIC_REV_5762) {
10183 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10184 tp->tx_mode &= ~val;
10185 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10188 tw32_f(MAC_TX_MODE, tp->tx_mode);
10191 if (tg3_flag(tp, ENABLE_RSS)) {
10192 tg3_rss_write_indir_tbl(tp);
10194 /* Setup the "secret" hash key. */
10195 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
10196 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
10197 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
10198 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
10199 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
10200 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
10201 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
10202 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
10203 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
10204 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
10207 tp->rx_mode = RX_MODE_ENABLE;
10208 if (tg3_flag(tp, 5755_PLUS))
10209 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10211 if (tg3_flag(tp, ENABLE_RSS))
10212 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10213 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10214 RX_MODE_RSS_IPV6_HASH_EN |
10215 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10216 RX_MODE_RSS_IPV4_HASH_EN |
10217 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10219 tw32_f(MAC_RX_MODE, tp->rx_mode);
10222 tw32(MAC_LED_CTRL, tp->led_ctrl);
10224 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10225 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10226 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10229 tw32_f(MAC_RX_MODE, tp->rx_mode);
10232 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10233 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10234 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10235 /* Set drive transmission level to 1.2V */
10236 /* only if the signal pre-emphasis bit is not set */
10237 val = tr32(MAC_SERDES_CFG);
10240 tw32(MAC_SERDES_CFG, val);
10242 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10243 tw32(MAC_SERDES_CFG, 0x616000);
10246 /* Prevent chip from dropping frames when flow control is enabled. */
10249 if (tg3_flag(tp, 57765_CLASS))
10253 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10255 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10256 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10257 /* Use hardware link auto-negotiation */
10258 tg3_flag_set(tp, HW_AUTONEG);
10261 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10262 tg3_asic_rev(tp) == ASIC_REV_5714) {
10265 tmp = tr32(SERDES_RX_CTRL);
10266 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10267 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10268 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10269 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10272 if (!tg3_flag(tp, USE_PHYLIB)) {
10273 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10274 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10276 err = tg3_setup_phy(tp, false);
10280 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10281 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10284 /* Clear CRC stats. */
10285 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10286 tg3_writephy(tp, MII_TG3_TEST1,
10287 tmp | MII_TG3_TEST1_CRC_EN);
10288 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10293 __tg3_set_rx_mode(tp->dev);
10295 /* Initialize receive rules. */
10296 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10297 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10298 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10299 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10301 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10305 if (tg3_flag(tp, ENABLE_ASF))
10309 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10311 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10313 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10315 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10317 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10319 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10321 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10323 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10325 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10327 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10329 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10331 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10333 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10335 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10343 if (tg3_flag(tp, ENABLE_APE))
10344 /* Write our heartbeat update interval to APE. */
10345 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10346 APE_HOST_HEARTBEAT_INT_DISABLE);
10348 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10353 /* Called at device open time to get the chip ready for
10354 * packet processing. Invoked with tp->lock held.
10356 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10358 tg3_switch_clocks(tp);
10360 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10362 return tg3_reset_hw(tp, reset_phy);
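/* tg3_sd_scan_scratchpad() below pulls the TG3_SD_NUM_RECS OCIR records out
 * of the APE scratchpad; any record whose signature or ACTIVE flag does not
 * check out is zeroed so callers such as tg3_hwmon_open() can skip it.
 */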
10365 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10369 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10370 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10372 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10375 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10376 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10377 memset(ocir, 0, TG3_OCIR_LEN);
10381 /* sysfs attributes for hwmon */
10382 static ssize_t tg3_show_temp(struct device *dev,
10383 struct device_attribute *devattr, char *buf)
10385 struct pci_dev *pdev = to_pci_dev(dev);
10386 struct net_device *netdev = pci_get_drvdata(pdev);
10387 struct tg3 *tp = netdev_priv(netdev);
10388 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10391 spin_lock_bh(&tp->lock);
10392 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10393 sizeof(temperature));
10394 spin_unlock_bh(&tp->lock);
10395 return sprintf(buf, "%u\n", temperature);
10399 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10400 TG3_TEMP_SENSOR_OFFSET);
10401 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10402 TG3_TEMP_CAUTION_OFFSET);
10403 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10404 TG3_TEMP_MAX_OFFSET);
10406 static struct attribute *tg3_attributes[] = {
10407 &sensor_dev_attr_temp1_input.dev_attr.attr,
10408 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10409 &sensor_dev_attr_temp1_max.dev_attr.attr,
10413 static const struct attribute_group tg3_group = {
10414 .attrs = tg3_attributes,
10417 static void tg3_hwmon_close(struct tg3 *tp)
10419 if (tp->hwmon_dev) {
10420 hwmon_device_unregister(tp->hwmon_dev);
10421 tp->hwmon_dev = NULL;
10422 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10426 static void tg3_hwmon_open(struct tg3 *tp)
10430 struct pci_dev *pdev = tp->pdev;
10431 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10433 tg3_sd_scan_scratchpad(tp, ocirs);
10435 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10436 if (!ocirs[i].src_data_length)
10439 size += ocirs[i].src_hdr_length;
10440 size += ocirs[i].src_data_length;
10446 /* Register hwmon sysfs hooks */
10447 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10449 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10453 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10454 if (IS_ERR(tp->hwmon_dev)) {
10455 tp->hwmon_dev = NULL;
10456 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10457 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
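/* TG3_STAT_ADD32 (defined below) folds a 32-bit hardware counter into a
 * 64-bit software counter: the register value is added to the low word and,
 * if the low word wraps (new low < value just added), a carry is propagated
 * into the high word. tg3_periodic_fetch_stats() uses it to accumulate the
 * MAC statistics registers from the driver timer.
 */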
10462 #define TG3_STAT_ADD32(PSTAT, REG) \
10463 do { u32 __val = tr32(REG); \
10464 (PSTAT)->low += __val; \
10465 if ((PSTAT)->low < __val) \
10466 (PSTAT)->high += 1; \
10469 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10471 struct tg3_hw_stats *sp = tp->hw_stats;
10476 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10477 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10478 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10479 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10480 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10481 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10482 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10483 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10484 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10485 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10486 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10487 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10488 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10489 if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
10490 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10491 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10494 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10495 val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
10496 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10497 tg3_flag_clear(tp, 5719_RDMA_BUG);
10500 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10501 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10502 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10503 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10504 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10505 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10506 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10507 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10508 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10509 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10510 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10511 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10512 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10513 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10515 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10516 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10517 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10518 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10519 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10521 u32 val = tr32(HOSTCC_FLOW_ATTN);
10522 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10524 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10525 sp->rx_discards.low += val;
10526 if (sp->rx_discards.low < val)
10527 sp->rx_discards.high += 1;
10529 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10531 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
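/* tg3_chk_missed_msi() runs from the driver timer: if a NAPI context still
 * has work pending but its recorded rx/tx consumer indices have not advanced
 * since the previous tick, the MSI is assumed to have been lost and, after a
 * one-tick grace period tracked in chk_msi_cnt, the pending work is serviced
 * as if the interrupt had fired.
 */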
10534 static void tg3_chk_missed_msi(struct tg3 *tp)
10538 for (i = 0; i < tp->irq_cnt; i++) {
10539 struct tg3_napi *tnapi = &tp->napi[i];
10541 if (tg3_has_work(tnapi)) {
10542 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10543 tnapi->last_tx_cons == tnapi->tx_cons) {
10544 if (tnapi->chk_msi_cnt < 1) {
10545 tnapi->chk_msi_cnt++;
10551 tnapi->chk_msi_cnt = 0;
10552 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10553 tnapi->last_tx_cons = tnapi->tx_cons;
10557 static void tg3_timer(unsigned long __opaque)
10559 struct tg3 *tp = (struct tg3 *) __opaque;
10561 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10562 goto restart_timer;
10564 spin_lock(&tp->lock);
10566 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10567 tg3_flag(tp, 57765_CLASS))
10568 tg3_chk_missed_msi(tp);
10570 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10571 /* BCM4785: Flush posted writes from GbE to host memory. */
10575 if (!tg3_flag(tp, TAGGED_STATUS)) {
10576 /* All of this garbage is because when using non-tagged
10577 * IRQ status the mailbox/status_block protocol the chip
10578 * uses with the cpu is race prone.
10580 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10581 tw32(GRC_LOCAL_CTRL,
10582 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10584 tw32(HOSTCC_MODE, tp->coalesce_mode |
10585 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10588 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10589 spin_unlock(&tp->lock);
10590 tg3_reset_task_schedule(tp);
10591 goto restart_timer;
10595 /* This part only runs once per second. */
10596 if (!--tp->timer_counter) {
10597 if (tg3_flag(tp, 5705_PLUS))
10598 tg3_periodic_fetch_stats(tp);
10600 if (tp->setlpicnt && !--tp->setlpicnt)
10601 tg3_phy_eee_enable(tp);
10603 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10607 mac_stat = tr32(MAC_STATUS);
10610 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10611 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10613 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10617 tg3_setup_phy(tp, false);
10618 } else if (tg3_flag(tp, POLL_SERDES)) {
10619 u32 mac_stat = tr32(MAC_STATUS);
10620 int need_setup = 0;
10623 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10626 if (!tp->link_up &&
10627 (mac_stat & (MAC_STATUS_PCS_SYNCED |
10628 MAC_STATUS_SIGNAL_DET))) {
10632 if (!tp->serdes_counter) {
10635 ~MAC_MODE_PORT_MODE_MASK));
10637 tw32_f(MAC_MODE, tp->mac_mode);
10640 tg3_setup_phy(tp, false);
10642 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10643 tg3_flag(tp, 5780_CLASS)) {
10644 tg3_serdes_parallel_detect(tp);
10647 tp->timer_counter = tp->timer_multiplier;
10650 /* Heartbeat is only sent once every 2 seconds.
10652 * The heartbeat is to tell the ASF firmware that the host
10653 * driver is still alive. In the event that the OS crashes,
10654 * ASF needs to reset the hardware to free up the FIFO space
10655 * that may be filled with rx packets destined for the host.
10656 * If the FIFO is full, ASF will no longer function properly.
10658 * Unintended resets have been reported on real time kernels
10659 * where the timer doesn't run on time. Netpoll will also have the same effect.
10662 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10663 * to check the ring condition when the heartbeat is expiring
10664 * before doing the reset. This will prevent most unintended
10667 if (!--tp->asf_counter) {
10668 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10669 tg3_wait_for_event_ack(tp);
10671 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10672 FWCMD_NICDRV_ALIVE3);
10673 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10674 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10675 TG3_FW_UPDATE_TIMEOUT_SEC);
10677 tg3_generate_fw_event(tp);
10679 tp->asf_counter = tp->asf_multiplier;
10682 spin_unlock(&tp->lock);
10685 tp->timer.expires = jiffies + tp->timer_offset;
10686 add_timer(&tp->timer);
10689 static void tg3_timer_init(struct tg3 *tp)
10691 if (tg3_flag(tp, TAGGED_STATUS) &&
10692 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10693 !tg3_flag(tp, 57765_CLASS))
10694 tp->timer_offset = HZ;
10696 tp->timer_offset = HZ / 10;
10698 BUG_ON(tp->timer_offset > HZ);
10700 tp->timer_multiplier = (HZ / tp->timer_offset);
10701 tp->asf_multiplier = (HZ / tp->timer_offset) *
10702 TG3_FW_UPDATE_FREQ_SEC;
10704 init_timer(&tp->timer);
10705 tp->timer.data = (unsigned long) tp;
10706 tp->timer.function = tg3_timer;
10709 static void tg3_timer_start(struct tg3 *tp)
10711 tp->asf_counter = tp->asf_multiplier;
10712 tp->timer_counter = tp->timer_multiplier;
10714 tp->timer.expires = jiffies + tp->timer_offset;
10715 add_timer(&tp->timer);
10718 static void tg3_timer_stop(struct tg3 *tp)
10720 del_timer_sync(&tp->timer);
10723 /* Restart hardware after configuration changes, self-test, etc.
10724 * Invoked with tp->lock held.
10726 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
10727 __releases(tp->lock)
10728 __acquires(tp->lock)
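	/* The __releases/__acquires annotations above tell sparse that the
	 * error path below temporarily drops tp->lock (tg3_full_unlock) around
	 * stopping the timer, re-enabling NAPI and calling dev_close(), then
	 * re-takes it (tg3_full_lock) before returning.
	 */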
10732 err = tg3_init_hw(tp, reset_phy);
10734 netdev_err(tp->dev,
10735 "Failed to re-initialize device, aborting\n");
10736 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10737 tg3_full_unlock(tp);
10738 tg3_timer_stop(tp);
10740 tg3_napi_enable(tp);
10741 dev_close(tp->dev);
10742 tg3_full_lock(tp, 0);
10747 static void tg3_reset_task(struct work_struct *work)
10749 struct tg3 *tp = container_of(work, struct tg3, reset_task);
10752 tg3_full_lock(tp, 0);
10754 if (!netif_running(tp->dev)) {
10755 tg3_flag_clear(tp, RESET_TASK_PENDING);
10756 tg3_full_unlock(tp);
10760 tg3_full_unlock(tp);
10764 tg3_netif_stop(tp);
10766 tg3_full_lock(tp, 1);
10768 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10769 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10770 tp->write32_rx_mbox = tg3_write_flush_reg32;
10771 tg3_flag_set(tp, MBOX_WRITE_REORDER);
10772 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10775 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10776 err = tg3_init_hw(tp, true);
10780 tg3_netif_start(tp);
10783 tg3_full_unlock(tp);
10788 tg3_flag_clear(tp, RESET_TASK_PENDING);
10791 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10794 unsigned long flags;
10796 struct tg3_napi *tnapi = &tp->napi[irq_num];
10798 if (tp->irq_cnt == 1)
10799 name = tp->dev->name;
10801 name = &tnapi->irq_lbl[0];
10802 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10803 name[IFNAMSIZ-1] = 0;
10806 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10808 if (tg3_flag(tp, 1SHOT_MSI))
10809 fn = tg3_msi_1shot;
10812 fn = tg3_interrupt;
10813 if (tg3_flag(tp, TAGGED_STATUS))
10814 fn = tg3_interrupt_tagged;
10815 flags = IRQF_SHARED;
10818 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10821 static int tg3_test_interrupt(struct tg3 *tp)
10823 struct tg3_napi *tnapi = &tp->napi[0];
10824 struct net_device *dev = tp->dev;
10825 int err, i, intr_ok = 0;
10828 if (!netif_running(dev))
10831 tg3_disable_ints(tp);
10833 free_irq(tnapi->irq_vec, tnapi);
10836 * Turn off MSI one shot mode. Otherwise this test has no
10837 * observable way to know whether the interrupt was delivered.
10839 if (tg3_flag(tp, 57765_PLUS)) {
10840 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10841 tw32(MSGINT_MODE, val);
10844 err = request_irq(tnapi->irq_vec, tg3_test_isr,
10845 IRQF_SHARED, dev->name, tnapi);
10849 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10850 tg3_enable_ints(tp);
10852 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10855 for (i = 0; i < 5; i++) {
10856 u32 int_mbox, misc_host_ctrl;
10858 int_mbox = tr32_mailbox(tnapi->int_mbox);
10859 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10861 if ((int_mbox != 0) ||
10862 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10867 if (tg3_flag(tp, 57765_PLUS) &&
10868 tnapi->hw_status->status_tag != tnapi->last_tag)
10869 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10874 tg3_disable_ints(tp);
10876 free_irq(tnapi->irq_vec, tnapi);
10878 err = tg3_request_irq(tp, 0);
10884 /* Reenable MSI one shot mode. */
10885 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10886 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10887 tw32(MSGINT_MODE, val);
10895 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
10896 * successfully restored
10898 static int tg3_test_msi(struct tg3 *tp)
10903 if (!tg3_flag(tp, USING_MSI))
10906 /* Turn off SERR reporting in case MSI terminates with Master Abort. */
10909 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10910 pci_write_config_word(tp->pdev, PCI_COMMAND,
10911 pci_cmd & ~PCI_COMMAND_SERR);
10913 err = tg3_test_interrupt(tp);
10915 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10920 /* other failures */
10924 /* MSI test failed, go back to INTx mode */
10925 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10926 "to INTx mode. Please report this failure to the PCI "
10927 "maintainer and include system chipset information\n");
10929 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10931 pci_disable_msi(tp->pdev);
10933 tg3_flag_clear(tp, USING_MSI);
10934 tp->napi[0].irq_vec = tp->pdev->irq;
10936 err = tg3_request_irq(tp, 0);
10940 /* Need to reset the chip because the MSI cycle may have terminated
10941 * with Master Abort.
10943 tg3_full_lock(tp, 1);
10945 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10946 err = tg3_init_hw(tp, true);
10948 tg3_full_unlock(tp);
10951 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10956 static int tg3_request_firmware(struct tg3 *tp)
10958 const struct tg3_firmware_hdr *fw_hdr;
10960 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10961 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10966 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
10968 /* Firmware blob starts with version numbers, followed by
10969 * start address and _full_ length including BSS sections
10970 * (which must be longer than the actual data, of course
10973 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
10974 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
10975 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10976 tp->fw_len, tp->fw_needed);
10977 release_firmware(tp->fw);
10982 /* We no longer need firmware; we have it. */
10983 tp->fw_needed = NULL;
10987 static u32 tg3_irq_count(struct tg3 *tp)
10989 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10992 /* We want as many rx rings enabled as there are cpus.
10993 * In multiqueue MSI-X mode, the first MSI-X vector
10994 * only deals with link interrupts, etc, so we add
10995 * one to the number of vectors we are requesting.
10997 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11003 static bool tg3_enable_msix(struct tg3 *tp)
11006 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11008 tp->txq_cnt = tp->txq_req;
11009 tp->rxq_cnt = tp->rxq_req;
11011 tp->rxq_cnt = netif_get_num_default_rss_queues();
11012 if (tp->rxq_cnt > tp->rxq_max)
11013 tp->rxq_cnt = tp->rxq_max;
11015 /* Disable multiple TX rings by default. Simple round-robin hardware
11016 * scheduling of the TX rings can cause starvation of rings with
11017 * small packets when other rings have TSO or jumbo packets.
11022 tp->irq_cnt = tg3_irq_count(tp);
11024 for (i = 0; i < tp->irq_max; i++) {
11025 msix_ent[i].entry = i;
11026 msix_ent[i].vector = 0;
11029 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
11032 } else if (rc != 0) {
11033 if (pci_enable_msix(tp->pdev, msix_ent, rc))
11035 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11038 tp->rxq_cnt = max(rc - 1, 1);
11040 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11043 for (i = 0; i < tp->irq_max; i++)
11044 tp->napi[i].irq_vec = msix_ent[i].vector;
11046 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11047 pci_disable_msix(tp->pdev);
11051 if (tp->irq_cnt == 1)
11054 tg3_flag_set(tp, ENABLE_RSS);
11056 if (tp->txq_cnt > 1)
11057 tg3_flag_set(tp, ENABLE_TSS);
11059 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11064 static void tg3_ints_init(struct tg3 *tp)
11066 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11067 !tg3_flag(tp, TAGGED_STATUS)) {
11068 /* All MSI supporting chips should support tagged
11069 * status. Assert that this is the case.
11071 netdev_warn(tp->dev,
11072 "MSI without TAGGED_STATUS? Not using MSI\n");
11076 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11077 tg3_flag_set(tp, USING_MSIX);
11078 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11079 tg3_flag_set(tp, USING_MSI);
11081 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11082 u32 msi_mode = tr32(MSGINT_MODE);
11083 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11084 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11085 if (!tg3_flag(tp, 1SHOT_MSI))
11086 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11087 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11090 if (!tg3_flag(tp, USING_MSIX)) {
11092 tp->napi[0].irq_vec = tp->pdev->irq;
11095 if (tp->irq_cnt == 1) {
11098 netif_set_real_num_tx_queues(tp->dev, 1);
11099 netif_set_real_num_rx_queues(tp->dev, 1);
11103 static void tg3_ints_fini(struct tg3 *tp)
11105 if (tg3_flag(tp, USING_MSIX))
11106 pci_disable_msix(tp->pdev);
11107 else if (tg3_flag(tp, USING_MSI))
11108 pci_disable_msi(tp->pdev);
11109 tg3_flag_clear(tp, USING_MSI);
11110 tg3_flag_clear(tp, USING_MSIX);
11111 tg3_flag_clear(tp, ENABLE_RSS);
11112 tg3_flag_clear(tp, ENABLE_TSS);
11115 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11118 struct net_device *dev = tp->dev;
11122 * Set up interrupts first so we know how
11123 * many NAPI resources to allocate
11127 tg3_rss_check_indir_tbl(tp);
11129 /* The placement of this call is tied
11130 * to the setup and use of Host TX descriptors.
11132 err = tg3_alloc_consistent(tp);
11138 tg3_napi_enable(tp);
11140 for (i = 0; i < tp->irq_cnt; i++) {
11141 struct tg3_napi *tnapi = &tp->napi[i];
11142 err = tg3_request_irq(tp, i);
11144 for (i--; i >= 0; i--) {
11145 tnapi = &tp->napi[i];
11146 free_irq(tnapi->irq_vec, tnapi);
11152 tg3_full_lock(tp, 0);
11154 err = tg3_init_hw(tp, reset_phy);
11156 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11157 tg3_free_rings(tp);
11160 tg3_full_unlock(tp);
11165 if (test_irq && tg3_flag(tp, USING_MSI)) {
11166 err = tg3_test_msi(tp);
11169 tg3_full_lock(tp, 0);
11170 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11171 tg3_free_rings(tp);
11172 tg3_full_unlock(tp);
11177 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11178 u32 val = tr32(PCIE_TRANSACTION_CFG);
11180 tw32(PCIE_TRANSACTION_CFG,
11181 val | PCIE_TRANS_CFG_1SHOT_MSI);
11187 tg3_hwmon_open(tp);
11189 tg3_full_lock(tp, 0);
11191 tg3_timer_start(tp);
11192 tg3_flag_set(tp, INIT_COMPLETE);
11193 tg3_enable_ints(tp);
11198 tg3_ptp_resume(tp);
11201 tg3_full_unlock(tp);
11203 netif_tx_start_all_queues(dev);
11206 * Reset the loopback feature if it was turned on while the device was down;
11207 * make sure that it is installed properly now.
11209 if (dev->features & NETIF_F_LOOPBACK)
11210 tg3_set_loopback(dev, dev->features);
11215 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11216 struct tg3_napi *tnapi = &tp->napi[i];
11217 free_irq(tnapi->irq_vec, tnapi);
11221 tg3_napi_disable(tp);
11223 tg3_free_consistent(tp);
11231 static void tg3_stop(struct tg3 *tp)
11235 tg3_reset_task_cancel(tp);
11236 tg3_netif_stop(tp);
11238 tg3_timer_stop(tp);
11240 tg3_hwmon_close(tp);
11244 tg3_full_lock(tp, 1);
11246 tg3_disable_ints(tp);
11248 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11249 tg3_free_rings(tp);
11250 tg3_flag_clear(tp, INIT_COMPLETE);
11252 tg3_full_unlock(tp);
11254 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11255 struct tg3_napi *tnapi = &tp->napi[i];
11256 free_irq(tnapi->irq_vec, tnapi);
11263 tg3_free_consistent(tp);
11266 static int tg3_open(struct net_device *dev)
11268 struct tg3 *tp = netdev_priv(dev);
11271 if (tp->fw_needed) {
11272 err = tg3_request_firmware(tp);
11273 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11275 netdev_warn(tp->dev, "EEE capability disabled\n");
11276 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11277 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11278 netdev_warn(tp->dev, "EEE capability restored\n");
11279 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11281 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11285 netdev_warn(tp->dev, "TSO capability disabled\n");
11286 tg3_flag_clear(tp, TSO_CAPABLE);
11287 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11288 netdev_notice(tp->dev, "TSO capability restored\n");
11289 tg3_flag_set(tp, TSO_CAPABLE);
11293 tg3_carrier_off(tp);
11295 err = tg3_power_up(tp);
11299 tg3_full_lock(tp, 0);
11301 tg3_disable_ints(tp);
11302 tg3_flag_clear(tp, INIT_COMPLETE);
11304 tg3_full_unlock(tp);
11306 err = tg3_start(tp,
11307 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11310 tg3_frob_aux_power(tp, false);
11311 pci_set_power_state(tp->pdev, PCI_D3hot);
11314 if (tg3_flag(tp, PTP_CAPABLE)) {
11315 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
11317 if (IS_ERR(tp->ptp_clock))
11318 tp->ptp_clock = NULL;
11324 static int tg3_close(struct net_device *dev)
11326 struct tg3 *tp = netdev_priv(dev);
11332 /* Clear stats across close / open calls */
11333 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11334 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11336 tg3_power_down(tp);
11338 tg3_carrier_off(tp);
11343 static inline u64 get_stat64(tg3_stat64_t *val)
11345 return ((u64)val->high << 32) | ((u64)val->low);
11348 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11350 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11352 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11353 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11354 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11357 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11358 tg3_writephy(tp, MII_TG3_TEST1,
11359 val | MII_TG3_TEST1_CRC_EN);
11360 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11364 tp->phy_crc_errors += val;
11366 return tp->phy_crc_errors;
11369 return get_stat64(&hw_stats->rx_fcs_errors);
11372 #define ESTAT_ADD(member) \
11373 estats->member = old_estats->member + \
11374 get_stat64(&hw_stats->member)
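/* ESTAT_ADD() reports each ethtool statistic as the previously saved
 * snapshot in tp->estats_prev plus the current 64-bit value read from the
 * hardware statistics block, so totals are preserved across hardware resets
 * that clear the MAC statistics counters.
 */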
11376 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11378 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11379 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11381 ESTAT_ADD(rx_octets);
11382 ESTAT_ADD(rx_fragments);
11383 ESTAT_ADD(rx_ucast_packets);
11384 ESTAT_ADD(rx_mcast_packets);
11385 ESTAT_ADD(rx_bcast_packets);
11386 ESTAT_ADD(rx_fcs_errors);
11387 ESTAT_ADD(rx_align_errors);
11388 ESTAT_ADD(rx_xon_pause_rcvd);
11389 ESTAT_ADD(rx_xoff_pause_rcvd);
11390 ESTAT_ADD(rx_mac_ctrl_rcvd);
11391 ESTAT_ADD(rx_xoff_entered);
11392 ESTAT_ADD(rx_frame_too_long_errors);
11393 ESTAT_ADD(rx_jabbers);
11394 ESTAT_ADD(rx_undersize_packets);
11395 ESTAT_ADD(rx_in_length_errors);
11396 ESTAT_ADD(rx_out_length_errors);
11397 ESTAT_ADD(rx_64_or_less_octet_packets);
11398 ESTAT_ADD(rx_65_to_127_octet_packets);
11399 ESTAT_ADD(rx_128_to_255_octet_packets);
11400 ESTAT_ADD(rx_256_to_511_octet_packets);
11401 ESTAT_ADD(rx_512_to_1023_octet_packets);
11402 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11403 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11404 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11405 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11406 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11408 ESTAT_ADD(tx_octets);
11409 ESTAT_ADD(tx_collisions);
11410 ESTAT_ADD(tx_xon_sent);
11411 ESTAT_ADD(tx_xoff_sent);
11412 ESTAT_ADD(tx_flow_control);
11413 ESTAT_ADD(tx_mac_errors);
11414 ESTAT_ADD(tx_single_collisions);
11415 ESTAT_ADD(tx_mult_collisions);
11416 ESTAT_ADD(tx_deferred);
11417 ESTAT_ADD(tx_excessive_collisions);
11418 ESTAT_ADD(tx_late_collisions);
11419 ESTAT_ADD(tx_collide_2times);
11420 ESTAT_ADD(tx_collide_3times);
11421 ESTAT_ADD(tx_collide_4times);
11422 ESTAT_ADD(tx_collide_5times);
11423 ESTAT_ADD(tx_collide_6times);
11424 ESTAT_ADD(tx_collide_7times);
11425 ESTAT_ADD(tx_collide_8times);
11426 ESTAT_ADD(tx_collide_9times);
11427 ESTAT_ADD(tx_collide_10times);
11428 ESTAT_ADD(tx_collide_11times);
11429 ESTAT_ADD(tx_collide_12times);
11430 ESTAT_ADD(tx_collide_13times);
11431 ESTAT_ADD(tx_collide_14times);
11432 ESTAT_ADD(tx_collide_15times);
11433 ESTAT_ADD(tx_ucast_packets);
11434 ESTAT_ADD(tx_mcast_packets);
11435 ESTAT_ADD(tx_bcast_packets);
11436 ESTAT_ADD(tx_carrier_sense_errors);
11437 ESTAT_ADD(tx_discards);
11438 ESTAT_ADD(tx_errors);
11440 ESTAT_ADD(dma_writeq_full);
11441 ESTAT_ADD(dma_write_prioq_full);
11442 ESTAT_ADD(rxbds_empty);
11443 ESTAT_ADD(rx_discards);
11444 ESTAT_ADD(rx_errors);
11445 ESTAT_ADD(rx_threshold_hit);
11447 ESTAT_ADD(dma_readq_full);
11448 ESTAT_ADD(dma_read_prioq_full);
11449 ESTAT_ADD(tx_comp_queue_full);
11451 ESTAT_ADD(ring_set_send_prod_index);
11452 ESTAT_ADD(ring_status_update);
11453 ESTAT_ADD(nic_irqs);
11454 ESTAT_ADD(nic_avoided_irqs);
11455 ESTAT_ADD(nic_tx_threshold_hit);
11457 ESTAT_ADD(mbuf_lwm_thresh_hit);
11460 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11462 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11463 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11465 stats->rx_packets = old_stats->rx_packets +
11466 get_stat64(&hw_stats->rx_ucast_packets) +
11467 get_stat64(&hw_stats->rx_mcast_packets) +
11468 get_stat64(&hw_stats->rx_bcast_packets);
11470 stats->tx_packets = old_stats->tx_packets +
11471 get_stat64(&hw_stats->tx_ucast_packets) +
11472 get_stat64(&hw_stats->tx_mcast_packets) +
11473 get_stat64(&hw_stats->tx_bcast_packets);
11475 stats->rx_bytes = old_stats->rx_bytes +
11476 get_stat64(&hw_stats->rx_octets);
11477 stats->tx_bytes = old_stats->tx_bytes +
11478 get_stat64(&hw_stats->tx_octets);
11480 stats->rx_errors = old_stats->rx_errors +
11481 get_stat64(&hw_stats->rx_errors);
11482 stats->tx_errors = old_stats->tx_errors +
11483 get_stat64(&hw_stats->tx_errors) +
11484 get_stat64(&hw_stats->tx_mac_errors) +
11485 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11486 get_stat64(&hw_stats->tx_discards);
11488 stats->multicast = old_stats->multicast +
11489 get_stat64(&hw_stats->rx_mcast_packets);
11490 stats->collisions = old_stats->collisions +
11491 get_stat64(&hw_stats->tx_collisions);
11493 stats->rx_length_errors = old_stats->rx_length_errors +
11494 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11495 get_stat64(&hw_stats->rx_undersize_packets);
11497 stats->rx_over_errors = old_stats->rx_over_errors +
11498 get_stat64(&hw_stats->rxbds_empty);
11499 stats->rx_frame_errors = old_stats->rx_frame_errors +
11500 get_stat64(&hw_stats->rx_align_errors);
11501 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11502 get_stat64(&hw_stats->tx_discards);
11503 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11504 get_stat64(&hw_stats->tx_carrier_sense_errors);
11506 stats->rx_crc_errors = old_stats->rx_crc_errors +
11507 tg3_calc_crc_errors(tp);
11509 stats->rx_missed_errors = old_stats->rx_missed_errors +
11510 get_stat64(&hw_stats->rx_discards);
11512 stats->rx_dropped = tp->rx_dropped;
11513 stats->tx_dropped = tp->tx_dropped;
11516 static int tg3_get_regs_len(struct net_device *dev)
11518 return TG3_REG_BLK_SIZE;
11521 static void tg3_get_regs(struct net_device *dev,
11522 struct ethtool_regs *regs, void *_p)
11524 struct tg3 *tp = netdev_priv(dev);
11528 memset(_p, 0, TG3_REG_BLK_SIZE);
11530 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11533 tg3_full_lock(tp, 0);
11535 tg3_dump_legacy_regs(tp, (u32 *)_p);
11537 tg3_full_unlock(tp);
11540 static int tg3_get_eeprom_len(struct net_device *dev)
11542 struct tg3 *tp = netdev_priv(dev);
11544 return tp->nvram_size;
11547 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11549 struct tg3 *tp = netdev_priv(dev);
11552 u32 i, offset, len, b_offset, b_count;
11555 if (tg3_flag(tp, NO_NVRAM))
11558 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11561 offset = eeprom->offset;
11565 eeprom->magic = TG3_EEPROM_MAGIC;
11568 /* adjustments to start on required 4 byte boundary */
11569 b_offset = offset & 3;
11570 b_count = 4 - b_offset;
11571 if (b_count > len) {
11572 /* i.e. offset=1 len=2 */
11575 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11578 memcpy(data, ((char *)&val) + b_offset, b_count);
11581 eeprom->len += b_count;
11584 /* read bytes up to the last 4 byte boundary */
11585 pd = &data[eeprom->len];
11586 for (i = 0; i < (len - (len & 3)); i += 4) {
11587 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11592 memcpy(pd + i, &val, 4);
11597 /* read last bytes not ending on 4 byte boundary */
11598 pd = &data[eeprom->len];
11600 b_offset = offset + len - b_count;
11601 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11604 memcpy(pd, &val, b_count);
11605 eeprom->len += b_count;
11610 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11612 struct tg3 *tp = netdev_priv(dev);
11614 u32 offset, len, b_offset, odd_len;
11618 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11621 if (tg3_flag(tp, NO_NVRAM) ||
11622 eeprom->magic != TG3_EEPROM_MAGIC)
11625 offset = eeprom->offset;
11628 if ((b_offset = (offset & 3))) {
11629 /* adjustments to start on required 4 byte boundary */
11630 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11641 /* adjustments to end on required 4 byte boundary */
11643 len = (len + 3) & ~3;
11644 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11650 if (b_offset || odd_len) {
11651 buf = kmalloc(len, GFP_KERNEL);
11655 memcpy(buf, &start, 4);
11657 memcpy(buf+len-4, &end, 4);
11658 memcpy(buf + b_offset, data, eeprom->len);
11661 ret = tg3_nvram_write_block(tp, offset, len, buf);
11669 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11671 struct tg3 *tp = netdev_priv(dev);
11673 if (tg3_flag(tp, USE_PHYLIB)) {
11674 struct phy_device *phydev;
11675 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11677 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11678 return phy_ethtool_gset(phydev, cmd);
11681 cmd->supported = (SUPPORTED_Autoneg);
11683 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11684 cmd->supported |= (SUPPORTED_1000baseT_Half |
11685 SUPPORTED_1000baseT_Full);
11687 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11688 cmd->supported |= (SUPPORTED_100baseT_Half |
11689 SUPPORTED_100baseT_Full |
11690 SUPPORTED_10baseT_Half |
11691 SUPPORTED_10baseT_Full |
11693 cmd->port = PORT_TP;
11695 cmd->supported |= SUPPORTED_FIBRE;
11696 cmd->port = PORT_FIBRE;
11699 cmd->advertising = tp->link_config.advertising;
11700 if (tg3_flag(tp, PAUSE_AUTONEG)) {
11701 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11702 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11703 cmd->advertising |= ADVERTISED_Pause;
11705 cmd->advertising |= ADVERTISED_Pause |
11706 ADVERTISED_Asym_Pause;
11708 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11709 cmd->advertising |= ADVERTISED_Asym_Pause;
11712 if (netif_running(dev) && tp->link_up) {
11713 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11714 cmd->duplex = tp->link_config.active_duplex;
11715 cmd->lp_advertising = tp->link_config.rmt_adv;
11716 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11717 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11718 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11720 cmd->eth_tp_mdix = ETH_TP_MDI;
11723 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11724 cmd->duplex = DUPLEX_UNKNOWN;
11725 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11727 cmd->phy_address = tp->phy_addr;
11728 cmd->transceiver = XCVR_INTERNAL;
11729 cmd->autoneg = tp->link_config.autoneg;
11735 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11737 struct tg3 *tp = netdev_priv(dev);
11738 u32 speed = ethtool_cmd_speed(cmd);
11740 if (tg3_flag(tp, USE_PHYLIB)) {
11741 struct phy_device *phydev;
11742 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11744 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11745 return phy_ethtool_sset(phydev, cmd);
11748 if (cmd->autoneg != AUTONEG_ENABLE &&
11749 cmd->autoneg != AUTONEG_DISABLE)
11752 if (cmd->autoneg == AUTONEG_DISABLE &&
11753 cmd->duplex != DUPLEX_FULL &&
11754 cmd->duplex != DUPLEX_HALF)
11757 if (cmd->autoneg == AUTONEG_ENABLE) {
11758 u32 mask = ADVERTISED_Autoneg |
11760 ADVERTISED_Asym_Pause;
11762 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11763 mask |= ADVERTISED_1000baseT_Half |
11764 ADVERTISED_1000baseT_Full;
11766 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11767 mask |= ADVERTISED_100baseT_Half |
11768 ADVERTISED_100baseT_Full |
11769 ADVERTISED_10baseT_Half |
11770 ADVERTISED_10baseT_Full |
11773 mask |= ADVERTISED_FIBRE;
11775 if (cmd->advertising & ~mask)
11778 mask &= (ADVERTISED_1000baseT_Half |
11779 ADVERTISED_1000baseT_Full |
11780 ADVERTISED_100baseT_Half |
11781 ADVERTISED_100baseT_Full |
11782 ADVERTISED_10baseT_Half |
11783 ADVERTISED_10baseT_Full);
11785 cmd->advertising &= mask;
11787 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
11788 if (speed != SPEED_1000)
11791 if (cmd->duplex != DUPLEX_FULL)
11794 if (speed != SPEED_100 &&
11800 tg3_full_lock(tp, 0);
11802 tp->link_config.autoneg = cmd->autoneg;
11803 if (cmd->autoneg == AUTONEG_ENABLE) {
11804 tp->link_config.advertising = (cmd->advertising |
11805 ADVERTISED_Autoneg);
11806 tp->link_config.speed = SPEED_UNKNOWN;
11807 tp->link_config.duplex = DUPLEX_UNKNOWN;
11809 tp->link_config.advertising = 0;
11810 tp->link_config.speed = speed;
11811 tp->link_config.duplex = cmd->duplex;
11814 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
11816 tg3_warn_mgmt_link_flap(tp);
11818 if (netif_running(dev))
11819 tg3_setup_phy(tp, true);
11821 tg3_full_unlock(tp);
11826 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11828 struct tg3 *tp = netdev_priv(dev);
11830 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11831 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11832 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11833 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11836 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11838 struct tg3 *tp = netdev_priv(dev);
11840 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11841 wol->supported = WAKE_MAGIC;
11843 wol->supported = 0;
11845 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11846 wol->wolopts = WAKE_MAGIC;
11847 memset(&wol->sopass, 0, sizeof(wol->sopass));
11850 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11852 struct tg3 *tp = netdev_priv(dev);
11853 struct device *dp = &tp->pdev->dev;
11855 if (wol->wolopts & ~WAKE_MAGIC)
11857 if ((wol->wolopts & WAKE_MAGIC) &&
11858 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11861 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11863 spin_lock_bh(&tp->lock);
11864 if (device_may_wakeup(dp))
11865 tg3_flag_set(tp, WOL_ENABLE);
11867 tg3_flag_clear(tp, WOL_ENABLE);
11868 spin_unlock_bh(&tp->lock);
11873 static u32 tg3_get_msglevel(struct net_device *dev)
11875 struct tg3 *tp = netdev_priv(dev);
11876 return tp->msg_enable;
11879 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11881 struct tg3 *tp = netdev_priv(dev);
11882 tp->msg_enable = value;
11885 static int tg3_nway_reset(struct net_device *dev)
11887 struct tg3 *tp = netdev_priv(dev);
11890 if (!netif_running(dev))
11893 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11896 tg3_warn_mgmt_link_flap(tp);
11898 if (tg3_flag(tp, USE_PHYLIB)) {
11899 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11901 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
11905 spin_lock_bh(&tp->lock);
11907 tg3_readphy(tp, MII_BMCR, &bmcr);
11908 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11909 ((bmcr & BMCR_ANENABLE) ||
11910 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
11911 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
11915 spin_unlock_bh(&tp->lock);
11921 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11923 struct tg3 *tp = netdev_priv(dev);
11925 ering->rx_max_pending = tp->rx_std_ring_mask;
11926 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11927 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11929 ering->rx_jumbo_max_pending = 0;
11931 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11933 ering->rx_pending = tp->rx_pending;
11934 if (tg3_flag(tp, JUMBO_RING_ENABLE))
11935 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11937 ering->rx_jumbo_pending = 0;
11939 ering->tx_pending = tp->napi[0].tx_pending;
11942 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11944 struct tg3 *tp = netdev_priv(dev);
11945 int i, irq_sync = 0, err = 0;
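	/* Reject requests the hardware cannot honor: RX or jumbo counts
	 * beyond the ring masks, a TX ring larger than the hardware ring,
	 * or a TX ring too small to hold one maximally fragmented skb
	 * (three of them when the TSO_BUG workaround is active).
	 */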
11947 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11948 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
11949 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11950 (ering->tx_pending <= MAX_SKB_FRAGS) ||
11951 (tg3_flag(tp, TSO_BUG) &&
11952 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
11955 if (netif_running(dev)) {
11957 tg3_netif_stop(tp);
11961 tg3_full_lock(tp, irq_sync);
11963 tp->rx_pending = ering->rx_pending;
11965 if (tg3_flag(tp, MAX_RXPEND_64) &&
11966 tp->rx_pending > 63)
11967 tp->rx_pending = 63;
11968 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
11970 for (i = 0; i < tp->irq_max; i++)
11971 tp->napi[i].tx_pending = ering->tx_pending;
11973 if (netif_running(dev)) {
11974 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11975 err = tg3_restart_hw(tp, false);
11977 tg3_netif_start(tp);
11980 tg3_full_unlock(tp);
11982 if (irq_sync && !err)
11988 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11990 struct tg3 *tp = netdev_priv(dev);
11992 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11994 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11995 epause->rx_pause = 1;
11997 epause->rx_pause = 0;
11999 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12000 epause->tx_pause = 1;
12002 epause->tx_pause = 0;
12005 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12007 struct tg3 *tp = netdev_priv(dev);
12010 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12011 tg3_warn_mgmt_link_flap(tp);
12013 if (tg3_flag(tp, USE_PHYLIB)) {
12015 struct phy_device *phydev;
12017 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12019 if (!(phydev->supported & SUPPORTED_Pause) ||
12020 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12021 (epause->rx_pause != epause->tx_pause)))
12024 tp->link_config.flowctrl = 0;
12025 if (epause->rx_pause) {
12026 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12028 if (epause->tx_pause) {
12029 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12030 newadv = ADVERTISED_Pause;
12032 newadv = ADVERTISED_Pause |
12033 ADVERTISED_Asym_Pause;
12034 } else if (epause->tx_pause) {
12035 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12036 newadv = ADVERTISED_Asym_Pause;
12040 if (epause->autoneg)
12041 tg3_flag_set(tp, PAUSE_AUTONEG);
12043 tg3_flag_clear(tp, PAUSE_AUTONEG);
12045 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12046 u32 oldadv = phydev->advertising &
12047 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12048 if (oldadv != newadv) {
12049 phydev->advertising &=
12050 ~(ADVERTISED_Pause |
12051 ADVERTISED_Asym_Pause);
12052 phydev->advertising |= newadv;
12053 if (phydev->autoneg) {
12055 * Always renegotiate the link to
12056 * inform our link partner of our
12057 * flow control settings, even if the
12058 * flow control is forced. Let
12059 * tg3_adjust_link() do the final
12060 * flow control setup.
12062 return phy_start_aneg(phydev);
12066 if (!epause->autoneg)
12067 tg3_setup_flow_control(tp, 0, 0);
12069 tp->link_config.advertising &=
12070 ~(ADVERTISED_Pause |
12071 ADVERTISED_Asym_Pause);
12072 tp->link_config.advertising |= newadv;
12077 if (netif_running(dev)) {
12078 tg3_netif_stop(tp);
12082 tg3_full_lock(tp, irq_sync);
12084 if (epause->autoneg)
12085 tg3_flag_set(tp, PAUSE_AUTONEG);
12087 tg3_flag_clear(tp, PAUSE_AUTONEG);
12088 if (epause->rx_pause)
12089 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12091 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12092 if (epause->tx_pause)
12093 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12095 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12097 if (netif_running(dev)) {
12098 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12099 err = tg3_restart_hw(tp, false);
12101 tg3_netif_start(tp);
12104 tg3_full_unlock(tp);
12107 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12112 static int tg3_get_sset_count(struct net_device *dev, int sset)
12116 return TG3_NUM_TEST;
12118 return TG3_NUM_STATS;
12120 return -EOPNOTSUPP;
12124 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12125 u32 *rules __always_unused)
12127 struct tg3 *tp = netdev_priv(dev);
12129 if (!tg3_flag(tp, SUPPORT_MSIX))
12130 return -EOPNOTSUPP;
12132 switch (info->cmd) {
12133 case ETHTOOL_GRXRINGS:
12134 if (netif_running(tp->dev))
12135 info->data = tp->rxq_cnt;
12137 info->data = num_online_cpus();
12138 if (info->data > TG3_RSS_MAX_NUM_QS)
12139 info->data = TG3_RSS_MAX_NUM_QS;
12142 /* The first interrupt vector only
12143 * handles link interrupts.
12149 return -EOPNOTSUPP;
12153 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12156 struct tg3 *tp = netdev_priv(dev);
12158 if (tg3_flag(tp, SUPPORT_MSIX))
12159 size = TG3_RSS_INDIR_TBL_SIZE;
12164 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
12166 struct tg3 *tp = netdev_priv(dev);
12169 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12170 indir[i] = tp->rss_ind_tbl[i];
12175 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
12177 struct tg3 *tp = netdev_priv(dev);
12180 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12181 tp->rss_ind_tbl[i] = indir[i];
12183 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12186 /* It is legal to write the indirection
12187 * table while the device is running.
12189 tg3_full_lock(tp, 0);
12190 tg3_rss_write_indir_tbl(tp);
12191 tg3_full_unlock(tp);
12196 static void tg3_get_channels(struct net_device *dev,
12197 struct ethtool_channels *channel)
12199 struct tg3 *tp = netdev_priv(dev);
12200 u32 deflt_qs = netif_get_num_default_rss_queues();
12202 channel->max_rx = tp->rxq_max;
12203 channel->max_tx = tp->txq_max;
12205 if (netif_running(dev)) {
12206 channel->rx_count = tp->rxq_cnt;
12207 channel->tx_count = tp->txq_cnt;
12210 channel->rx_count = tp->rxq_req;
12212 channel->rx_count = min(deflt_qs, tp->rxq_max);
12215 channel->tx_count = tp->txq_req;
12217 channel->tx_count = min(deflt_qs, tp->txq_max);
12221 static int tg3_set_channels(struct net_device *dev,
12222 struct ethtool_channels *channel)
12224 struct tg3 *tp = netdev_priv(dev);
12226 if (!tg3_flag(tp, SUPPORT_MSIX))
12227 return -EOPNOTSUPP;
12229 if (channel->rx_count > tp->rxq_max ||
12230 channel->tx_count > tp->txq_max)
12233 tp->rxq_req = channel->rx_count;
12234 tp->txq_req = channel->tx_count;
12236 if (!netif_running(dev))
12241 tg3_carrier_off(tp);
12243 tg3_start(tp, true, false, false);
12248 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12250 switch (stringset) {
12252 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12255 		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12258 WARN_ON(1); /* we need a WARN() */
12263 static int tg3_set_phys_id(struct net_device *dev,
12264 enum ethtool_phys_id_state state)
12266 struct tg3 *tp = netdev_priv(dev);
12268 if (!netif_running(tp->dev))
12272 case ETHTOOL_ID_ACTIVE:
12273 return 1; /* cycle on/off once per second */
12275 case ETHTOOL_ID_ON:
12276 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12277 LED_CTRL_1000MBPS_ON |
12278 LED_CTRL_100MBPS_ON |
12279 LED_CTRL_10MBPS_ON |
12280 LED_CTRL_TRAFFIC_OVERRIDE |
12281 LED_CTRL_TRAFFIC_BLINK |
12282 LED_CTRL_TRAFFIC_LED);
12285 case ETHTOOL_ID_OFF:
12286 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12287 LED_CTRL_TRAFFIC_OVERRIDE);
12290 case ETHTOOL_ID_INACTIVE:
12291 tw32(MAC_LED_CTRL, tp->led_ctrl);
12298 static void tg3_get_ethtool_stats(struct net_device *dev,
12299 struct ethtool_stats *estats, u64 *tmp_stats)
12301 struct tg3 *tp = netdev_priv(dev);
12304 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12306 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12309 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12313 u32 offset = 0, len = 0;
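	/* Locate the VPD image: for parts carrying the standard EEPROM
	 * magic, walk the NVM directory for an extended-VPD entry and fall
	 * back to the fixed VPD offset; otherwise read the VPD through PCI
	 * configuration space.
	 */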
12316 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12319 if (magic == TG3_EEPROM_MAGIC) {
12320 for (offset = TG3_NVM_DIR_START;
12321 offset < TG3_NVM_DIR_END;
12322 offset += TG3_NVM_DIRENT_SIZE) {
12323 if (tg3_nvram_read(tp, offset, &val))
12326 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12327 TG3_NVM_DIRTYPE_EXTVPD)
12331 if (offset != TG3_NVM_DIR_END) {
12332 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12333 if (tg3_nvram_read(tp, offset + 4, &offset))
12336 offset = tg3_nvram_logical_addr(tp, offset);
12340 if (!offset || !len) {
12341 offset = TG3_NVM_VPD_OFF;
12342 len = TG3_NVM_VPD_LEN;
12345 buf = kmalloc(len, GFP_KERNEL);
12349 if (magic == TG3_EEPROM_MAGIC) {
12350 for (i = 0; i < len; i += 4) {
12351 /* The data is in little-endian format in NVRAM.
12352 * Use the big-endian read routines to preserve
12353 * the byte order as it exists in NVRAM.
12355 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12361 unsigned int pos = 0;
12363 ptr = (u8 *)&buf[0];
12364 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12365 cnt = pci_read_vpd(tp->pdev, pos,
12367 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12385 #define NVRAM_TEST_SIZE 0x100
12386 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12387 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12388 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12389 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12390 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12391 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12392 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12393 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
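/* Verify the NVRAM image: legacy images are checked via the bootstrap and
 * manufacturing-block CRCs plus the VPD checksum; self-boot images use a
 * simple byte checksum or per-byte parity depending on their format.
 */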
12395 static int tg3_test_nvram(struct tg3 *tp)
12397 u32 csum, magic, len;
12399 int i, j, k, err = 0, size;
12401 if (tg3_flag(tp, NO_NVRAM))
12404 if (tg3_nvram_read(tp, 0, &magic) != 0)
12407 if (magic == TG3_EEPROM_MAGIC)
12408 size = NVRAM_TEST_SIZE;
12409 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12410 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12411 TG3_EEPROM_SB_FORMAT_1) {
12412 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12413 case TG3_EEPROM_SB_REVISION_0:
12414 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12416 case TG3_EEPROM_SB_REVISION_2:
12417 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12419 case TG3_EEPROM_SB_REVISION_3:
12420 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12422 case TG3_EEPROM_SB_REVISION_4:
12423 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12425 case TG3_EEPROM_SB_REVISION_5:
12426 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12428 case TG3_EEPROM_SB_REVISION_6:
12429 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12436 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12437 size = NVRAM_SELFBOOT_HW_SIZE;
12441 buf = kmalloc(size, GFP_KERNEL);
12446 for (i = 0, j = 0; i < size; i += 4, j++) {
12447 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12454 /* Selfboot format */
12455 magic = be32_to_cpu(buf[0]);
12456 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12457 TG3_EEPROM_MAGIC_FW) {
12458 u8 *buf8 = (u8 *) buf, csum8 = 0;
12460 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12461 TG3_EEPROM_SB_REVISION_2) {
12462 /* For rev 2, the csum doesn't include the MBA. */
12463 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12465 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12468 for (i = 0; i < size; i++)
12481 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12482 TG3_EEPROM_MAGIC_HW) {
12483 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12484 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12485 u8 *buf8 = (u8 *) buf;
12487 /* Separate the parity bits and the data bytes. */
12488 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12489 if ((i == 0) || (i == 8)) {
12493 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12494 parity[k++] = buf8[i] & msk;
12496 } else if (i == 16) {
12500 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12501 parity[k++] = buf8[i] & msk;
12504 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12505 parity[k++] = buf8[i] & msk;
12508 data[j++] = buf8[i];
12512 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12513 u8 hw8 = hweight8(data[i]);
12515 if ((hw8 & 0x1) && parity[i])
12517 else if (!(hw8 & 0x1) && !parity[i])
12526 /* Bootstrap checksum at offset 0x10 */
12527 csum = calc_crc((unsigned char *) buf, 0x10);
12528 if (csum != le32_to_cpu(buf[0x10/4]))
12531 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12532 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12533 if (csum != le32_to_cpu(buf[0xfc/4]))
12538 buf = tg3_vpd_readblock(tp, &len);
12542 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12544 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12548 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12551 i += PCI_VPD_LRDT_TAG_SIZE;
12552 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12553 PCI_VPD_RO_KEYWORD_CHKSUM);
12557 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12559 for (i = 0; i <= j; i++)
12560 csum8 += ((u8 *)buf)[i];
12574 #define TG3_SERDES_TIMEOUT_SEC 2
12575 #define TG3_COPPER_TIMEOUT_SEC 6
12577 static int tg3_test_link(struct tg3 *tp)
12581 if (!netif_running(tp->dev))
12584 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12585 max = TG3_SERDES_TIMEOUT_SEC;
12587 max = TG3_COPPER_TIMEOUT_SEC;
12589 for (i = 0; i < max; i++) {
12593 if (msleep_interruptible(1000))
12600 /* Only test the commonly used registers */
12601 static int tg3_test_registers(struct tg3 *tp)
12603 int i, is_5705, is_5750;
12604 u32 offset, read_mask, write_mask, val, save_val, read_val;
12608 #define TG3_FL_5705 0x1
12609 #define TG3_FL_NOT_5705 0x2
12610 #define TG3_FL_NOT_5788 0x4
12611 #define TG3_FL_NOT_5750 0x8
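	/* Each table entry gives a register offset, a flags mask restricting
	 * which chip generations the entry applies to, a mask of read-only
	 * bits, and a mask of read/write bits exercised by the walk below.
	 */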
12615 /* MAC Control Registers */
12616 { MAC_MODE, TG3_FL_NOT_5705,
12617 0x00000000, 0x00ef6f8c },
12618 { MAC_MODE, TG3_FL_5705,
12619 0x00000000, 0x01ef6b8c },
12620 { MAC_STATUS, TG3_FL_NOT_5705,
12621 0x03800107, 0x00000000 },
12622 { MAC_STATUS, TG3_FL_5705,
12623 0x03800100, 0x00000000 },
12624 { MAC_ADDR_0_HIGH, 0x0000,
12625 0x00000000, 0x0000ffff },
12626 { MAC_ADDR_0_LOW, 0x0000,
12627 0x00000000, 0xffffffff },
12628 { MAC_RX_MTU_SIZE, 0x0000,
12629 0x00000000, 0x0000ffff },
12630 { MAC_TX_MODE, 0x0000,
12631 0x00000000, 0x00000070 },
12632 { MAC_TX_LENGTHS, 0x0000,
12633 0x00000000, 0x00003fff },
12634 { MAC_RX_MODE, TG3_FL_NOT_5705,
12635 0x00000000, 0x000007fc },
12636 { MAC_RX_MODE, TG3_FL_5705,
12637 0x00000000, 0x000007dc },
12638 { MAC_HASH_REG_0, 0x0000,
12639 0x00000000, 0xffffffff },
12640 { MAC_HASH_REG_1, 0x0000,
12641 0x00000000, 0xffffffff },
12642 { MAC_HASH_REG_2, 0x0000,
12643 0x00000000, 0xffffffff },
12644 { MAC_HASH_REG_3, 0x0000,
12645 0x00000000, 0xffffffff },
12647 /* Receive Data and Receive BD Initiator Control Registers. */
12648 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12649 0x00000000, 0xffffffff },
12650 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12651 0x00000000, 0xffffffff },
12652 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12653 0x00000000, 0x00000003 },
12654 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12655 0x00000000, 0xffffffff },
12656 { RCVDBDI_STD_BD+0, 0x0000,
12657 0x00000000, 0xffffffff },
12658 { RCVDBDI_STD_BD+4, 0x0000,
12659 0x00000000, 0xffffffff },
12660 { RCVDBDI_STD_BD+8, 0x0000,
12661 0x00000000, 0xffff0002 },
12662 { RCVDBDI_STD_BD+0xc, 0x0000,
12663 0x00000000, 0xffffffff },
12665 /* Receive BD Initiator Control Registers. */
12666 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12667 0x00000000, 0xffffffff },
12668 { RCVBDI_STD_THRESH, TG3_FL_5705,
12669 0x00000000, 0x000003ff },
12670 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12671 0x00000000, 0xffffffff },
12673 /* Host Coalescing Control Registers. */
12674 { HOSTCC_MODE, TG3_FL_NOT_5705,
12675 0x00000000, 0x00000004 },
12676 { HOSTCC_MODE, TG3_FL_5705,
12677 0x00000000, 0x000000f6 },
12678 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12679 0x00000000, 0xffffffff },
12680 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12681 0x00000000, 0x000003ff },
12682 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12683 0x00000000, 0xffffffff },
12684 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12685 0x00000000, 0x000003ff },
12686 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12687 0x00000000, 0xffffffff },
12688 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12689 0x00000000, 0x000000ff },
12690 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12691 0x00000000, 0xffffffff },
12692 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12693 0x00000000, 0x000000ff },
12694 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12695 0x00000000, 0xffffffff },
12696 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12697 0x00000000, 0xffffffff },
12698 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12699 0x00000000, 0xffffffff },
12700 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12701 0x00000000, 0x000000ff },
12702 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12703 0x00000000, 0xffffffff },
12704 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12705 0x00000000, 0x000000ff },
12706 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12707 0x00000000, 0xffffffff },
12708 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12709 0x00000000, 0xffffffff },
12710 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12711 0x00000000, 0xffffffff },
12712 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12713 0x00000000, 0xffffffff },
12714 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12715 0x00000000, 0xffffffff },
12716 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12717 0xffffffff, 0x00000000 },
12718 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12719 0xffffffff, 0x00000000 },
12721 /* Buffer Manager Control Registers. */
12722 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12723 0x00000000, 0x007fff80 },
12724 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12725 0x00000000, 0x007fffff },
12726 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12727 0x00000000, 0x0000003f },
12728 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12729 0x00000000, 0x000001ff },
12730 { BUFMGR_MB_HIGH_WATER, 0x0000,
12731 0x00000000, 0x000001ff },
12732 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12733 0xffffffff, 0x00000000 },
12734 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12735 0xffffffff, 0x00000000 },
12737 /* Mailbox Registers */
12738 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12739 0x00000000, 0x000001ff },
12740 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12741 0x00000000, 0x000001ff },
12742 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12743 0x00000000, 0x000007ff },
12744 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12745 0x00000000, 0x000001ff },
12747 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12750 is_5705 = is_5750 = 0;
12751 if (tg3_flag(tp, 5705_PLUS)) {
12753 if (tg3_flag(tp, 5750_PLUS))
12757 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12758 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12761 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12764 if (tg3_flag(tp, IS_5788) &&
12765 (reg_tbl[i].flags & TG3_FL_NOT_5788))
12768 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12771 offset = (u32) reg_tbl[i].offset;
12772 read_mask = reg_tbl[i].read_mask;
12773 write_mask = reg_tbl[i].write_mask;
12775 /* Save the original register content */
12776 save_val = tr32(offset);
12778 /* Determine the read-only value. */
12779 read_val = save_val & read_mask;
12781 /* Write zero to the register, then make sure the read-only bits
12782 * are not changed and the read/write bits are all zeros.
12786 val = tr32(offset);
12788 /* Test the read-only and read/write bits. */
12789 if (((val & read_mask) != read_val) || (val & write_mask))
12792 /* Write ones to all the bits defined by RdMask and WrMask, then
12793 * make sure the read-only bits are not changed and the
12794 * read/write bits are all ones.
12796 tw32(offset, read_mask | write_mask);
12798 val = tr32(offset);
12800 /* Test the read-only bits. */
12801 if ((val & read_mask) != read_val)
12804 /* Test the read/write bits. */
12805 if ((val & write_mask) != write_mask)
12808 tw32(offset, save_val);
12814 if (netif_msg_hw(tp))
12815 netdev_err(tp->dev,
12816 "Register test failed at offset %x\n", offset);
12817 tw32(offset, save_val);
12821 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12823 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
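	/* Write each pattern to every word in the region and read it back,
	 * failing on the first mismatch.
	 */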
12827 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12828 for (j = 0; j < len; j += 4) {
12831 tg3_write_mem(tp, offset + j, test_pattern[i]);
12832 tg3_read_mem(tp, offset + j, &val);
12833 if (val != test_pattern[i])
12840 static int tg3_test_memory(struct tg3 *tp)
12842 static struct mem_entry {
12845 } mem_tbl_570x[] = {
12846 { 0x00000000, 0x00b50},
12847 { 0x00002000, 0x1c000},
12848 { 0xffffffff, 0x00000}
12849 }, mem_tbl_5705[] = {
12850 { 0x00000100, 0x0000c},
12851 { 0x00000200, 0x00008},
12852 { 0x00004000, 0x00800},
12853 { 0x00006000, 0x01000},
12854 { 0x00008000, 0x02000},
12855 { 0x00010000, 0x0e000},
12856 { 0xffffffff, 0x00000}
12857 }, mem_tbl_5755[] = {
12858 { 0x00000200, 0x00008},
12859 { 0x00004000, 0x00800},
12860 { 0x00006000, 0x00800},
12861 { 0x00008000, 0x02000},
12862 { 0x00010000, 0x0c000},
12863 { 0xffffffff, 0x00000}
12864 }, mem_tbl_5906[] = {
12865 { 0x00000200, 0x00008},
12866 { 0x00004000, 0x00400},
12867 { 0x00006000, 0x00400},
12868 { 0x00008000, 0x01000},
12869 { 0x00010000, 0x01000},
12870 { 0xffffffff, 0x00000}
12871 }, mem_tbl_5717[] = {
12872 { 0x00000200, 0x00008},
12873 { 0x00010000, 0x0a000},
12874 { 0x00020000, 0x13c00},
12875 { 0xffffffff, 0x00000}
12876 }, mem_tbl_57765[] = {
12877 { 0x00000200, 0x00008},
12878 { 0x00004000, 0x00800},
12879 { 0x00006000, 0x09800},
12880 { 0x00010000, 0x0a000},
12881 { 0xffffffff, 0x00000}
12883 struct mem_entry *mem_tbl;
12887 if (tg3_flag(tp, 5717_PLUS))
12888 mem_tbl = mem_tbl_5717;
12889 else if (tg3_flag(tp, 57765_CLASS) ||
12890 tg3_asic_rev(tp) == ASIC_REV_5762)
12891 mem_tbl = mem_tbl_57765;
12892 else if (tg3_flag(tp, 5755_PLUS))
12893 mem_tbl = mem_tbl_5755;
12894 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
12895 mem_tbl = mem_tbl_5906;
12896 else if (tg3_flag(tp, 5705_PLUS))
12897 mem_tbl = mem_tbl_5705;
12899 mem_tbl = mem_tbl_570x;
12901 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
12902 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12910 #define TG3_TSO_MSS 500
12912 #define TG3_TSO_IP_HDR_LEN 20
12913 #define TG3_TSO_TCP_HDR_LEN 20
12914 #define TG3_TSO_TCP_OPT_LEN 12
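/* Canned IPv4 + TCP header (20-byte IP header, 20-byte TCP header plus
 * 12 bytes of TCP options) copied into the test frame by tg3_run_loopback()
 * when exercising TSO loopback.
 */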
12916 static const u8 tg3_tso_header[] = {
12918 0x45, 0x00, 0x00, 0x00,
12919 0x00, 0x00, 0x40, 0x00,
12920 0x40, 0x06, 0x00, 0x00,
12921 0x0a, 0x00, 0x00, 0x01,
12922 0x0a, 0x00, 0x00, 0x02,
12923 0x0d, 0x00, 0xe0, 0x00,
12924 0x00, 0x00, 0x01, 0x00,
12925 0x00, 0x00, 0x02, 0x00,
12926 0x80, 0x10, 0x10, 0x00,
12927 0x14, 0x09, 0x00, 0x00,
12928 0x01, 0x01, 0x08, 0x0a,
12929 0x11, 0x11, 0x11, 0x11,
12930 0x11, 0x11, 0x11, 0x11,
12933 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
12935 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
12936 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
12938 struct sk_buff *skb;
12939 u8 *tx_data, *rx_data;
12941 int num_pkts, tx_len, rx_len, i, err;
12942 struct tg3_rx_buffer_desc *desc;
12943 struct tg3_napi *tnapi, *rnapi;
12944 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
12946 tnapi = &tp->napi[0];
12947 rnapi = &tp->napi[0];
12948 if (tp->irq_cnt > 1) {
12949 if (tg3_flag(tp, ENABLE_RSS))
12950 rnapi = &tp->napi[1];
12951 if (tg3_flag(tp, ENABLE_TSS))
12952 tnapi = &tp->napi[1];
12954 coal_now = tnapi->coal_now | rnapi->coal_now;
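	/* Build a test frame, post it on the transmit ring, then poll the
	 * status block until the frame has been consumed and looped back
	 * onto the receive return ring, where its length, checksum and
	 * payload are verified.
	 */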
12959 skb = netdev_alloc_skb(tp->dev, tx_len);
12963 tx_data = skb_put(skb, tx_len);
12964 memcpy(tx_data, tp->dev->dev_addr, 6);
12965 memset(tx_data + 6, 0x0, 8);
12967 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
12969 if (tso_loopback) {
12970 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12972 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12973 TG3_TSO_TCP_OPT_LEN;
12975 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12976 sizeof(tg3_tso_header));
12979 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12980 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12982 /* Set the total length field in the IP header */
12983 iph->tot_len = htons((u16)(mss + hdr_len));
12985 base_flags = (TXD_FLAG_CPU_PRE_DMA |
12986 TXD_FLAG_CPU_POST_DMA);
12988 if (tg3_flag(tp, HW_TSO_1) ||
12989 tg3_flag(tp, HW_TSO_2) ||
12990 tg3_flag(tp, HW_TSO_3)) {
12992 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
12993 th = (struct tcphdr *)&tx_data[val];
12996 base_flags |= TXD_FLAG_TCPUDP_CSUM;
12998 if (tg3_flag(tp, HW_TSO_3)) {
12999 mss |= (hdr_len & 0xc) << 12;
13000 if (hdr_len & 0x10)
13001 base_flags |= 0x00000010;
13002 base_flags |= (hdr_len & 0x3e0) << 5;
13003 } else if (tg3_flag(tp, HW_TSO_2))
13004 mss |= hdr_len << 9;
13005 else if (tg3_flag(tp, HW_TSO_1) ||
13006 tg3_asic_rev(tp) == ASIC_REV_5705) {
13007 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13009 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13012 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13015 data_off = ETH_HLEN;
13017 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13018 tx_len > VLAN_ETH_FRAME_LEN)
13019 base_flags |= TXD_FLAG_JMB_PKT;
13022 for (i = data_off; i < tx_len; i++)
13023 tx_data[i] = (u8) (i & 0xff);
13025 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13026 if (pci_dma_mapping_error(tp->pdev, map)) {
13027 dev_kfree_skb(skb);
13031 val = tnapi->tx_prod;
13032 tnapi->tx_buffers[val].skb = skb;
13033 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13035 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13040 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13042 budget = tg3_tx_avail(tnapi);
13043 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13044 base_flags | TXD_FLAG_END, mss, 0)) {
13045 tnapi->tx_buffers[val].skb = NULL;
13046 dev_kfree_skb(skb);
13052 /* Sync BD data before updating mailbox */
13055 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13056 tr32_mailbox(tnapi->prodmbox);
13060 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13061 for (i = 0; i < 35; i++) {
13062 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13067 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13068 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13069 if ((tx_idx == tnapi->tx_prod) &&
13070 (rx_idx == (rx_start_idx + num_pkts)))
13074 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13075 dev_kfree_skb(skb);
13077 if (tx_idx != tnapi->tx_prod)
13080 if (rx_idx != rx_start_idx + num_pkts)
13084 while (rx_idx != rx_start_idx) {
13085 desc = &rnapi->rx_rcb[rx_start_idx++];
13086 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13087 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13089 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13090 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13093 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13096 if (!tso_loopback) {
13097 if (rx_len != tx_len)
13100 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13101 if (opaque_key != RXD_OPAQUE_RING_STD)
13104 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13107 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13108 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13109 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13113 if (opaque_key == RXD_OPAQUE_RING_STD) {
13114 rx_data = tpr->rx_std_buffers[desc_idx].data;
13115 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13117 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13118 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13119 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13124 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13125 PCI_DMA_FROMDEVICE);
13127 rx_data += TG3_RX_OFFSET(tp);
13128 for (i = data_off; i < rx_len; i++, val++) {
13129 if (*(rx_data + i) != (u8) (val & 0xff))
13136 /* tg3_free_rings will unmap and free the rx_data */
13141 #define TG3_STD_LOOPBACK_FAILED 1
13142 #define TG3_JMB_LOOPBACK_FAILED 2
13143 #define TG3_TSO_LOOPBACK_FAILED 4
13144 #define TG3_LOOPBACK_FAILED \
13145 (TG3_STD_LOOPBACK_FAILED | \
13146 TG3_JMB_LOOPBACK_FAILED | \
13147 TG3_TSO_LOOPBACK_FAILED)
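/* Run the standard-MTU, TSO and jumbo-frame loopback variants (as supported)
 * in MAC, internal-PHY and, optionally, external loopback modes, accumulating
 * per-mode failure bits in data[].
 */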
13149 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13153 u32 jmb_pkt_sz = 9000;
13156 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13158 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13159 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13161 if (!netif_running(tp->dev)) {
13162 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13163 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13165 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13169 err = tg3_reset_hw(tp, true);
13171 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13172 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13174 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13178 if (tg3_flag(tp, ENABLE_RSS)) {
13181 /* Reroute all rx packets to the 1st queue */
13182 for (i = MAC_RSS_INDIR_TBL_0;
13183 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13187 /* HW errata - mac loopback fails in some cases on 5780.
13188 * Normal traffic and PHY loopback are not affected by
13189 * errata. Also, the MAC loopback test is deprecated for
13190 * all newer ASIC revisions.
13192 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13193 !tg3_flag(tp, CPMU_PRESENT)) {
13194 tg3_mac_loopback(tp, true);
13196 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13197 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13199 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13200 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13201 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13203 tg3_mac_loopback(tp, false);
13206 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13207 !tg3_flag(tp, USE_PHYLIB)) {
13210 tg3_phy_lpbk_set(tp, 0, false);
13212 /* Wait for link */
13213 for (i = 0; i < 100; i++) {
13214 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13219 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13220 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13221 if (tg3_flag(tp, TSO_CAPABLE) &&
13222 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13223 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13224 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13225 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13226 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13229 tg3_phy_lpbk_set(tp, 0, true);
13231 /* All link indications report up, but the hardware
13232 * isn't really ready for about 20 msec. Double it
13237 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13238 data[TG3_EXT_LOOPB_TEST] |=
13239 TG3_STD_LOOPBACK_FAILED;
13240 if (tg3_flag(tp, TSO_CAPABLE) &&
13241 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13242 data[TG3_EXT_LOOPB_TEST] |=
13243 TG3_TSO_LOOPBACK_FAILED;
13244 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13245 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13246 data[TG3_EXT_LOOPB_TEST] |=
13247 TG3_JMB_LOOPBACK_FAILED;
13250 /* Re-enable gphy autopowerdown. */
13251 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13252 tg3_phy_toggle_apd(tp, true);
13255 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13256 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13259 tp->phy_flags |= eee_cap;
13264 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13267 struct tg3 *tp = netdev_priv(dev);
13268 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13270 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
13271 tg3_power_up(tp)) {
13272 etest->flags |= ETH_TEST_FL_FAILED;
13273 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13277 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13279 if (tg3_test_nvram(tp) != 0) {
13280 etest->flags |= ETH_TEST_FL_FAILED;
13281 data[TG3_NVRAM_TEST] = 1;
13283 if (!doextlpbk && tg3_test_link(tp)) {
13284 etest->flags |= ETH_TEST_FL_FAILED;
13285 data[TG3_LINK_TEST] = 1;
13287 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13288 int err, err2 = 0, irq_sync = 0;
13290 if (netif_running(dev)) {
13292 tg3_netif_stop(tp);
13296 tg3_full_lock(tp, irq_sync);
13297 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13298 err = tg3_nvram_lock(tp);
13299 tg3_halt_cpu(tp, RX_CPU_BASE);
13300 if (!tg3_flag(tp, 5705_PLUS))
13301 tg3_halt_cpu(tp, TX_CPU_BASE);
13303 tg3_nvram_unlock(tp);
13305 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13308 if (tg3_test_registers(tp) != 0) {
13309 etest->flags |= ETH_TEST_FL_FAILED;
13310 data[TG3_REGISTER_TEST] = 1;
13313 if (tg3_test_memory(tp) != 0) {
13314 etest->flags |= ETH_TEST_FL_FAILED;
13315 data[TG3_MEMORY_TEST] = 1;
13319 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13321 if (tg3_test_loopback(tp, data, doextlpbk))
13322 etest->flags |= ETH_TEST_FL_FAILED;
13324 tg3_full_unlock(tp);
13326 if (tg3_test_interrupt(tp) != 0) {
13327 etest->flags |= ETH_TEST_FL_FAILED;
13328 data[TG3_INTERRUPT_TEST] = 1;
13331 tg3_full_lock(tp, 0);
13333 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13334 if (netif_running(dev)) {
13335 tg3_flag_set(tp, INIT_COMPLETE);
13336 err2 = tg3_restart_hw(tp, true);
13338 tg3_netif_start(tp);
13341 tg3_full_unlock(tp);
13343 if (irq_sync && !err2)
13346 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13347 tg3_power_down(tp);
13351 static int tg3_hwtstamp_ioctl(struct net_device *dev,
13352 struct ifreq *ifr, int cmd)
13354 struct tg3 *tp = netdev_priv(dev);
13355 struct hwtstamp_config stmpconf;
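	/* Copy the hwtstamp request from user space, program the TX
	 * timestamp flag and the RX PTP filter control accordingly, then
	 * echo the accepted configuration back to the caller.
	 */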
13357 if (!tg3_flag(tp, PTP_CAPABLE))
13360 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13363 if (stmpconf.flags)
13366 switch (stmpconf.tx_type) {
13367 case HWTSTAMP_TX_ON:
13368 tg3_flag_set(tp, TX_TSTAMP_EN);
13370 case HWTSTAMP_TX_OFF:
13371 tg3_flag_clear(tp, TX_TSTAMP_EN);
13377 switch (stmpconf.rx_filter) {
13378 case HWTSTAMP_FILTER_NONE:
13381 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13382 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13383 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13385 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13386 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13387 TG3_RX_PTP_CTL_SYNC_EVNT;
13389 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13390 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13391 TG3_RX_PTP_CTL_DELAY_REQ;
13393 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13394 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13395 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13397 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13398 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13399 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13401 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13402 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13403 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13405 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13406 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13407 TG3_RX_PTP_CTL_SYNC_EVNT;
13409 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13410 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13411 TG3_RX_PTP_CTL_SYNC_EVNT;
13413 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13414 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13415 TG3_RX_PTP_CTL_SYNC_EVNT;
13417 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13418 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13419 TG3_RX_PTP_CTL_DELAY_REQ;
13421 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13422 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13423 TG3_RX_PTP_CTL_DELAY_REQ;
13425 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13426 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13427 TG3_RX_PTP_CTL_DELAY_REQ;
13433 if (netif_running(dev) && tp->rxptpctl)
13434 tw32(TG3_RX_PTP_CTL,
13435 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13437 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13441 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13443 struct mii_ioctl_data *data = if_mii(ifr);
13444 struct tg3 *tp = netdev_priv(dev);
13447 if (tg3_flag(tp, USE_PHYLIB)) {
13448 struct phy_device *phydev;
13449 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13451 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
13452 return phy_mii_ioctl(phydev, ifr, cmd);
13457 data->phy_id = tp->phy_addr;
13460 case SIOCGMIIREG: {
13463 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13464 break; /* We have no PHY */
13466 if (!netif_running(dev))
13469 spin_lock_bh(&tp->lock);
13470 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13471 data->reg_num & 0x1f, &mii_regval);
13472 spin_unlock_bh(&tp->lock);
13474 data->val_out = mii_regval;
13480 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13481 break; /* We have no PHY */
13483 if (!netif_running(dev))
13486 spin_lock_bh(&tp->lock);
13487 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13488 data->reg_num & 0x1f, data->val_in);
13489 spin_unlock_bh(&tp->lock);
13493 case SIOCSHWTSTAMP:
13494 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13500 return -EOPNOTSUPP;
13503 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13505 struct tg3 *tp = netdev_priv(dev);
13507 memcpy(ec, &tp->coal, sizeof(*ec));
13511 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13513 struct tg3 *tp = netdev_priv(dev);
13514 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13515 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13517 if (!tg3_flag(tp, 5705_PLUS)) {
13518 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13519 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13520 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13521 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13524 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13525 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13526 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13527 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13528 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13529 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13530 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13531 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13532 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13533 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13536 /* No rx interrupts will be generated if both are zero */
13537 if ((ec->rx_coalesce_usecs == 0) &&
13538 (ec->rx_max_coalesced_frames == 0))
13541 /* No tx interrupts will be generated if both are zero */
13542 if ((ec->tx_coalesce_usecs == 0) &&
13543 (ec->tx_max_coalesced_frames == 0))
13546 /* Only copy relevant parameters, ignore all others. */
13547 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13548 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13549 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13550 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13551 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13552 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13553 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13554 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13555 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13557 if (netif_running(dev)) {
13558 tg3_full_lock(tp, 0);
13559 __tg3_set_coalesce(tp, &tp->coal);
13560 tg3_full_unlock(tp);
13565 static const struct ethtool_ops tg3_ethtool_ops = {
13566 .get_settings = tg3_get_settings,
13567 .set_settings = tg3_set_settings,
13568 .get_drvinfo = tg3_get_drvinfo,
13569 .get_regs_len = tg3_get_regs_len,
13570 .get_regs = tg3_get_regs,
13571 .get_wol = tg3_get_wol,
13572 .set_wol = tg3_set_wol,
13573 .get_msglevel = tg3_get_msglevel,
13574 .set_msglevel = tg3_set_msglevel,
13575 .nway_reset = tg3_nway_reset,
13576 .get_link = ethtool_op_get_link,
13577 .get_eeprom_len = tg3_get_eeprom_len,
13578 .get_eeprom = tg3_get_eeprom,
13579 .set_eeprom = tg3_set_eeprom,
13580 .get_ringparam = tg3_get_ringparam,
13581 .set_ringparam = tg3_set_ringparam,
13582 .get_pauseparam = tg3_get_pauseparam,
13583 .set_pauseparam = tg3_set_pauseparam,
13584 .self_test = tg3_self_test,
13585 .get_strings = tg3_get_strings,
13586 .set_phys_id = tg3_set_phys_id,
13587 .get_ethtool_stats = tg3_get_ethtool_stats,
13588 .get_coalesce = tg3_get_coalesce,
13589 .set_coalesce = tg3_set_coalesce,
13590 .get_sset_count = tg3_get_sset_count,
13591 .get_rxnfc = tg3_get_rxnfc,
13592 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
13593 .get_rxfh_indir = tg3_get_rxfh_indir,
13594 .set_rxfh_indir = tg3_set_rxfh_indir,
13595 .get_channels = tg3_get_channels,
13596 .set_channels = tg3_set_channels,
13597 .get_ts_info = tg3_get_ts_info,
13600 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13601 struct rtnl_link_stats64 *stats)
13603 struct tg3 *tp = netdev_priv(dev);
13605 spin_lock_bh(&tp->lock);
13606 if (!tp->hw_stats) {
13607 spin_unlock_bh(&tp->lock);
13608 return &tp->net_stats_prev;
13611 tg3_get_nstats(tp, stats);
13612 spin_unlock_bh(&tp->lock);
13617 static void tg3_set_rx_mode(struct net_device *dev)
13619 struct tg3 *tp = netdev_priv(dev);
13621 if (!netif_running(dev))
13624 tg3_full_lock(tp, 0);
13625 __tg3_set_rx_mode(dev);
13626 tg3_full_unlock(tp);
13629 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13632 dev->mtu = new_mtu;
13634 if (new_mtu > ETH_DATA_LEN) {
13635 if (tg3_flag(tp, 5780_CLASS)) {
13636 netdev_update_features(dev);
13637 tg3_flag_clear(tp, TSO_CAPABLE);
13639 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13642 if (tg3_flag(tp, 5780_CLASS)) {
13643 tg3_flag_set(tp, TSO_CAPABLE);
13644 netdev_update_features(dev);
13646 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13650 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13652 struct tg3 *tp = netdev_priv(dev);
13654 bool reset_phy = false;
13656 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13659 if (!netif_running(dev)) {
13660 /* We'll just catch it later when the
13663 tg3_set_mtu(dev, tp, new_mtu);
13669 tg3_netif_stop(tp);
13671 tg3_full_lock(tp, 1);
13673 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13675 tg3_set_mtu(dev, tp, new_mtu);
13677 /* Reset PHY, otherwise the read DMA engine will be in a mode that
13678 * breaks all requests to 256 bytes.
13680 if (tg3_asic_rev(tp) == ASIC_REV_57766)
13683 err = tg3_restart_hw(tp, reset_phy);
13686 tg3_netif_start(tp);
13688 tg3_full_unlock(tp);
13696 static const struct net_device_ops tg3_netdev_ops = {
13697 .ndo_open = tg3_open,
13698 .ndo_stop = tg3_close,
13699 .ndo_start_xmit = tg3_start_xmit,
13700 .ndo_get_stats64 = tg3_get_stats64,
13701 .ndo_validate_addr = eth_validate_addr,
13702 .ndo_set_rx_mode = tg3_set_rx_mode,
13703 .ndo_set_mac_address = tg3_set_mac_addr,
13704 .ndo_do_ioctl = tg3_ioctl,
13705 .ndo_tx_timeout = tg3_tx_timeout,
13706 .ndo_change_mtu = tg3_change_mtu,
13707 .ndo_fix_features = tg3_fix_features,
13708 .ndo_set_features = tg3_set_features,
13709 #ifdef CONFIG_NET_POLL_CONTROLLER
13710 .ndo_poll_controller = tg3_poll_controller,
13714 static void tg3_get_eeprom_size(struct tg3 *tp)
13716 u32 cursize, val, magic;
13718 tp->nvram_size = EEPROM_CHIP_SIZE;
13720 if (tg3_nvram_read(tp, 0, &magic) != 0)
13723 if ((magic != TG3_EEPROM_MAGIC) &&
13724 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13725 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13729 * Size the chip by reading offsets at increasing powers of two.
13730 * When we encounter our validation signature, we know the addressing
13731 * has wrapped around, and thus have our chip size.
13735 while (cursize < tp->nvram_size) {
13736 if (tg3_nvram_read(tp, cursize, &val) != 0)
13745 tp->nvram_size = cursize;
13748 static void tg3_get_nvram_size(struct tg3 *tp)
13752 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13755 /* Selfboot format */
13756 if (val != TG3_EEPROM_MAGIC) {
13757 tg3_get_eeprom_size(tp);
13761 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13763 /* This is confusing. We want to operate on the
13764 * 16-bit value at offset 0xf2. The tg3_nvram_read()
13765 * call will read from NVRAM and byteswap the data
13766 * according to the byteswapping settings for all
13767 * other register accesses. This ensures the data we
13768 * want will always reside in the lower 16-bits.
13769 * However, the data in NVRAM is in LE format, which
13770 * means the data from the NVRAM read will always be
13771 * opposite the endianness of the CPU. The 16-bit
13772 * byteswap then brings the data to CPU endianness.
13774 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13778 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
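/* tg3_get_nvram_info() and the per-ASIC variants that follow decode
 * NVRAM_CFG1 to identify the flash/EEPROM vendor, set the buffered and
 * flash flags, and record the page and device sizes used by the NVRAM
 * read/write helpers.
 */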
13781 static void tg3_get_nvram_info(struct tg3 *tp)
13785 nvcfg1 = tr32(NVRAM_CFG1);
13786 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
13787 tg3_flag_set(tp, FLASH);
13789 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13790 tw32(NVRAM_CFG1, nvcfg1);
13793 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
13794 tg3_flag(tp, 5780_CLASS)) {
13795 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
13796 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
13797 tp->nvram_jedecnum = JEDEC_ATMEL;
13798 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13799 tg3_flag_set(tp, NVRAM_BUFFERED);
13801 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
13802 tp->nvram_jedecnum = JEDEC_ATMEL;
13803 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
13805 case FLASH_VENDOR_ATMEL_EEPROM:
13806 tp->nvram_jedecnum = JEDEC_ATMEL;
13807 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13808 tg3_flag_set(tp, NVRAM_BUFFERED);
13810 case FLASH_VENDOR_ST:
13811 tp->nvram_jedecnum = JEDEC_ST;
13812 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
13813 tg3_flag_set(tp, NVRAM_BUFFERED);
13815 case FLASH_VENDOR_SAIFUN:
13816 tp->nvram_jedecnum = JEDEC_SAIFUN;
13817 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
13819 case FLASH_VENDOR_SST_SMALL:
13820 case FLASH_VENDOR_SST_LARGE:
13821 tp->nvram_jedecnum = JEDEC_SST;
13822 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
13826 tp->nvram_jedecnum = JEDEC_ATMEL;
13827 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13828 tg3_flag_set(tp, NVRAM_BUFFERED);
13832 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13834 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13835 case FLASH_5752PAGE_SIZE_256:
13836 tp->nvram_pagesize = 256;
13838 case FLASH_5752PAGE_SIZE_512:
13839 tp->nvram_pagesize = 512;
13841 case FLASH_5752PAGE_SIZE_1K:
13842 tp->nvram_pagesize = 1024;
13844 case FLASH_5752PAGE_SIZE_2K:
13845 tp->nvram_pagesize = 2048;
13847 case FLASH_5752PAGE_SIZE_4K:
13848 tp->nvram_pagesize = 4096;
13850 case FLASH_5752PAGE_SIZE_264:
13851 tp->nvram_pagesize = 264;
13853 case FLASH_5752PAGE_SIZE_528:
13854 tp->nvram_pagesize = 528;
13859 static void tg3_get_5752_nvram_info(struct tg3 *tp)
13863 nvcfg1 = tr32(NVRAM_CFG1);
13865 /* NVRAM protection for TPM */
13866 if (nvcfg1 & (1 << 27))
13867 tg3_flag_set(tp, PROTECTED_NVRAM);
13869 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13870 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
13871 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
13872 tp->nvram_jedecnum = JEDEC_ATMEL;
13873 tg3_flag_set(tp, NVRAM_BUFFERED);
13875 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13876 tp->nvram_jedecnum = JEDEC_ATMEL;
13877 tg3_flag_set(tp, NVRAM_BUFFERED);
13878 tg3_flag_set(tp, FLASH);
13880 case FLASH_5752VENDOR_ST_M45PE10:
13881 case FLASH_5752VENDOR_ST_M45PE20:
13882 case FLASH_5752VENDOR_ST_M45PE40:
13883 tp->nvram_jedecnum = JEDEC_ST;
13884 tg3_flag_set(tp, NVRAM_BUFFERED);
13885 tg3_flag_set(tp, FLASH);
13889 if (tg3_flag(tp, FLASH)) {
13890 tg3_nvram_get_pagesize(tp, nvcfg1);
13892 /* For eeprom, set pagesize to maximum eeprom size */
13893 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13895 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13896 tw32(NVRAM_CFG1, nvcfg1);
13900 static void tg3_get_5755_nvram_info(struct tg3 *tp)
13902 u32 nvcfg1, protect = 0;
13904 nvcfg1 = tr32(NVRAM_CFG1);
13906 /* NVRAM protection for TPM */
13907 if (nvcfg1 & (1 << 27)) {
13908 tg3_flag_set(tp, PROTECTED_NVRAM);
13912 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13914 case FLASH_5755VENDOR_ATMEL_FLASH_1:
13915 case FLASH_5755VENDOR_ATMEL_FLASH_2:
13916 case FLASH_5755VENDOR_ATMEL_FLASH_3:
13917 case FLASH_5755VENDOR_ATMEL_FLASH_5:
13918 tp->nvram_jedecnum = JEDEC_ATMEL;
13919 tg3_flag_set(tp, NVRAM_BUFFERED);
13920 tg3_flag_set(tp, FLASH);
13921 tp->nvram_pagesize = 264;
13922 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
13923 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
13924 tp->nvram_size = (protect ? 0x3e200 :
13925 TG3_NVRAM_SIZE_512KB);
13926 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
13927 tp->nvram_size = (protect ? 0x1f200 :
13928 TG3_NVRAM_SIZE_256KB);
13930 tp->nvram_size = (protect ? 0x1f200 :
13931 TG3_NVRAM_SIZE_128KB);
13933 case FLASH_5752VENDOR_ST_M45PE10:
13934 case FLASH_5752VENDOR_ST_M45PE20:
13935 case FLASH_5752VENDOR_ST_M45PE40:
13936 tp->nvram_jedecnum = JEDEC_ST;
13937 tg3_flag_set(tp, NVRAM_BUFFERED);
13938 tg3_flag_set(tp, FLASH);
13939 tp->nvram_pagesize = 256;
13940 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
13941 tp->nvram_size = (protect ?
13942 TG3_NVRAM_SIZE_64KB :
13943 TG3_NVRAM_SIZE_128KB);
13944 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
13945 tp->nvram_size = (protect ?
13946 TG3_NVRAM_SIZE_64KB :
13947 TG3_NVRAM_SIZE_256KB);
13949 tp->nvram_size = (protect ?
13950 TG3_NVRAM_SIZE_128KB :
13951 TG3_NVRAM_SIZE_512KB);
13956 static void tg3_get_5787_nvram_info(struct tg3 *tp)
13960 nvcfg1 = tr32(NVRAM_CFG1);
13962 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13963 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
13964 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13965 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
13966 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13967 tp->nvram_jedecnum = JEDEC_ATMEL;
13968 tg3_flag_set(tp, NVRAM_BUFFERED);
13969 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13971 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13972 tw32(NVRAM_CFG1, nvcfg1);
13974 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13975 case FLASH_5755VENDOR_ATMEL_FLASH_1:
13976 case FLASH_5755VENDOR_ATMEL_FLASH_2:
13977 case FLASH_5755VENDOR_ATMEL_FLASH_3:
13978 tp->nvram_jedecnum = JEDEC_ATMEL;
13979 tg3_flag_set(tp, NVRAM_BUFFERED);
13980 tg3_flag_set(tp, FLASH);
13981 tp->nvram_pagesize = 264;
13983 case FLASH_5752VENDOR_ST_M45PE10:
13984 case FLASH_5752VENDOR_ST_M45PE20:
13985 case FLASH_5752VENDOR_ST_M45PE40:
13986 tp->nvram_jedecnum = JEDEC_ST;
13987 tg3_flag_set(tp, NVRAM_BUFFERED);
13988 tg3_flag_set(tp, FLASH);
13989 tp->nvram_pagesize = 256;
13994 static void tg3_get_5761_nvram_info(struct tg3 *tp)
13996 u32 nvcfg1, protect = 0;
13998 nvcfg1 = tr32(NVRAM_CFG1);
14000 /* NVRAM protection for TPM */
14001 if (nvcfg1 & (1 << 27)) {
14002 tg3_flag_set(tp, PROTECTED_NVRAM);
14006 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14008 case FLASH_5761VENDOR_ATMEL_ADB021D:
14009 case FLASH_5761VENDOR_ATMEL_ADB041D:
14010 case FLASH_5761VENDOR_ATMEL_ADB081D:
14011 case FLASH_5761VENDOR_ATMEL_ADB161D:
14012 case FLASH_5761VENDOR_ATMEL_MDB021D:
14013 case FLASH_5761VENDOR_ATMEL_MDB041D:
14014 case FLASH_5761VENDOR_ATMEL_MDB081D:
14015 case FLASH_5761VENDOR_ATMEL_MDB161D:
14016 tp->nvram_jedecnum = JEDEC_ATMEL;
14017 tg3_flag_set(tp, NVRAM_BUFFERED);
14018 tg3_flag_set(tp, FLASH);
14019 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14020 tp->nvram_pagesize = 256;
14022 case FLASH_5761VENDOR_ST_A_M45PE20:
14023 case FLASH_5761VENDOR_ST_A_M45PE40:
14024 case FLASH_5761VENDOR_ST_A_M45PE80:
14025 case FLASH_5761VENDOR_ST_A_M45PE16:
14026 case FLASH_5761VENDOR_ST_M_M45PE20:
14027 case FLASH_5761VENDOR_ST_M_M45PE40:
14028 case FLASH_5761VENDOR_ST_M_M45PE80:
14029 case FLASH_5761VENDOR_ST_M_M45PE16:
14030 tp->nvram_jedecnum = JEDEC_ST;
14031 tg3_flag_set(tp, NVRAM_BUFFERED);
14032 tg3_flag_set(tp, FLASH);
14033 tp->nvram_pagesize = 256;
break;
}

if (protect) {
14038 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
} else {
switch (nvcfg1) {
14041 case FLASH_5761VENDOR_ATMEL_ADB161D:
14042 case FLASH_5761VENDOR_ATMEL_MDB161D:
14043 case FLASH_5761VENDOR_ST_A_M45PE16:
14044 case FLASH_5761VENDOR_ST_M_M45PE16:
14045 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14047 case FLASH_5761VENDOR_ATMEL_ADB081D:
14048 case FLASH_5761VENDOR_ATMEL_MDB081D:
14049 case FLASH_5761VENDOR_ST_A_M45PE80:
14050 case FLASH_5761VENDOR_ST_M_M45PE80:
14051 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14053 case FLASH_5761VENDOR_ATMEL_ADB041D:
14054 case FLASH_5761VENDOR_ATMEL_MDB041D:
14055 case FLASH_5761VENDOR_ST_A_M45PE40:
14056 case FLASH_5761VENDOR_ST_M_M45PE40:
14057 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14059 case FLASH_5761VENDOR_ATMEL_ADB021D:
14060 case FLASH_5761VENDOR_ATMEL_MDB021D:
14061 case FLASH_5761VENDOR_ST_A_M45PE20:
14062 case FLASH_5761VENDOR_ST_M_M45PE20:
14063 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
break;
}
}
}
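/* 5906 devices always use a buffered Atmel AT24Cxx-style serial EEPROM,
 * so no strap decoding is needed.
 */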
14069 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14071 tp->nvram_jedecnum = JEDEC_ATMEL;
14072 tg3_flag_set(tp, NVRAM_BUFFERED);
14073 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14076 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14080 nvcfg1 = tr32(NVRAM_CFG1);
14082 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14083 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14084 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14085 tp->nvram_jedecnum = JEDEC_ATMEL;
14086 tg3_flag_set(tp, NVRAM_BUFFERED);
14087 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14089 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14090 tw32(NVRAM_CFG1, nvcfg1);
14092 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14093 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14094 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14095 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14096 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14097 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14098 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14099 tp->nvram_jedecnum = JEDEC_ATMEL;
14100 tg3_flag_set(tp, NVRAM_BUFFERED);
14101 tg3_flag_set(tp, FLASH);
14103 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14104 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14105 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14106 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14107 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14109 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14110 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14111 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14113 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14114 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14115 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14119 case FLASH_5752VENDOR_ST_M45PE10:
14120 case FLASH_5752VENDOR_ST_M45PE20:
14121 case FLASH_5752VENDOR_ST_M45PE40:
14122 tp->nvram_jedecnum = JEDEC_ST;
14123 tg3_flag_set(tp, NVRAM_BUFFERED);
14124 tg3_flag_set(tp, FLASH);
14126 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14127 case FLASH_5752VENDOR_ST_M45PE10:
14128 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14130 case FLASH_5752VENDOR_ST_M45PE20:
14131 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14133 case FLASH_5752VENDOR_ST_M45PE40:
14134 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
default:
14139 tg3_flag_set(tp, NO_NVRAM);
return;
}
14143 tg3_nvram_get_pagesize(tp, nvcfg1);
14144 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14145 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14149 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14153 nvcfg1 = tr32(NVRAM_CFG1);
14155 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14156 case FLASH_5717VENDOR_ATMEL_EEPROM:
14157 case FLASH_5717VENDOR_MICRO_EEPROM:
14158 tp->nvram_jedecnum = JEDEC_ATMEL;
14159 tg3_flag_set(tp, NVRAM_BUFFERED);
14160 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14162 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14163 tw32(NVRAM_CFG1, nvcfg1);
14165 case FLASH_5717VENDOR_ATMEL_MDB011D:
14166 case FLASH_5717VENDOR_ATMEL_ADB011B:
14167 case FLASH_5717VENDOR_ATMEL_ADB011D:
14168 case FLASH_5717VENDOR_ATMEL_MDB021D:
14169 case FLASH_5717VENDOR_ATMEL_ADB021B:
14170 case FLASH_5717VENDOR_ATMEL_ADB021D:
14171 case FLASH_5717VENDOR_ATMEL_45USPT:
14172 tp->nvram_jedecnum = JEDEC_ATMEL;
14173 tg3_flag_set(tp, NVRAM_BUFFERED);
14174 tg3_flag_set(tp, FLASH);
14176 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14177 case FLASH_5717VENDOR_ATMEL_MDB021D:
14178 /* Detect size with tg3_nvram_get_size() */
break;
14180 case FLASH_5717VENDOR_ATMEL_ADB021B:
14181 case FLASH_5717VENDOR_ATMEL_ADB021D:
14182 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
break;
default:
14185 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
break;
}
break;
14189 case FLASH_5717VENDOR_ST_M_M25PE10:
14190 case FLASH_5717VENDOR_ST_A_M25PE10:
14191 case FLASH_5717VENDOR_ST_M_M45PE10:
14192 case FLASH_5717VENDOR_ST_A_M45PE10:
14193 case FLASH_5717VENDOR_ST_M_M25PE20:
14194 case FLASH_5717VENDOR_ST_A_M25PE20:
14195 case FLASH_5717VENDOR_ST_M_M45PE20:
14196 case FLASH_5717VENDOR_ST_A_M45PE20:
14197 case FLASH_5717VENDOR_ST_25USPT:
14198 case FLASH_5717VENDOR_ST_45USPT:
14199 tp->nvram_jedecnum = JEDEC_ST;
14200 tg3_flag_set(tp, NVRAM_BUFFERED);
14201 tg3_flag_set(tp, FLASH);
14203 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14204 case FLASH_5717VENDOR_ST_M_M25PE20:
14205 case FLASH_5717VENDOR_ST_M_M45PE20:
14206 /* Detect size with tg3_nvram_get_size() */
break;
14208 case FLASH_5717VENDOR_ST_A_M25PE20:
14209 case FLASH_5717VENDOR_ST_A_M45PE20:
14210 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
break;
default:
14213 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
break;
}
break;
default:
14218 tg3_flag_set(tp, NO_NVRAM);
return;
}
14222 tg3_nvram_get_pagesize(tp, nvcfg1);
14223 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14224 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
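/* 5720/5762 NVRAM strap decode. On the 5762 the pin straps are first
 * remapped to their 5720 equivalents (or NO_NVRAM is flagged) before the
 * common decode below runs.
 */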
14227 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14229 u32 nvcfg1, nvmpinstrp;
14231 nvcfg1 = tr32(NVRAM_CFG1);
14232 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14234 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14235 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14236 tg3_flag_set(tp, NO_NVRAM);
return;
}

14240 switch (nvmpinstrp) {
14241 case FLASH_5762_EEPROM_HD:
14242 nvmpinstrp = FLASH_5720_EEPROM_HD;
14244 case FLASH_5762_EEPROM_LD:
14245 nvmpinstrp = FLASH_5720_EEPROM_LD;
14247 case FLASH_5720VENDOR_M_ST_M45PE20:
14248 /* This pinstrap supports multiple sizes, so force it
14249 * to read the actual size from location 0xf0.
*/
14251 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
break;
default:
break;
}
}
14256 switch (nvmpinstrp) {
14257 case FLASH_5720_EEPROM_HD:
14258 case FLASH_5720_EEPROM_LD:
14259 tp->nvram_jedecnum = JEDEC_ATMEL;
14260 tg3_flag_set(tp, NVRAM_BUFFERED);
14262 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14263 tw32(NVRAM_CFG1, nvcfg1);
14264 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14265 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14267 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14269 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14270 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14271 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14272 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14273 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14274 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14275 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14276 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14277 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14278 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14279 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14280 case FLASH_5720VENDOR_ATMEL_45USPT:
14281 tp->nvram_jedecnum = JEDEC_ATMEL;
14282 tg3_flag_set(tp, NVRAM_BUFFERED);
14283 tg3_flag_set(tp, FLASH);
14285 switch (nvmpinstrp) {
14286 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14287 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14288 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14289 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14291 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14292 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14293 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14294 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14296 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14297 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14298 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14301 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14302 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14306 case FLASH_5720VENDOR_M_ST_M25PE10:
14307 case FLASH_5720VENDOR_M_ST_M45PE10:
14308 case FLASH_5720VENDOR_A_ST_M25PE10:
14309 case FLASH_5720VENDOR_A_ST_M45PE10:
14310 case FLASH_5720VENDOR_M_ST_M25PE20:
14311 case FLASH_5720VENDOR_M_ST_M45PE20:
14312 case FLASH_5720VENDOR_A_ST_M25PE20:
14313 case FLASH_5720VENDOR_A_ST_M45PE20:
14314 case FLASH_5720VENDOR_M_ST_M25PE40:
14315 case FLASH_5720VENDOR_M_ST_M45PE40:
14316 case FLASH_5720VENDOR_A_ST_M25PE40:
14317 case FLASH_5720VENDOR_A_ST_M45PE40:
14318 case FLASH_5720VENDOR_M_ST_M25PE80:
14319 case FLASH_5720VENDOR_M_ST_M45PE80:
14320 case FLASH_5720VENDOR_A_ST_M25PE80:
14321 case FLASH_5720VENDOR_A_ST_M45PE80:
14322 case FLASH_5720VENDOR_ST_25USPT:
14323 case FLASH_5720VENDOR_ST_45USPT:
14324 tp->nvram_jedecnum = JEDEC_ST;
14325 tg3_flag_set(tp, NVRAM_BUFFERED);
14326 tg3_flag_set(tp, FLASH);
14328 switch (nvmpinstrp) {
14329 case FLASH_5720VENDOR_M_ST_M25PE20:
14330 case FLASH_5720VENDOR_M_ST_M45PE20:
14331 case FLASH_5720VENDOR_A_ST_M25PE20:
14332 case FLASH_5720VENDOR_A_ST_M45PE20:
14333 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14335 case FLASH_5720VENDOR_M_ST_M25PE40:
14336 case FLASH_5720VENDOR_M_ST_M45PE40:
14337 case FLASH_5720VENDOR_A_ST_M25PE40:
14338 case FLASH_5720VENDOR_A_ST_M45PE40:
14339 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14341 case FLASH_5720VENDOR_M_ST_M25PE80:
14342 case FLASH_5720VENDOR_M_ST_M45PE80:
14343 case FLASH_5720VENDOR_A_ST_M25PE80:
14344 case FLASH_5720VENDOR_A_ST_M45PE80:
14345 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14348 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14349 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
default:
14354 tg3_flag_set(tp, NO_NVRAM);
return;
}
14358 tg3_nvram_get_pagesize(tp, nvcfg1);
14359 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14360 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14362 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14365 if (tg3_nvram_read(tp, 0, &val))
14368 if (val != TG3_EEPROM_MAGIC &&
14369 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14370 tg3_flag_set(tp, NO_NVRAM);
14374 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14375 static void tg3_nvram_init(struct tg3 *tp)
14377 if (tg3_flag(tp, IS_SSB_CORE)) {
14378 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14379 tg3_flag_clear(tp, NVRAM);
14380 tg3_flag_clear(tp, NVRAM_BUFFERED);
14381 tg3_flag_set(tp, NO_NVRAM);
return;
}

14385 tw32_f(GRC_EEPROM_ADDR,
14386 (EEPROM_ADDR_FSM_RESET |
14387 (EEPROM_DEFAULT_CLOCK_PERIOD <<
14388 EEPROM_ADDR_CLKPERD_SHIFT)));
14392 /* Enable seeprom accesses. */
14393 tw32_f(GRC_LOCAL_CTRL,
14394 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14397 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14398 tg3_asic_rev(tp) != ASIC_REV_5701) {
14399 tg3_flag_set(tp, NVRAM);
14401 if (tg3_nvram_lock(tp)) {
14402 netdev_warn(tp->dev,
14403 "Cannot get nvram lock, %s failed\n",
14407 tg3_enable_nvram_access(tp);
14409 tp->nvram_size = 0;
14411 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14412 tg3_get_5752_nvram_info(tp);
14413 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14414 tg3_get_5755_nvram_info(tp);
14415 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14416 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14417 tg3_asic_rev(tp) == ASIC_REV_5785)
14418 tg3_get_5787_nvram_info(tp);
14419 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14420 tg3_get_5761_nvram_info(tp);
14421 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14422 tg3_get_5906_nvram_info(tp);
14423 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14424 tg3_flag(tp, 57765_CLASS))
14425 tg3_get_57780_nvram_info(tp);
14426 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14427 tg3_asic_rev(tp) == ASIC_REV_5719)
14428 tg3_get_5717_nvram_info(tp);
14429 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14430 tg3_asic_rev(tp) == ASIC_REV_5762)
14431 tg3_get_5720_nvram_info(tp);
14433 tg3_get_nvram_info(tp);
14435 if (tp->nvram_size == 0)
14436 tg3_get_nvram_size(tp);
14438 tg3_disable_nvram_access(tp);
14439 tg3_nvram_unlock(tp);
} else {
14442 tg3_flag_clear(tp, NVRAM);
14443 tg3_flag_clear(tp, NVRAM_BUFFERED);

14445 tg3_get_eeprom_size(tp);
}
}
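/* Fallback table mapping PCI subsystem vendor/device IDs to PHY IDs for
 * boards whose NVRAM does not provide a usable PHY ID (see
 * tg3_phy_probe()).
 */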
14449 struct subsys_tbl_ent {
14450 u16 subsys_vendor, subsys_devid;
14454 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14455 /* Broadcom boards. */
14456 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14457 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14458 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14459 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14460 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14461 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14462 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14463 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14464 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14465 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14466 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14467 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14468 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14469 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14470 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14471 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14472 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14473 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14474 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14475 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14476 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14477 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
14480 { TG3PCI_SUBVENDOR_ID_3COM,
14481 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14482 { TG3PCI_SUBVENDOR_ID_3COM,
14483 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14484 { TG3PCI_SUBVENDOR_ID_3COM,
14485 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14486 { TG3PCI_SUBVENDOR_ID_3COM,
14487 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14488 { TG3PCI_SUBVENDOR_ID_3COM,
14489 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
14492 { TG3PCI_SUBVENDOR_ID_DELL,
14493 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14494 { TG3PCI_SUBVENDOR_ID_DELL,
14495 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14496 { TG3PCI_SUBVENDOR_ID_DELL,
14497 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14498 { TG3PCI_SUBVENDOR_ID_DELL,
14499 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14501 /* Compaq boards. */
14502 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14503 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14504 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14505 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14506 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14507 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14508 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14509 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14510 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14511 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
14514 { TG3PCI_SUBVENDOR_ID_IBM,
14515 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
14518 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14522 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14523 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14524 tp->pdev->subsystem_vendor) &&
14525 (subsys_id_to_phy_id[i].subsys_devid ==
14526 tp->pdev->subsystem_device))
14527 return &subsys_id_to_phy_id[i];
}

return NULL;
}
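/* Pull the one-time hardware configuration (PHY ID, LED mode, WOL, ASF
 * and APE enables, serdes flags) out of the NIC SRAM config block that
 * the bootcode writes, falling back to safe defaults when the SRAM
 * signature is absent.
 */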
14532 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14536 tp->phy_id = TG3_PHY_ID_INVALID;
14537 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14539 /* Assume an onboard device and WOL capable by default. */
14540 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14541 tg3_flag_set(tp, WOL_CAP);
14543 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14544 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14545 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14546 tg3_flag_set(tp, IS_NIC);
14548 val = tr32(VCPU_CFGSHDW);
14549 if (val & VCPU_CFGSHDW_ASPM_DBNC)
14550 tg3_flag_set(tp, ASPM_WORKAROUND);
14551 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14552 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14553 tg3_flag_set(tp, WOL_ENABLE);
14554 device_set_wakeup_enable(&tp->pdev->dev, true);
14559 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14560 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14561 u32 nic_cfg, led_cfg;
14562 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
14563 int eeprom_phy_serdes = 0;
14565 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14566 tp->nic_sram_data_cfg = nic_cfg;
14568 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14569 ver >>= NIC_SRAM_DATA_VER_SHIFT;
14570 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14571 tg3_asic_rev(tp) != ASIC_REV_5701 &&
14572 tg3_asic_rev(tp) != ASIC_REV_5703 &&
14573 (ver > 0) && (ver < 0x100))
14574 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14576 if (tg3_asic_rev(tp) == ASIC_REV_5785)
14577 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14579 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14580 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14581 eeprom_phy_serdes = 1;
14583 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14584 if (nic_phy_id != 0) {
14585 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14586 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
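/* Repack the two 16-bit PHY ID words from SRAM into the driver's
 * internal TG3_PHY_ID_* format (the same packing applied to
 * MII_PHYSID1/MII_PHYSID2 in tg3_phy_probe()).
 */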
14588 eeprom_phy_id = (id1 >> 16) << 10;
14589 eeprom_phy_id |= (id2 & 0xfc00) << 16;
14590 eeprom_phy_id |= (id2 & 0x03ff) << 0;
} else
eeprom_phy_id = 0;

14594 tp->phy_id = eeprom_phy_id;
14595 if (eeprom_phy_serdes) {
14596 if (!tg3_flag(tp, 5705_PLUS))
14597 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
else
14599 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
}
14602 if (tg3_flag(tp, 5750_PLUS))
14603 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14604 SHASTA_EXT_LED_MODE_MASK);
else
14606 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

switch (led_cfg) {
14610 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14611 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14614 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14615 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14618 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14619 tp->led_ctrl = LED_CTRL_MODE_MAC;
14621 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
14622 * read on some older 5700/5701 bootcode.
*/
14624 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
14625 tg3_asic_rev(tp) == ASIC_REV_5701)
14626 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14630 case SHASTA_EXT_LED_SHARED:
14631 tp->led_ctrl = LED_CTRL_MODE_SHARED;
14632 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
14633 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
14634 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14635 LED_CTRL_MODE_PHY_2);
14638 case SHASTA_EXT_LED_MAC:
14639 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14642 case SHASTA_EXT_LED_COMBO:
14643 tp->led_ctrl = LED_CTRL_MODE_COMBO;
14644 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
14645 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14646 LED_CTRL_MODE_PHY_2);
14651 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
14652 tg3_asic_rev(tp) == ASIC_REV_5701) &&
14653 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14654 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14656 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
14657 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14659 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14660 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14661 if ((tp->pdev->subsystem_vendor ==
14662 PCI_VENDOR_ID_ARIMA) &&
14663 (tp->pdev->subsystem_device == 0x205a ||
14664 tp->pdev->subsystem_device == 0x2063))
14665 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14667 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14668 tg3_flag_set(tp, IS_NIC);
14671 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14672 tg3_flag_set(tp, ENABLE_ASF);
14673 if (tg3_flag(tp, 5750_PLUS))
14674 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14677 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14678 tg3_flag(tp, 5750_PLUS))
14679 tg3_flag_set(tp, ENABLE_APE);
14681 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14682 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14683 tg3_flag_clear(tp, WOL_CAP);
14685 if (tg3_flag(tp, WOL_CAP) &&
14686 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14687 tg3_flag_set(tp, WOL_ENABLE);
14688 device_set_wakeup_enable(&tp->pdev->dev, true);
14691 if (cfg2 & (1 << 17))
14692 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14694 /* serdes signal pre-emphasis in register 0x590 set by */
14695 /* bootcode if bit 18 is set */
14696 if (cfg2 & (1 << 18))
14697 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14699 if ((tg3_flag(tp, 57765_PLUS) ||
14700 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
14701 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
14702 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14703 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
14705 if (tg3_flag(tp, PCI_EXPRESS)) {
14708 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14709 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
14710 !tg3_flag(tp, 57765_PLUS) &&
14711 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
14712 tg3_flag_set(tp, ASPM_WORKAROUND);
14713 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
14714 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
14715 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
14716 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
14719 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
14720 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
14721 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
14722 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
14723 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
14724 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
14727 if (tg3_flag(tp, WOL_CAP))
14728 device_set_wakeup_enable(&tp->pdev->dev,
14729 tg3_flag(tp, WOL_ENABLE));
else
14731 device_set_wakeup_capable(&tp->pdev->dev, false);
}
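/* Read one 32-bit word from the chip's OTP region through the APE OTP
 * registers, polling for command completion; returns 0 on success or a
 * negative errno.
 */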
14734 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
14737 u32 val2, off = offset * 8;
14739 err = tg3_nvram_lock(tp);
14743 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
14744 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
14745 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
14746 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
14749 for (i = 0; i < 100; i++) {
14750 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
14751 if (val2 & APE_OTP_STATUS_CMD_DONE) {
14752 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
14758 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
14760 tg3_nvram_unlock(tp);
14761 if (val2 & APE_OTP_STATUS_CMD_DONE)
14767 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14772 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14773 tw32(OTP_CTRL, cmd);
14775 /* Wait for up to 1 ms for command to execute. */
14776 for (i = 0; i < 100; i++) {
14777 val = tr32(OTP_STATUS);
14778 if (val & OTP_STATUS_CMD_DONE)
14783 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14786 /* Read the gphy configuration from the OTP region of the chip. The gphy
14787 * configuration is a 32-bit value that straddles the alignment boundary.
14788 * We do two 32-bit reads and then shift and merge the results.
*/
14790 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
14792 u32 bhalf_otp, thalf_otp;
14794 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
14796 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
14799 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
14801 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14804 thalf_otp = tr32(OTP_READ_DATA);
14806 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
14808 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14811 bhalf_otp = tr32(OTP_READ_DATA);
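/* Example (hypothetical values): if the first read returns 0x1234abcd
 * and the second returns 0x5678ef01, the merged gphy config below is
 * (0xabcd << 16) | 0x5678 = 0xabcd5678.
 */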
14813 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
14816 static void tg3_phy_init_link_config(struct tg3 *tp)
14818 u32 adv = ADVERTISED_Autoneg;
14820 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14821 adv |= ADVERTISED_1000baseT_Half |
14822 ADVERTISED_1000baseT_Full;
14824 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14825 adv |= ADVERTISED_100baseT_Half |
14826 ADVERTISED_100baseT_Full |
14827 ADVERTISED_10baseT_Half |
14828 ADVERTISED_10baseT_Full |
14831 adv |= ADVERTISED_FIBRE;
14833 tp->link_config.advertising = adv;
14834 tp->link_config.speed = SPEED_UNKNOWN;
14835 tp->link_config.duplex = DUPLEX_UNKNOWN;
14836 tp->link_config.autoneg = AUTONEG_ENABLE;
14837 tp->link_config.active_speed = SPEED_UNKNOWN;
14838 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
}
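/* Identify the PHY: read the MII ID registers unless ASF/APE firmware
 * owns the PHY, then fall back to the NVRAM-provided ID and, failing
 * that, the hard-coded subsystem-ID table.
 */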
14843 static int tg3_phy_probe(struct tg3 *tp)
14845 u32 hw_phy_id_1, hw_phy_id_2;
14846 u32 hw_phy_id, hw_phy_id_masked;
14849 /* flow control autonegotiation is default behavior */
14850 tg3_flag_set(tp, PAUSE_AUTONEG);
14851 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14853 if (tg3_flag(tp, ENABLE_APE)) {
14854 switch (tp->pci_fn) {
14856 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
14859 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
14862 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
14865 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
14870 if (!tg3_flag(tp, ENABLE_ASF) &&
14871 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14872 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14873 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
14874 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
14876 if (tg3_flag(tp, USE_PHYLIB))
14877 return tg3_phy_init(tp);
14879 /* Reading the PHY ID register can conflict with ASF
14880 * firmware access to the PHY hardware.
*/
14883 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
14884 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
} else {
14886 /* Now read the physical PHY_ID from the chip and verify
14887 * that it is sane. If it doesn't look good, we fall back
14888 * to either the hard-coded table based PHY_ID or, failing
14889 * that, the value found in the eeprom area.
*/
14891 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
14892 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
14894 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
14895 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
14896 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
14898 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
}
14901 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
14902 tp->phy_id = hw_phy_id;
14903 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
14904 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14906 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
14908 if (tp->phy_id != TG3_PHY_ID_INVALID) {
14909 /* Do nothing, phy ID already set up in
14910 * tg3_get_eeprom_hw_cfg().
*/
} else {
14913 struct subsys_tbl_ent *p;

14915 /* No eeprom signature? Try the hardcoded
14916 * subsys device table.
*/
14918 p = tg3_lookup_by_subsys(tp);
if (p) {
14920 tp->phy_id = p->phy_id;
14921 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
14922 /* For now we saw the IDs 0xbc050cd0,
14923 * 0xbc050f80 and 0xbc050c30 on devices
14924 * connected to a BCM4785 and there are
14925 * probably more. Just assume that the phy is
14926 * supported when it is connected to an SSB core
* for now.
*/
return -ENODEV;
}
}

if (!tp->phy_id ||
14933 tp->phy_id == TG3_PHY_ID_BCM8002)
14934 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14938 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14939 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
14940 tg3_asic_rev(tp) == ASIC_REV_5720 ||
14941 tg3_asic_rev(tp) == ASIC_REV_57766 ||
14942 tg3_asic_rev(tp) == ASIC_REV_5762 ||
14943 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
14944 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
14945 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
14946 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
14947 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
14949 tg3_phy_init_link_config(tp);
14951 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
14952 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14953 !tg3_flag(tp, ENABLE_APE) &&
14954 !tg3_flag(tp, ENABLE_ASF)) {
14957 tg3_readphy(tp, MII_BMSR, &bmsr);
14958 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
14959 (bmsr & BMSR_LSTATUS))
14960 goto skip_phy_reset;
14962 err = tg3_phy_reset(tp);
14966 tg3_phy_set_wirespeed(tp);
14968 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
14969 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
14970 tp->link_config.flowctrl);
14972 tg3_writephy(tp, MII_BMCR,
14973 BMCR_ANENABLE | BMCR_ANRESTART);
14978 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
14979 err = tg3_init_5401phy_dsp(tp);
14983 err = tg3_init_5401phy_dsp(tp);
14989 static void tg3_read_vpd(struct tg3 *tp)
14992 unsigned int block_end, rosize, len;
14996 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15000 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15002 goto out_not_found;
15004 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15005 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15006 i += PCI_VPD_LRDT_TAG_SIZE;
15008 if (block_end > vpdlen)
15009 goto out_not_found;
15011 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15012 PCI_VPD_RO_KEYWORD_MFR_ID);
15014 len = pci_vpd_info_field_size(&vpd_data[j]);
15016 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15017 if (j + len > block_end || len != 4 ||
15018 memcmp(&vpd_data[j], "1028", 4))
15021 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15022 PCI_VPD_RO_KEYWORD_VENDOR0);
15026 len = pci_vpd_info_field_size(&vpd_data[j]);
15028 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15029 if (j + len > block_end)
15032 if (len >= sizeof(tp->fw_ver))
15033 len = sizeof(tp->fw_ver) - 1;
15034 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15035 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
&vpd_data[j]);
15040 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15041 PCI_VPD_RO_KEYWORD_PARTNO);
15043 goto out_not_found;
15045 len = pci_vpd_info_field_size(&vpd_data[i]);
15047 i += PCI_VPD_INFO_FLD_HDR_SIZE;
15048 if (len > TG3_BPN_SIZE ||
15049 (len + i) > vpdlen)
15050 goto out_not_found;
15052 memcpy(tp->board_part_number, &vpd_data[i], len);
15056 if (tp->board_part_number[0])
15060 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15061 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15062 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15063 strcpy(tp->board_part_number, "BCM5717");
15064 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15065 strcpy(tp->board_part_number, "BCM5718");
15068 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15069 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15070 strcpy(tp->board_part_number, "BCM57780");
15071 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15072 strcpy(tp->board_part_number, "BCM57760");
15073 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15074 strcpy(tp->board_part_number, "BCM57790");
15075 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15076 strcpy(tp->board_part_number, "BCM57788");
15079 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15080 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15081 strcpy(tp->board_part_number, "BCM57761");
15082 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15083 strcpy(tp->board_part_number, "BCM57765");
15084 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15085 strcpy(tp->board_part_number, "BCM57781");
15086 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15087 strcpy(tp->board_part_number, "BCM57785");
15088 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15089 strcpy(tp->board_part_number, "BCM57791");
15090 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15091 strcpy(tp->board_part_number, "BCM57795");
15094 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15095 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15096 strcpy(tp->board_part_number, "BCM57762");
15097 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15098 strcpy(tp->board_part_number, "BCM57766");
15099 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15100 strcpy(tp->board_part_number, "BCM57782");
15101 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15102 strcpy(tp->board_part_number, "BCM57786");
15105 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15106 strcpy(tp->board_part_number, "BCM95906");
15109 strcpy(tp->board_part_number, "none");
15113 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15117 if (tg3_nvram_read(tp, offset, &val) ||
15118 (val & 0xfc000000) != 0x0c000000 ||
15119 tg3_nvram_read(tp, offset + 4, &val) ||
15126 static void tg3_read_bc_ver(struct tg3 *tp)
15128 u32 val, offset, start, ver_offset;
15130 bool newver = false;
15132 if (tg3_nvram_read(tp, 0xc, &offset) ||
15133 tg3_nvram_read(tp, 0x4, &start))
15136 offset = tg3_nvram_logical_addr(tp, offset);
15138 if (tg3_nvram_read(tp, offset, &val))
15141 if ((val & 0xfc000000) == 0x0c000000) {
15142 if (tg3_nvram_read(tp, offset + 4, &val))
15149 dst_off = strlen(tp->fw_ver);
15152 if (TG3_VER_SIZE - dst_off < 16 ||
15153 tg3_nvram_read(tp, offset + 8, &ver_offset))
15156 offset = offset + ver_offset - start;
15157 for (i = 0; i < 16; i += 4) {
15159 if (tg3_nvram_read_be32(tp, offset + i, &v))
15162 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15167 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15170 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15171 TG3_NVM_BCVER_MAJSFT;
15172 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15173 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15174 "v%d.%02d", major, minor);
15178 static void tg3_read_hwsb_ver(struct tg3 *tp)
15180 u32 val, major, minor;
15182 /* Use native endian representation */
15183 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15186 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15187 TG3_NVM_HWSB_CFG1_MAJSFT;
15188 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15189 TG3_NVM_HWSB_CFG1_MINSFT;
15191 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15194 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15196 u32 offset, major, minor, build;
15198 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15200 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15203 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15204 case TG3_EEPROM_SB_REVISION_0:
15205 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15207 case TG3_EEPROM_SB_REVISION_2:
15208 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15210 case TG3_EEPROM_SB_REVISION_3:
15211 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15213 case TG3_EEPROM_SB_REVISION_4:
15214 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15216 case TG3_EEPROM_SB_REVISION_5:
15217 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15219 case TG3_EEPROM_SB_REVISION_6:
15220 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15226 if (tg3_nvram_read(tp, offset, &val))
15229 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15230 TG3_EEPROM_SB_EDH_BLD_SHFT;
15231 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15232 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15233 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
15235 if (minor > 99 || build > 26)
15238 offset = strlen(tp->fw_ver);
15239 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15240 " v%d.%02d", major, minor);
15243 offset = strlen(tp->fw_ver);
15244 if (offset < TG3_VER_SIZE - 1)
15245 tp->fw_ver[offset] = 'a' + build - 1;
15249 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15251 u32 val, offset, start;
15254 for (offset = TG3_NVM_DIR_START;
15255 offset < TG3_NVM_DIR_END;
15256 offset += TG3_NVM_DIRENT_SIZE) {
15257 if (tg3_nvram_read(tp, offset, &val))
15260 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15264 if (offset == TG3_NVM_DIR_END)
15267 if (!tg3_flag(tp, 5705_PLUS))
15268 start = 0x08000000;
15269 else if (tg3_nvram_read(tp, offset - 4, &start))
15272 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15273 !tg3_fw_img_is_valid(tp, offset) ||
15274 tg3_nvram_read(tp, offset + 8, &val))
15277 offset += val - start;
15279 vlen = strlen(tp->fw_ver);
15281 tp->fw_ver[vlen++] = ',';
15282 tp->fw_ver[vlen++] = ' ';
15284 for (i = 0; i < 4; i++) {
15286 if (tg3_nvram_read_be32(tp, offset, &v))
15289 offset += sizeof(v);
15291 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15292 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15296 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15301 static void tg3_probe_ncsi(struct tg3 *tp)
15305 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15306 if (apedata != APE_SEG_SIG_MAGIC)
return;

15309 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15310 if (!(apedata & APE_FW_STATUS_READY))
return;
15313 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15314 tg3_flag_set(tp, APE_HAS_NCSI);
15317 static void tg3_read_dash_ver(struct tg3 *tp)
15323 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15325 if (tg3_flag(tp, APE_HAS_NCSI))
fwtype = "NCSI";
15327 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
fwtype = "SMASH";
else
fwtype = "DASH";

15332 vlen = strlen(tp->fw_ver);

15334 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
fwtype,
15336 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15337 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15338 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15339 (apedata & APE_FW_VERSION_BLDMSK));
}
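/* On 5762 parts, append the OTP-programmed version number to the
 * firmware version string when the OTP magic value validates.
 */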
15342 static void tg3_read_otp_ver(struct tg3 *tp)
15346 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15349 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15350 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15351 TG3_OTP_MAGIC0_VALID(val)) {
15352 u64 val64 = (u64) val << 32 | val2;
15356 for (i = 0; i < 7; i++) {
15357 if ((val64 & 0xff) == 0)
15359 ver = val64 & 0xff;
15362 vlen = strlen(tp->fw_ver);
15363 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15367 static void tg3_read_fw_ver(struct tg3 *tp)
15370 bool vpd_vers = false;
15372 if (tp->fw_ver[0] != 0)
vpd_vers = true;

15375 if (tg3_flag(tp, NO_NVRAM)) {
15376 strcat(tp->fw_ver, "sb");
15377 tg3_read_otp_ver(tp);
return;
}

15381 if (tg3_nvram_read(tp, 0, &val))
return;
15384 if (val == TG3_EEPROM_MAGIC)
15385 tg3_read_bc_ver(tp);
15386 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15387 tg3_read_sb_ver(tp, val);
15388 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15389 tg3_read_hwsb_ver(tp);
15391 if (tg3_flag(tp, ENABLE_ASF)) {
15392 if (tg3_flag(tp, ENABLE_APE)) {
15393 tg3_probe_ncsi(tp);
15395 tg3_read_dash_ver(tp);
15396 } else if (!vpd_vers) {
15397 tg3_read_mgmtfw_ver(tp);
15401 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15404 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15406 if (tg3_flag(tp, LRG_PROD_RING_CAP))
15407 return TG3_RX_RET_MAX_SIZE_5717;
15408 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15409 return TG3_RX_RET_MAX_SIZE_5700;
15411 return TG3_RX_RET_MAX_SIZE_5705;
}
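/* Host bridges known to reorder posted writes to the mailbox registers;
 * when one is present (and the NIC is not PCIe), the MBOX_WRITE_REORDER
 * handling below forces a read-back after every mailbox write.
 */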
15414 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15415 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15416 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15417 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15421 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15423 struct pci_dev *peer;
15424 unsigned int func, devnr = tp->pdev->devfn & ~7;
15426 for (func = 0; func < 8; func++) {
15427 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15428 if (peer && peer != tp->pdev)
15432 /* 5704 can be configured in single-port mode, set peer to
15433 * tp->pdev in that case.
*/
if (!peer) {
peer = tp->pdev;
return peer;
}

/*
15441 * We don't need to keep the refcount elevated; there's no way
15442 * to remove one half of this device without removing the other
*/
pci_dev_put(peer);
return peer;
}
15449 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15451 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15452 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15455 /* All devices that use the alternate
15456 * ASIC REV location have a CPMU.
*/
15458 tg3_flag_set(tp, CPMU_PRESENT);
15460 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15461 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15462 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15463 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15464 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15465 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15466 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15467 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
15468 reg = TG3PCI_GEN2_PRODID_ASICREV;
15469 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15470 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15471 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15472 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15473 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15474 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15475 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15476 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15477 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15478 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15479 reg = TG3PCI_GEN15_PRODID_ASICREV;
15481 reg = TG3PCI_PRODID_ASICREV;
15483 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15486 /* Wrong chip ID in 5752 A0. This code can be removed later
15487 * as A0 is not in production.
*/
15489 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15490 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15492 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15493 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15495 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15496 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15497 tg3_asic_rev(tp) == ASIC_REV_5720)
15498 tg3_flag_set(tp, 5717_PLUS);
15500 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15501 tg3_asic_rev(tp) == ASIC_REV_57766)
15502 tg3_flag_set(tp, 57765_CLASS);
15504 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15505 tg3_asic_rev(tp) == ASIC_REV_5762)
15506 tg3_flag_set(tp, 57765_PLUS);
15508 /* Intentionally exclude ASIC_REV_5906 */
15509 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15510 tg3_asic_rev(tp) == ASIC_REV_5787 ||
15511 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15512 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15513 tg3_asic_rev(tp) == ASIC_REV_5785 ||
15514 tg3_asic_rev(tp) == ASIC_REV_57780 ||
15515 tg3_flag(tp, 57765_PLUS))
15516 tg3_flag_set(tp, 5755_PLUS);
15518 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15519 tg3_asic_rev(tp) == ASIC_REV_5714)
15520 tg3_flag_set(tp, 5780_CLASS);
15522 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15523 tg3_asic_rev(tp) == ASIC_REV_5752 ||
15524 tg3_asic_rev(tp) == ASIC_REV_5906 ||
15525 tg3_flag(tp, 5755_PLUS) ||
15526 tg3_flag(tp, 5780_CLASS))
15527 tg3_flag_set(tp, 5750_PLUS);
15529 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15530 tg3_flag(tp, 5750_PLUS))
15531 tg3_flag_set(tp, 5705_PLUS);
}
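/* Decide whether this board must be treated as 10/100-only, based on the
 * board ID straps, the FET PHY flag and the driver_data flags from the
 * PCI device table.
 */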
15534 static bool tg3_10_100_only_device(struct tg3 *tp,
15535 const struct pci_device_id *ent)
15537 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15539 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15540 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15541 (tp->phy_flags & TG3_PHYFLG_IS_FET))
15544 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15545 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15546 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15556 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15559 u32 pci_state_reg, grc_misc_cfg;
15564 /* Force memory write invalidate off. If we leave it on,
15565 * then on 5700_BX chips we have to enable a workaround.
15566 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15567 * to match the cacheline size. The Broadcom driver has this
15568 * workaround but turns MWI off at all times and so never uses
15569 * it. This seems to suggest that the workaround is insufficient.
*/
15571 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15572 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15573 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15575 /* Important! -- Make sure register accesses are byteswapped
15576 * correctly. Also, for those chips that require it, make
15577 * sure that indirect register accesses are enabled before
15578 * the first operation.
*/
15580 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15582 tp->misc_host_ctrl |= (misc_ctrl_reg &
15583 MISC_HOST_CTRL_CHIPREV);
15584 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15585 tp->misc_host_ctrl);
15587 tg3_detect_asic_rev(tp, misc_ctrl_reg);
15589 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15590 * we need to disable memory and use config. cycles
15591 * only to access all registers. The 5702/03 chips
15592 * can mistakenly decode the special cycles from the
15593 * ICH chipsets as memory write cycles, causing corruption
15594 * of register and memory space. Only certain ICH bridges
15595 * will drive special cycles with non-zero data during the
15596 * address phase which can fall within the 5703's address
15597 * range. This is not an ICH bug as the PCI spec allows
15598 * non-zero address during special cycles. However, only
15599 * these ICH bridges are known to drive non-zero addresses
15600 * during special cycles.
15602 * Since special cycles do not cross PCI bridges, we only
15603 * enable this workaround if the 5703 is on the secondary
15604 * bus of these ICH bridges.
*/
15606 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15607 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15608 static struct tg3_dev_id {
15612 } ich_chipsets[] = {
15613 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15615 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15617 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15619 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15623 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15624 struct pci_dev *bridge = NULL;
15626 while (pci_id->vendor != 0) {
15627 bridge = pci_get_device(pci_id->vendor, pci_id->device,
15633 if (pci_id->rev != PCI_ANY_ID) {
15634 if (bridge->revision > pci_id->rev)
15637 if (bridge->subordinate &&
15638 (bridge->subordinate->number ==
15639 tp->pdev->bus->number)) {
15640 tg3_flag_set(tp, ICH_WORKAROUND);
15641 pci_dev_put(bridge);
15647 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15648 static struct tg3_dev_id {
15651 } bridge_chipsets[] = {
15652 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15653 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15656 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15657 struct pci_dev *bridge = NULL;
15659 while (pci_id->vendor != 0) {
15660 bridge = pci_get_device(pci_id->vendor,
15667 if (bridge->subordinate &&
15668 (bridge->subordinate->number <=
15669 tp->pdev->bus->number) &&
15670 (bridge->subordinate->busn_res.end >=
15671 tp->pdev->bus->number)) {
15672 tg3_flag_set(tp, 5701_DMA_BUG);
15673 pci_dev_put(bridge);
15679 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15680 * DMA addresses > 40-bit. This bridge may have other additional
15681 * 57xx devices behind it in some 4-port NIC designs for example.
15682 * Any tg3 device found behind the bridge will also need the 40-bit
15685 if (tg3_flag(tp, 5780_CLASS)) {
15686 tg3_flag_set(tp, 40BIT_DMA_BUG);
15687 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15689 struct pci_dev *bridge = NULL;
15692 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15693 PCI_DEVICE_ID_SERVERWORKS_EPB,
15695 if (bridge && bridge->subordinate &&
15696 (bridge->subordinate->number <=
15697 tp->pdev->bus->number) &&
15698 (bridge->subordinate->busn_res.end >=
15699 tp->pdev->bus->number)) {
15700 tg3_flag_set(tp, 40BIT_DMA_BUG);
15701 pci_dev_put(bridge);
15707 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15708 tg3_asic_rev(tp) == ASIC_REV_5714)
15709 tp->pdev_peer = tg3_find_peer(tp);
15711 /* Determine TSO capabilities */
15712 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
15713 ; /* Do nothing. HW bug. */
15714 else if (tg3_flag(tp, 57765_PLUS))
15715 tg3_flag_set(tp, HW_TSO_3);
15716 else if (tg3_flag(tp, 5755_PLUS) ||
15717 tg3_asic_rev(tp) == ASIC_REV_5906)
15718 tg3_flag_set(tp, HW_TSO_2);
15719 else if (tg3_flag(tp, 5750_PLUS)) {
15720 tg3_flag_set(tp, HW_TSO_1);
15721 tg3_flag_set(tp, TSO_BUG);
15722 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
15723 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
15724 tg3_flag_clear(tp, TSO_BUG);
15725 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15726 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15727 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
15728 tg3_flag_set(tp, FW_TSO);
15729 tg3_flag_set(tp, TSO_BUG);
15730 if (tg3_asic_rev(tp) == ASIC_REV_5705)
15731 tp->fw_needed = FIRMWARE_TG3TSO5;
15733 tp->fw_needed = FIRMWARE_TG3TSO;
15736 /* Selectively allow TSO based on operating conditions */
15737 if (tg3_flag(tp, HW_TSO_1) ||
15738 tg3_flag(tp, HW_TSO_2) ||
15739 tg3_flag(tp, HW_TSO_3) ||
15740 tg3_flag(tp, FW_TSO)) {
15741 /* For firmware TSO, assume ASF is disabled.
15742 * We'll disable TSO later if we discover ASF
15743 * is enabled in tg3_get_eeprom_hw_cfg().
*/
15745 tg3_flag_set(tp, TSO_CAPABLE);
15747 tg3_flag_clear(tp, TSO_CAPABLE);
15748 tg3_flag_clear(tp, TSO_BUG);
15749 tp->fw_needed = NULL;
15752 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
15753 tp->fw_needed = FIRMWARE_TG3;
15755 if (tg3_asic_rev(tp) == ASIC_REV_57766)
15756 tp->fw_needed = FIRMWARE_TG357766;
15760 if (tg3_flag(tp, 5750_PLUS)) {
15761 tg3_flag_set(tp, SUPPORT_MSI);
15762 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
15763 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
15764 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
15765 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
15766 tp->pdev_peer == tp->pdev))
15767 tg3_flag_clear(tp, SUPPORT_MSI);
15769 if (tg3_flag(tp, 5755_PLUS) ||
15770 tg3_asic_rev(tp) == ASIC_REV_5906) {
15771 tg3_flag_set(tp, 1SHOT_MSI);
15774 if (tg3_flag(tp, 57765_PLUS)) {
15775 tg3_flag_set(tp, SUPPORT_MSIX);
15776 tp->irq_max = TG3_IRQ_MAX_VECS;
15782 if (tp->irq_max > 1) {
15783 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15784 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15786 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15787 tg3_asic_rev(tp) == ASIC_REV_5720)
15788 tp->txq_max = tp->irq_max - 1;
15791 if (tg3_flag(tp, 5755_PLUS) ||
15792 tg3_asic_rev(tp) == ASIC_REV_5906)
15793 tg3_flag_set(tp, SHORT_DMA_BUG);
15795 if (tg3_asic_rev(tp) == ASIC_REV_5719)
15796 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15798 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15799 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15800 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15801 tg3_asic_rev(tp) == ASIC_REV_5762)
15802 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15804 if (tg3_flag(tp, 57765_PLUS) &&
15805 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
15806 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15808 if (!tg3_flag(tp, 5705_PLUS) ||
15809 tg3_flag(tp, 5780_CLASS) ||
15810 tg3_flag(tp, USE_JUMBO_BDFLAG))
15811 tg3_flag_set(tp, JUMBO_CAPABLE);
15813 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15816 if (pci_is_pcie(tp->pdev)) {
15819 tg3_flag_set(tp, PCI_EXPRESS);
15821 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15822 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15823 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15824 tg3_flag_clear(tp, HW_TSO_2);
15825 tg3_flag_clear(tp, TSO_CAPABLE);
15827 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
15828 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15829 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
15830 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
15831 tg3_flag_set(tp, CLKREQ_BUG);
15832 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
15833 tg3_flag_set(tp, L1PLLPD_EN);
15835 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
15836 /* BCM5785 devices are effectively PCIe devices, and should
15837 * follow PCIe codepaths, but do not have a PCIe capabilities
* section.
*/
15840 tg3_flag_set(tp, PCI_EXPRESS);
15841 } else if (!tg3_flag(tp, 5705_PLUS) ||
15842 tg3_flag(tp, 5780_CLASS)) {
15843 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15844 if (!tp->pcix_cap) {
15845 dev_err(&tp->pdev->dev,
15846 "Cannot find PCI-X capability, aborting\n");
15850 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15851 tg3_flag_set(tp, PCIX_MODE);
15854 /* If we have an AMD 762 or VIA K8T800 chipset, write
15855 * reordering to the mailbox registers done by the host
15856 * controller can cause major troubles. We read back from
15857 * every mailbox register write to force the writes to be
15858 * posted to the chip in order.
*/
15860 if (pci_dev_present(tg3_write_reorder_chipsets) &&
15861 !tg3_flag(tp, PCI_EXPRESS))
15862 tg3_flag_set(tp, MBOX_WRITE_REORDER);
15864 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15865 &tp->pci_cacheline_sz);
15866 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15867 &tp->pci_lat_timer);
15868 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
15869 tp->pci_lat_timer < 64) {
15870 tp->pci_lat_timer = 64;
15871 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15872 tp->pci_lat_timer);
15875 /* Important! -- It is critical that the PCI-X hw workaround
15876 * situation is decided before the first MMIO register access.
*/
15878 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
15879 /* 5700 BX chips need to have their TX producer index
15880 * mailboxes written twice to workaround a bug.
*/
15882 tg3_flag_set(tp, TXD_MBOX_HWBUG);
15884 /* If we are in PCI-X mode, enable register write workaround.
15886 * The workaround is to use indirect register accesses
15887 * for all chip writes not to mailbox registers.
*/
15889 if (tg3_flag(tp, PCIX_MODE)) {
15892 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15894 /* The chip can have its power management PCI config
15895 * space registers clobbered due to this bug.
15896 * So explicitly force the chip into D0 here.
*/
15898 pci_read_config_dword(tp->pdev,
15899 tp->pm_cap + PCI_PM_CTRL,
15901 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
15902 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
15903 pci_write_config_dword(tp->pdev,
15904 tp->pm_cap + PCI_PM_CTRL,
15907 /* Also, force SERR#/PERR# in PCI command. */
15908 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15909 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
15910 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15914 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
15915 tg3_flag_set(tp, PCI_HIGH_SPEED);
15916 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
15917 tg3_flag_set(tp, PCI_32BIT);
15919 /* Chip-specific fixup from Broadcom driver */
15920 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
15921 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
15922 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
15923 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
15926 /* Default fast path register access methods */
15927 tp->read32 = tg3_read32;
15928 tp->write32 = tg3_write32;
15929 tp->read32_mbox = tg3_read32;
15930 tp->write32_mbox = tg3_write32;
15931 tp->write32_tx_mbox = tg3_write32;
15932 tp->write32_rx_mbox = tg3_write32;
15934 /* Various workaround register access methods */
15935 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
15936 tp->write32 = tg3_write_indirect_reg32;
15937 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
15938 (tg3_flag(tp, PCI_EXPRESS) &&
15939 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
/*
15941 * Back to back register writes can cause problems on these
15942 * chips; the workaround is to read back all reg writes
15943 * except those to mailbox regs.
15945 * See tg3_write_indirect_reg32().
*/
15947 tp->write32 = tg3_write_flush_reg32;
15950 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
15951 tp->write32_tx_mbox = tg3_write32_tx_mbox;
15952 if (tg3_flag(tp, MBOX_WRITE_REORDER))
15953 tp->write32_rx_mbox = tg3_write_flush_reg32;
15956 if (tg3_flag(tp, ICH_WORKAROUND)) {
15957 tp->read32 = tg3_read_indirect_reg32;
15958 tp->write32 = tg3_write_indirect_reg32;
15959 tp->read32_mbox = tg3_read_indirect_mbox;
15960 tp->write32_mbox = tg3_write_indirect_mbox;
15961 tp->write32_tx_mbox = tg3_write_indirect_mbox;
15962 tp->write32_rx_mbox = tg3_write_indirect_mbox;
15967 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15968 pci_cmd &= ~PCI_COMMAND_MEMORY;
15969 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15971 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15972 tp->read32_mbox = tg3_read32_mbox_5906;
15973 tp->write32_mbox = tg3_write32_mbox_5906;
15974 tp->write32_tx_mbox = tg3_write32_mbox_5906;
15975 tp->write32_rx_mbox = tg3_write32_mbox_5906;
15978 if (tp->write32 == tg3_write_indirect_reg32 ||
15979 (tg3_flag(tp, PCIX_MODE) &&
15980 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15981 tg3_asic_rev(tp) == ASIC_REV_5701)))
15982 tg3_flag_set(tp, SRAM_USE_CONFIG);
15984 /* The memory arbiter has to be enabled in order for SRAM accesses
15985 * to succeed. Normally on powerup the tg3 chip firmware will make
15986 * sure it is enabled, but other entities such as system netboot
15987 * code might disable it.
*/
15989 val = tr32(MEMARB_MODE);
15990 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
15992 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
15993 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15994 tg3_flag(tp, 5780_CLASS)) {
15995 if (tg3_flag(tp, PCIX_MODE)) {
15996 pci_read_config_dword(tp->pdev,
15997 tp->pcix_cap + PCI_X_STATUS,
15999 tp->pci_fn = val & 0x7;
16001 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16002 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16003 tg3_asic_rev(tp) == ASIC_REV_5720) {
16004 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16005 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16006 val = tr32(TG3_CPMU_STATUS);
16008 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16009 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16011 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16012 TG3_CPMU_STATUS_FSHFT_5719;
16015 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16016 tp->write32_tx_mbox = tg3_write_flush_reg32;
16017 tp->write32_rx_mbox = tg3_write_flush_reg32;
16020 /* Get eeprom hw config before calling tg3_set_power_state().
16021 * In particular, the TG3_FLAG_IS_NIC flag must be
16022 * determined before calling tg3_set_power_state() so that
16023 * we know whether or not to switch out of Vaux power.
16024 * When the flag is set, it means that GPIO1 is used for eeprom
16025 * write protect and also implies that it is a LOM where GPIOs
16026 * are not used to switch power.
16027 */
16028 tg3_get_eeprom_hw_cfg(tp);
16030 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16031 tg3_flag_clear(tp, TSO_CAPABLE);
16032 tg3_flag_clear(tp, TSO_BUG);
16033 tp->fw_needed = NULL;
16036 if (tg3_flag(tp, ENABLE_APE)) {
16037 /* Allow reads and writes to the
16038 * APE register and memory space.
16039 */
16040 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16041 PCISTATE_ALLOW_APE_SHMEM_WR |
16042 PCISTATE_ALLOW_APE_PSPACE_WR;
16043 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16044 pci_state_reg);
16046 tg3_ape_lock_init(tp);
16047 }
16049 /* Set up tp->grc_local_ctrl before calling
16050 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16051 * will bring 5700's external PHY out of reset.
16052 * It is also used as eeprom write protect on LOMs.
16053 */
16054 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16055 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16056 tg3_flag(tp, EEPROM_WRITE_PROT))
16057 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16058 GRC_LCLCTRL_GPIO_OUTPUT1);
16059 /* Unused GPIO3 must be driven as output on 5752 because there
16060 * are no pull-up resistors on unused GPIO pins.
16061 */
16062 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16063 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16065 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16066 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16067 tg3_flag(tp, 57765_CLASS))
16068 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16070 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16071 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16072 /* Turn off the debug UART. */
16073 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16074 if (tg3_flag(tp, IS_NIC))
16075 /* Keep VMain power. */
16076 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16077 GRC_LCLCTRL_GPIO_OUTPUT0;
16080 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16081 tp->grc_local_ctrl |=
16082 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16084 /* Switch out of Vaux if it is a NIC */
16085 tg3_pwrsrc_switch_to_vmain(tp);
16087 /* Derive initial jumbo mode from MTU assigned in
16088 * ether_setup() via the alloc_etherdev() call
16089 */
16090 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16091 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16093 /* Determine WakeOnLan speed to use. */
16094 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16095 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16096 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16097 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16098 tg3_flag_clear(tp, WOL_SPEED_100MB);
16099 } else {
16100 tg3_flag_set(tp, WOL_SPEED_100MB);
16101 }
16103 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16104 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16106 /* A few boards don't want Ethernet@WireSpeed phy feature */
16107 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16108 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16109 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16110 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16111 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16112 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16113 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16115 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16116 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16117 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16118 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16119 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16121 if (tg3_flag(tp, 5705_PLUS) &&
16122 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16123 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16124 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16125 !tg3_flag(tp, 57765_PLUS)) {
16126 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16127 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16128 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16129 tg3_asic_rev(tp) == ASIC_REV_5761) {
16130 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16131 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16132 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16133 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16134 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16136 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16139 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16140 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16141 tp->phy_otp = tg3_read_otp_phycfg(tp);
16142 if (tp->phy_otp == 0)
16143 tp->phy_otp = TG3_OTP_DEFAULT;
16146 if (tg3_flag(tp, CPMU_PRESENT))
16147 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16148 else
16149 tp->mi_mode = MAC_MI_MODE_BASE;
16151 tp->coalesce_mode = 0;
16152 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16153 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16154 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16156 /* Set these bits to enable statistics workaround. */
16157 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16158 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16159 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16160 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16161 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16164 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16165 tg3_asic_rev(tp) == ASIC_REV_57780)
16166 tg3_flag_set(tp, USE_PHYLIB);
16168 err = tg3_mdio_init(tp);
16169 if (err)
16170 return err;
16172 /* Initialize data/descriptor byte/word swapping. */
16173 val = tr32(GRC_MODE);
16174 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16175 tg3_asic_rev(tp) == ASIC_REV_5762)
16176 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16177 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16178 GRC_MODE_B2HRX_ENABLE |
16179 GRC_MODE_HTX2B_ENABLE |
16180 GRC_MODE_HOST_STACKUP);
16181 else
16182 val &= GRC_MODE_HOST_STACKUP;
16184 tw32(GRC_MODE, val | tp->grc_mode);
16186 tg3_switch_clocks(tp);
16188 /* Clear this out for sanity. */
16189 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16191 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16193 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16194 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16195 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16196 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16197 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16198 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16199 void __iomem *sram_base;
16201 /* Write some dummy words into the SRAM status block
16202 * area, see if it reads back correctly. If the return
16203 * value is bad, force enable the PCIX workaround.
16204 */
16205 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16207 writel(0x00000000, sram_base);
16208 writel(0x00000000, sram_base + 4);
16209 writel(0xffffffff, sram_base + 4);
16210 if (readl(sram_base) != 0x00000000)
16211 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16216 tg3_nvram_init(tp);
16218 /* If the device has an NVRAM, no need to load patch firmware */
16219 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16220 !tg3_flag(tp, NO_NVRAM))
16221 tp->fw_needed = NULL;
16223 grc_misc_cfg = tr32(GRC_MISC_CFG);
16224 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16226 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16227 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16228 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16229 tg3_flag_set(tp, IS_5788);
16231 if (!tg3_flag(tp, IS_5788) &&
16232 tg3_asic_rev(tp) != ASIC_REV_5700)
16233 tg3_flag_set(tp, TAGGED_STATUS);
16234 if (tg3_flag(tp, TAGGED_STATUS)) {
16235 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16236 HOSTCC_MODE_CLRTICK_TXBD);
16238 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16239 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16240 tp->misc_host_ctrl);
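/*
 * In tagged-status mode the chip stamps each status block update with a
 * tag that the driver writes back when acknowledging the interrupt, so
 * the CLRTICK bits and MISC_HOST_CTRL_TAGGED_STATUS set here let chip
 * and driver agree on which status update has already been seen.
 */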
16243 /* Preserve the APE MAC_MODE bits */
16244 if (tg3_flag(tp, ENABLE_APE))
16245 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16249 if (tg3_10_100_only_device(tp, ent))
16250 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16252 err = tg3_phy_probe(tp);
16253 if (err) {
16254 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16255 /* ... but do not return immediately ... */
16256 tg3_mdio_fini(tp);
16257 }
16260 tg3_read_fw_ver(tp);
16262 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16263 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16265 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16266 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16267 } else
16268 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16271 /* 5700 {AX,BX} chips have a broken status block link
16272 * change bit implementation, so we must use the
16273 * status register in those cases.
16274 */
16275 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16276 tg3_flag_set(tp, USE_LINKCHG_REG);
16277 else
16278 tg3_flag_clear(tp, USE_LINKCHG_REG);
16280 /* The led_ctrl is set during tg3_phy_probe, here we might
16281 * have to force the link status polling mechanism based
16282 * upon subsystem IDs.
16283 */
16284 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16285 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16286 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16287 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16288 tg3_flag_set(tp, USE_LINKCHG_REG);
16291 /* For all SERDES we poll the MAC status register. */
16292 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16293 tg3_flag_set(tp, POLL_SERDES);
16294 else
16295 tg3_flag_clear(tp, POLL_SERDES);
16297 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16298 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16299 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16300 tg3_flag(tp, PCIX_MODE)) {
16301 tp->rx_offset = NET_SKB_PAD;
16302 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16303 tp->rx_copy_thresh = ~(u16)0;
16304 #endif
16305 }
16307 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16308 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16309 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
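/*
 * The ring sizes are powers of two, so (size - 1) doubles as an index
 * mask and wrap-around becomes a cheap AND, e.g.
 *
 *	next = (idx + 1) & tp->rx_std_ring_mask;
 */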
16311 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16313 /* Increment the rx prod index on the rx std ring by at most
16314 * 8 for these chips to work around hw errata.
16315 */
16316 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16317 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16318 tg3_asic_rev(tp) == ASIC_REV_5755)
16319 tp->rx_std_max_post = 8;
16321 if (tg3_flag(tp, ASPM_WORKAROUND))
16322 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16323 PCIE_PWR_MGMT_L1_THRESH_MSK;
16328 #ifdef CONFIG_SPARC
16329 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16331 struct net_device *dev = tp->dev;
16332 struct pci_dev *pdev = tp->pdev;
16333 struct device_node *dp = pci_device_to_OF_node(pdev);
16334 const unsigned char *addr;
16335 int len;
16337 addr = of_get_property(dp, "local-mac-address", &len);
16338 if (addr && len == 6) {
16339 memcpy(dev->dev_addr, addr, 6);
16345 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16347 struct net_device *dev = tp->dev;
16349 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
16354 static int tg3_get_device_address(struct tg3 *tp)
16356 struct net_device *dev = tp->dev;
16357 u32 hi, lo, mac_offset;
16358 int addr_ok = 0;
16359 int err;
16361 #ifdef CONFIG_SPARC
16362 if (!tg3_get_macaddr_sparc(tp))
16363 return 0;
16364 #endif
16366 if (tg3_flag(tp, IS_SSB_CORE)) {
16367 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16368 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16373 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16374 tg3_flag(tp, 5780_CLASS)) {
16375 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16377 if (tg3_nvram_lock(tp))
16378 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16380 tg3_nvram_unlock(tp);
16381 } else if (tg3_flag(tp, 5717_PLUS)) {
16382 if (tp->pci_fn & 1)
16384 if (tp->pci_fn > 1)
16385 mac_offset += 0x18c;
16386 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16389 /* First try to get it from MAC address mailbox. */
16390 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
16391 if ((hi >> 16) == 0x484b) {
16392 dev->dev_addr[0] = (hi >> 8) & 0xff;
16393 dev->dev_addr[1] = (hi >> 0) & 0xff;
16395 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16396 dev->dev_addr[2] = (lo >> 24) & 0xff;
16397 dev->dev_addr[3] = (lo >> 16) & 0xff;
16398 dev->dev_addr[4] = (lo >> 8) & 0xff;
16399 dev->dev_addr[5] = (lo >> 0) & 0xff;
16401 /* Some old bootcode may report a 0 MAC address in SRAM */
16402 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
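/*
 * Fallback order for the MAC address: the SRAM mailbox filled in by the
 * bootcode (checked above), then NVRAM, and finally whatever the
 * MAC_ADDR_0 registers currently hold.
 */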
16405 /* Next, try NVRAM. */
16406 if (!tg3_flag(tp, NO_NVRAM) &&
16407 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16408 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16409 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16410 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16412 /* Finally just fetch it out of the MAC control regs. */
16414 hi = tr32(MAC_ADDR_0_HIGH);
16415 lo = tr32(MAC_ADDR_0_LOW);
16417 dev->dev_addr[5] = lo & 0xff;
16418 dev->dev_addr[4] = (lo >> 8) & 0xff;
16419 dev->dev_addr[3] = (lo >> 16) & 0xff;
16420 dev->dev_addr[2] = (lo >> 24) & 0xff;
16421 dev->dev_addr[1] = hi & 0xff;
16422 dev->dev_addr[0] = (hi >> 8) & 0xff;
16426 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16427 #ifdef CONFIG_SPARC
16428 if (!tg3_get_default_macaddr_sparc(tp))
16436 #define BOUNDARY_SINGLE_CACHELINE 1
16437 #define BOUNDARY_MULTI_CACHELINE 2
16439 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16441 int cacheline_size;
16445 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16446 if (byte == 0)
16447 cacheline_size = 1024;
16448 else
16449 cacheline_size = (int) byte * 4;
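/*
 * PCI_CACHE_LINE_SIZE is expressed in 32-bit dwords, hence the "* 4" to
 * convert to bytes; a value of 0 usually means firmware never programmed
 * it, in which case a large 1024-byte boundary is assumed.
 */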
16451 /* On 5703 and later chips, the boundary bits have no
16452 * effect.
16453 */
16454 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16455 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16456 !tg3_flag(tp, PCI_EXPRESS))
16459 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16460 goal = BOUNDARY_MULTI_CACHELINE;
16462 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16463 goal = BOUNDARY_SINGLE_CACHELINE;
16469 if (tg3_flag(tp, 57765_PLUS)) {
16470 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16477 /* PCI controllers on most RISC systems tend to disconnect
16478 * when a device tries to burst across a cache-line boundary.
16479 * Therefore, letting tg3 do so just wastes PCI bandwidth.
16481 * Unfortunately, for PCI-E there are only limited
16482 * write-side controls for this, and thus for reads
16483 * we will still get the disconnects. We'll also waste
16484 * these PCI cycles for both read and write for chips
16485 * other than 5700 and 5701 which do not implement the
16486 * boundary restrictions.
16487 */
16488 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16489 switch (cacheline_size) {
16494 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16495 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16496 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16498 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16499 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16504 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16505 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16509 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16510 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16513 } else if (tg3_flag(tp, PCI_EXPRESS)) {
16514 switch (cacheline_size) {
16518 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16519 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16520 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16526 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16527 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16531 switch (cacheline_size) {
16533 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16534 val |= (DMA_RWCTRL_READ_BNDRY_16 |
16535 DMA_RWCTRL_WRITE_BNDRY_16);
16540 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16541 val |= (DMA_RWCTRL_READ_BNDRY_32 |
16542 DMA_RWCTRL_WRITE_BNDRY_32);
16547 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16548 val |= (DMA_RWCTRL_READ_BNDRY_64 |
16549 DMA_RWCTRL_WRITE_BNDRY_64);
16554 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16555 val |= (DMA_RWCTRL_READ_BNDRY_128 |
16556 DMA_RWCTRL_WRITE_BNDRY_128);
16561 val |= (DMA_RWCTRL_READ_BNDRY_256 |
16562 DMA_RWCTRL_WRITE_BNDRY_256);
16565 val |= (DMA_RWCTRL_READ_BNDRY_512 |
16566 DMA_RWCTRL_WRITE_BNDRY_512);
16570 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16571 DMA_RWCTRL_WRITE_BNDRY_1024);
16580 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16581 int size, bool to_device)
16583 struct tg3_internal_buffer_desc test_desc;
16584 u32 sram_dma_descs;
16587 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16589 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16590 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16591 tw32(RDMAC_STATUS, 0);
16592 tw32(WDMAC_STATUS, 0);
16594 tw32(BUFMGR_MODE, 0);
16595 tw32(FTQ_RESET, 0);
16597 test_desc.addr_hi = ((u64) buf_dma) >> 32;
16598 test_desc.addr_lo = buf_dma & 0xffffffff;
16599 test_desc.nic_mbuf = 0x00002100;
16600 test_desc.len = size;
16602 /*
16603 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
16604 * the *second* time the tg3 driver was getting loaded after an
16605 * initial scan.
16606 *
16607 * Broadcom tells me:
16608 * ...the DMA engine is connected to the GRC block and a DMA
16609 * reset may affect the GRC block in some unpredictable way...
16610 * The behavior of resets to individual blocks has not been tested.
16611 *
16612 * Broadcom noted the GRC reset will also reset all sub-components.
16613 */
16614 if (to_device) {
16615 test_desc.cqid_sqid = (13 << 8) | 2;
16617 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16618 udelay(40);
16619 } else {
16620 test_desc.cqid_sqid = (16 << 8) | 7;
16622 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16623 udelay(40);
16624 }
16625 test_desc.flags = 0x00000005;
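/*
 * The descriptor built above is copied word by word into NIC-internal
 * SRAM through the PCI memory window registers below, then the DMA
 * engine is kicked by queueing the descriptor address on the high
 * priority read or write FIFO.
 */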
16627 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16630 val = *(((u32 *)&test_desc) + i);
16631 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16632 sram_dma_descs + (i * sizeof(u32)));
16633 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16635 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
16638 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
16640 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
16643 for (i = 0; i < 40; i++) {
16647 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16649 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16650 if ((val & 0xffff) == sram_dma_descs) {
16661 #define TEST_BUFFER_SIZE 0x2000
16663 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16664 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16668 static int tg3_test_dma(struct tg3 *tp)
16670 dma_addr_t buf_dma;
16671 u32 *buf, saved_dma_rwctrl;
16674 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16675 &buf_dma, GFP_KERNEL);
16681 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16682 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16684 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
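/*
 * DMA_RW_CTRL combines the DMA read/write watermarks with the boundary
 * behaviour.  The bus-specific cases below (PCIe, PCI-X, plain PCI)
 * adjust the watermark fields on top of the boundary bits computed by
 * tg3_calc_dma_bndry() above.
 */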
16686 if (tg3_flag(tp, 57765_PLUS))
16689 if (tg3_flag(tp, PCI_EXPRESS)) {
16690 /* DMA read watermark not used on PCIE */
16691 tp->dma_rwctrl |= 0x00180000;
16692 } else if (!tg3_flag(tp, PCIX_MODE)) {
16693 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16694 tg3_asic_rev(tp) == ASIC_REV_5750)
16695 tp->dma_rwctrl |= 0x003f0000;
16697 tp->dma_rwctrl |= 0x003f000f;
16699 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16700 tg3_asic_rev(tp) == ASIC_REV_5704) {
16701 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16702 u32 read_water = 0x7;
16704 /* If the 5704 is behind the EPB bridge, we can
16705 * do the less restrictive ONE_DMA workaround for
16706 * better performance.
16708 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16709 tg3_asic_rev(tp) == ASIC_REV_5704)
16710 tp->dma_rwctrl |= 0x8000;
16711 else if (ccval == 0x6 || ccval == 0x7)
16712 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16714 if (tg3_asic_rev(tp) == ASIC_REV_5703)
16716 /* Set bit 23 to enable PCIX hw bug fix */
16718 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16719 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16721 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
16722 /* 5780 always in PCIX mode */
16723 tp->dma_rwctrl |= 0x00144000;
16724 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
16725 /* 5714 always in PCIX mode */
16726 tp->dma_rwctrl |= 0x00148000;
16728 tp->dma_rwctrl |= 0x001b000f;
16731 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
16732 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16734 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16735 tg3_asic_rev(tp) == ASIC_REV_5704)
16736 tp->dma_rwctrl &= 0xfffffff0;
16738 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16739 tg3_asic_rev(tp) == ASIC_REV_5701) {
16740 /* Remove this if it causes problems for some boards. */
16741 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16743 /* On 5700/5701 chips, we need to set this bit.
16744 * Otherwise the chip will issue cacheline transactions
16745 * to streamable DMA memory with not all the byte
16746 * enables turned on. This is an error on several
16747 * RISC PCI controllers, in particular sparc64.
16749 * On 5703/5704 chips, this bit has been reassigned
16750 * a different meaning. In particular, it is used
16751 * on those chips to enable a PCI-X workaround.
16753 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16756 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16759 /* Unneeded, already done by tg3_get_invariants. */
16760 tg3_switch_clocks(tp);
16763 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16764 tg3_asic_rev(tp) != ASIC_REV_5701)
16767 /* It is best to perform DMA test with maximum write burst size
16768 * to expose the 5700/5701 write DMA bug.
16770 saved_dma_rwctrl = tp->dma_rwctrl;
16771 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16772 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16777 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16780 /* Send the buffer to the chip. */
16781 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
16783 dev_err(&tp->pdev->dev,
16784 "%s: Buffer write failed. err = %d\n",
16790 /* validate data reached card RAM correctly. */
16791 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16793 tg3_read_mem(tp, 0x2100 + (i*4), &val);
16794 if (le32_to_cpu(val) != p[i]) {
16795 dev_err(&tp->pdev->dev,
16796 "%s: Buffer corrupted on device! "
16797 "(%d != %d)\n", __func__, val, i);
16798 /* ret = -ENODEV here? */
16803 /* Now read it back. */
16804 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
16806 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16807 "err = %d\n", __func__, ret);
16812 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16816 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16817 DMA_RWCTRL_WRITE_BNDRY_16) {
16818 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16819 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16820 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16823 dev_err(&tp->pdev->dev,
16824 "%s: Buffer corrupted on read back! "
16825 "(%d != %d)\n", __func__, p[i], i);
16831 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
16837 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16838 DMA_RWCTRL_WRITE_BNDRY_16) {
16839 /* DMA test passed without adjusting DMA boundary,
16840 * now look for chipsets that are known to expose the
16841 * DMA bug without failing the test.
16843 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
16844 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16845 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16847 /* Safe to use the calculated DMA boundary. */
16848 tp->dma_rwctrl = saved_dma_rwctrl;
16851 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16855 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
16860 static void tg3_init_bufmgr_config(struct tg3 *tp)
16862 if (tg3_flag(tp, 57765_PLUS)) {
16863 tp->bufmgr_config.mbuf_read_dma_low_water =
16864 DEFAULT_MB_RDMA_LOW_WATER_5705;
16865 tp->bufmgr_config.mbuf_mac_rx_low_water =
16866 DEFAULT_MB_MACRX_LOW_WATER_57765;
16867 tp->bufmgr_config.mbuf_high_water =
16868 DEFAULT_MB_HIGH_WATER_57765;
16870 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16871 DEFAULT_MB_RDMA_LOW_WATER_5705;
16872 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16873 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16874 tp->bufmgr_config.mbuf_high_water_jumbo =
16875 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
16876 } else if (tg3_flag(tp, 5705_PLUS)) {
16877 tp->bufmgr_config.mbuf_read_dma_low_water =
16878 DEFAULT_MB_RDMA_LOW_WATER_5705;
16879 tp->bufmgr_config.mbuf_mac_rx_low_water =
16880 DEFAULT_MB_MACRX_LOW_WATER_5705;
16881 tp->bufmgr_config.mbuf_high_water =
16882 DEFAULT_MB_HIGH_WATER_5705;
16883 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16884 tp->bufmgr_config.mbuf_mac_rx_low_water =
16885 DEFAULT_MB_MACRX_LOW_WATER_5906;
16886 tp->bufmgr_config.mbuf_high_water =
16887 DEFAULT_MB_HIGH_WATER_5906;
16890 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16891 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
16892 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16893 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
16894 tp->bufmgr_config.mbuf_high_water_jumbo =
16895 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
16897 tp->bufmgr_config.mbuf_read_dma_low_water =
16898 DEFAULT_MB_RDMA_LOW_WATER;
16899 tp->bufmgr_config.mbuf_mac_rx_low_water =
16900 DEFAULT_MB_MACRX_LOW_WATER;
16901 tp->bufmgr_config.mbuf_high_water =
16902 DEFAULT_MB_HIGH_WATER;
16904 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16905 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
16906 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16907 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
16908 tp->bufmgr_config.mbuf_high_water_jumbo =
16909 DEFAULT_MB_HIGH_WATER_JUMBO;
16912 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
16913 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
16916 static char *tg3_phy_string(struct tg3 *tp)
16918 switch (tp->phy_id & TG3_PHY_ID_MASK) {
16919 case TG3_PHY_ID_BCM5400: return "5400";
16920 case TG3_PHY_ID_BCM5401: return "5401";
16921 case TG3_PHY_ID_BCM5411: return "5411";
16922 case TG3_PHY_ID_BCM5701: return "5701";
16923 case TG3_PHY_ID_BCM5703: return "5703";
16924 case TG3_PHY_ID_BCM5704: return "5704";
16925 case TG3_PHY_ID_BCM5705: return "5705";
16926 case TG3_PHY_ID_BCM5750: return "5750";
16927 case TG3_PHY_ID_BCM5752: return "5752";
16928 case TG3_PHY_ID_BCM5714: return "5714";
16929 case TG3_PHY_ID_BCM5780: return "5780";
16930 case TG3_PHY_ID_BCM5755: return "5755";
16931 case TG3_PHY_ID_BCM5787: return "5787";
16932 case TG3_PHY_ID_BCM5784: return "5784";
16933 case TG3_PHY_ID_BCM5756: return "5722/5756";
16934 case TG3_PHY_ID_BCM5906: return "5906";
16935 case TG3_PHY_ID_BCM5761: return "5761";
16936 case TG3_PHY_ID_BCM5718C: return "5718C";
16937 case TG3_PHY_ID_BCM5718S: return "5718S";
16938 case TG3_PHY_ID_BCM57765: return "57765";
16939 case TG3_PHY_ID_BCM5719C: return "5719C";
16940 case TG3_PHY_ID_BCM5720C: return "5720C";
16941 case TG3_PHY_ID_BCM5762: return "5762C";
16942 case TG3_PHY_ID_BCM8002: return "8002/serdes";
16943 case 0: return "serdes";
16944 default: return "unknown";
16948 static char *tg3_bus_string(struct tg3 *tp, char *str)
16950 if (tg3_flag(tp, PCI_EXPRESS)) {
16951 strcpy(str, "PCI Express");
16953 } else if (tg3_flag(tp, PCIX_MODE)) {
16954 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
16956 strcpy(str, "PCIX:");
16958 if ((clock_ctrl == 7) ||
16959 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
16960 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
16961 strcat(str, "133MHz");
16962 else if (clock_ctrl == 0)
16963 strcat(str, "33MHz");
16964 else if (clock_ctrl == 2)
16965 strcat(str, "50MHz");
16966 else if (clock_ctrl == 4)
16967 strcat(str, "66MHz");
16968 else if (clock_ctrl == 6)
16969 strcat(str, "100MHz");
16971 strcpy(str, "PCI:");
16972 if (tg3_flag(tp, PCI_HIGH_SPEED))
16973 strcat(str, "66MHz");
16975 strcat(str, "33MHz");
16977 if (tg3_flag(tp, PCI_32BIT))
16978 strcat(str, ":32-bit");
16980 strcat(str, ":64-bit");
16984 static void tg3_init_coal(struct tg3 *tp)
16986 struct ethtool_coalesce *ec = &tp->coal;
16988 memset(ec, 0, sizeof(*ec));
16989 ec->cmd = ETHTOOL_GCOALESCE;
16990 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
16991 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
16992 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
16993 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
16994 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
16995 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
16996 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
16997 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
16998 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
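/*
 * The defaults above are refined below: chips using the CLRTICK
 * mechanism get the *_CLRTCKS values, and 5705 and later parts do not
 * support the per-interrupt and statistics coalescing knobs, so those
 * are zeroed for them.
 */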
17000 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17001 HOSTCC_MODE_CLRTICK_TXBD)) {
17002 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17003 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17004 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17005 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17008 if (tg3_flag(tp, 5705_PLUS)) {
17009 ec->rx_coalesce_usecs_irq = 0;
17010 ec->tx_coalesce_usecs_irq = 0;
17011 ec->stats_block_coalesce_usecs = 0;
17015 static int tg3_init_one(struct pci_dev *pdev,
17016 const struct pci_device_id *ent)
17018 struct net_device *dev;
17019 struct tg3 *tp;
17020 int i, err, pm_cap;
17021 u32 sndmbx, rcvmbx, intmbx;
17022 char str[40];
17023 u64 dma_mask, persist_dma_mask;
17024 netdev_features_t features = 0;
17026 printk_once(KERN_INFO "%s\n", version);
17028 err = pci_enable_device(pdev);
17029 if (err) {
17030 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17031 return err;
17032 }
17034 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17035 if (err) {
17036 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17037 goto err_out_disable_pdev;
17038 }
17040 pci_set_master(pdev);
17042 /* Find power-management capability. */
17043 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
17045 dev_err(&pdev->dev,
17046 "Cannot find Power Management capability, aborting\n");
17048 goto err_out_free_res;
17051 err = pci_set_power_state(pdev, PCI_D0);
17052 if (err) {
17053 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
17054 goto err_out_free_res;
17055 }
17057 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17060 goto err_out_power_down;
17063 SET_NETDEV_DEV(dev, &pdev->dev);
17065 tp = netdev_priv(dev);
17068 tp->pm_cap = pm_cap;
17069 tp->rx_mode = TG3_DEF_RX_MODE;
17070 tp->tx_mode = TG3_DEF_TX_MODE;
17074 tp->msg_enable = tg3_debug;
17076 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17078 if (pdev_is_ssb_gige_core(pdev)) {
17079 tg3_flag_set(tp, IS_SSB_CORE);
17080 if (ssb_gige_must_flush_posted_writes(pdev))
17081 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17082 if (ssb_gige_one_dma_at_once(pdev))
17083 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17084 if (ssb_gige_have_roboswitch(pdev))
17085 tg3_flag_set(tp, ROBOSWITCH);
17086 if (ssb_gige_is_rgmii(pdev))
17087 tg3_flag_set(tp, RGMII_MODE);
17090 /* The word/byte swap controls here control register access byte
17091 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17092 * register.
17093 */
17094 tp->misc_host_ctrl =
17095 MISC_HOST_CTRL_MASK_PCI_INT |
17096 MISC_HOST_CTRL_WORD_SWAP |
17097 MISC_HOST_CTRL_INDIR_ACCESS |
17098 MISC_HOST_CTRL_PCISTATE_RW;
17100 /* The NONFRM (non-frame) byte/word swap controls take effect
17101 * on descriptor entries, anything which isn't packet data.
17103 * The StrongARM chips on the board (one for tx, one for rx)
17104 * are running in big-endian mode.
17105 */
17106 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17107 GRC_MODE_WSWAP_NONFRM_DATA);
17108 #ifdef __BIG_ENDIAN
17109 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17110 #endif
17111 spin_lock_init(&tp->lock);
17112 spin_lock_init(&tp->indirect_lock);
17113 INIT_WORK(&tp->reset_task, tg3_reset_task);
17115 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17117 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17119 goto err_out_free_dev;
17122 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17123 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17124 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17125 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17126 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17127 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17128 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17129 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17130 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17131 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17132 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17133 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
17134 tg3_flag_set(tp, ENABLE_APE);
17135 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17136 if (!tp->aperegs) {
17137 dev_err(&pdev->dev,
17138 "Cannot map APE registers, aborting\n");
17140 goto err_out_iounmap;
17144 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17145 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17147 dev->ethtool_ops = &tg3_ethtool_ops;
17148 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17149 dev->netdev_ops = &tg3_netdev_ops;
17150 dev->irq = pdev->irq;
17152 err = tg3_get_invariants(tp, ent);
17153 if (err) {
17154 dev_err(&pdev->dev,
17155 "Problem fetching invariants of chip, aborting\n");
17156 goto err_out_apeunmap;
17157 }
17159 /* The EPB bridge inside 5714, 5715, and 5780 and any
17160 * device behind the EPB cannot support DMA addresses > 40-bit.
17161 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17162 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17163 * do DMA address check in tg3_start_xmit().
17164 */
17165 if (tg3_flag(tp, IS_5788))
17166 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17167 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17168 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17169 #ifdef CONFIG_HIGHMEM
17170 dma_mask = DMA_BIT_MASK(64);
17173 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
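/*
 * dma_mask covers streaming (per-packet) mappings while persist_dma_mask
 * bounds the coherent/consistent allocations; on HIGHMEM systems the
 * streaming mask can stay at 64 bits even when the 40-bit DMA bug forces
 * the persistent mask down.
 */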
17175 /* Configure DMA attributes. */
17176 if (dma_mask > DMA_BIT_MASK(32)) {
17177 err = pci_set_dma_mask(pdev, dma_mask);
17178 if (!err) {
17179 features |= NETIF_F_HIGHDMA;
17180 err = pci_set_consistent_dma_mask(pdev,
17181 persist_dma_mask);
17182 if (err < 0) {
17183 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17184 "DMA for consistent allocations\n");
17185 goto err_out_apeunmap;
17186 }
17187 }
17188 }
17189 if (err || dma_mask == DMA_BIT_MASK(32)) {
17190 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17191 if (err) {
17192 dev_err(&pdev->dev,
17193 "No usable DMA configuration, aborting\n");
17194 goto err_out_apeunmap;
17195 }
17196 }
17198 tg3_init_bufmgr_config(tp);
17200 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
17202 /* 5700 B0 chips do not support checksumming correctly due
17203 * to hardware bugs.
17204 */
17205 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17206 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17208 if (tg3_flag(tp, 5755_PLUS))
17209 features |= NETIF_F_IPV6_CSUM;
17212 /* TSO is on by default on chips that support hardware TSO.
17213 * Firmware TSO on older chips gives lower performance, so it
17214 * is off by default, but can be enabled using ethtool.
17215 */
17216 if ((tg3_flag(tp, HW_TSO_1) ||
17217 tg3_flag(tp, HW_TSO_2) ||
17218 tg3_flag(tp, HW_TSO_3)) &&
17219 (features & NETIF_F_IP_CSUM))
17220 features |= NETIF_F_TSO;
17221 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17222 if (features & NETIF_F_IPV6_CSUM)
17223 features |= NETIF_F_TSO6;
17224 if (tg3_flag(tp, HW_TSO_3) ||
17225 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17226 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17227 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17228 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17229 tg3_asic_rev(tp) == ASIC_REV_57780)
17230 features |= NETIF_F_TSO_ECN;
17233 dev->features |= features;
17234 dev->vlan_features |= features;
17236 /*
17237 * Add loopback capability only for a subset of devices that support
17238 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17239 * loopback for the remaining devices.
17240 */
17241 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17242 !tg3_flag(tp, CPMU_PRESENT))
17243 /* Add the loopback capability */
17244 features |= NETIF_F_LOOPBACK;
17246 dev->hw_features |= features;
17248 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17249 !tg3_flag(tp, TSO_CAPABLE) &&
17250 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17251 tg3_flag_set(tp, MAX_RXPEND_64);
17252 tp->rx_pending = 63;
17255 err = tg3_get_device_address(tp);
17256 if (err) {
17257 dev_err(&pdev->dev,
17258 "Could not obtain valid ethernet address, aborting\n");
17259 goto err_out_apeunmap;
17260 }
17262 /*
17263 * Reset chip in case UNDI or EFI driver did not shutdown
17264 * nicely; otherwise the DMA self test will enable WDMAC and we'll
17265 * see (spurious) pending DMA on the PCI bus at that point.
17266 */
17267 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17268 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17269 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17270 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17273 err = tg3_test_dma(tp);
17274 if (err) {
17275 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17276 goto err_out_apeunmap;
17277 }
17279 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17280 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17281 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
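/*
 * Hand each NAPI context its interrupt, RX-return-consumer and
 * TX-producer mailboxes.  Vector 0 keeps the legacy mailboxes; when
 * MSI-X/RSS is in use the values are advanced each iteration so every
 * vector gets its own set (see the comment further down in this loop).
 */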
17282 for (i = 0; i < tp->irq_max; i++) {
17283 struct tg3_napi *tnapi = &tp->napi[i];
17286 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17288 tnapi->int_mbox = intmbx;
17294 tnapi->consmbox = rcvmbx;
17295 tnapi->prodmbox = sndmbx;
17298 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17300 tnapi->coal_now = HOSTCC_MODE_NOW;
17302 if (!tg3_flag(tp, SUPPORT_MSIX))
17303 break;
17305 /*
17306 * If we support MSIX, we'll be using RSS. If we're using
17307 * RSS, the first vector only handles link interrupts and the
17308 * remaining vectors handle rx and tx interrupts. Reuse the
17309 * mailbox values for the next iteration. The values we set up
17310 * above are still useful for the single vectored mode.
17311 */
17325 pci_set_drvdata(pdev, dev);
17327 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17328 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17329 tg3_asic_rev(tp) == ASIC_REV_5762)
17330 tg3_flag_set(tp, PTP_CAPABLE);
17332 if (tg3_flag(tp, 5717_PLUS)) {
17333 /* Resume a low-power mode */
17334 tg3_frob_aux_power(tp, false);
17337 tg3_timer_init(tp);
17339 tg3_carrier_off(tp);
17341 err = register_netdev(dev);
17342 if (err) {
17343 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17344 goto err_out_apeunmap;
17345 }
17347 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17348 tp->board_part_number,
17349 tg3_chip_rev_id(tp),
17350 tg3_bus_string(tp, str),
17353 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17354 struct phy_device *phydev;
17355 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
17357 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17358 phydev->drv->name, dev_name(&phydev->dev));
17362 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17363 ethtype = "10/100Base-TX";
17364 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17365 ethtype = "1000Base-SX";
17367 ethtype = "10/100/1000Base-T";
17369 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17370 "(WireSpeed[%d], EEE[%d])\n",
17371 tg3_phy_string(tp), ethtype,
17372 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17373 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17376 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17377 (dev->features & NETIF_F_RXCSUM) != 0,
17378 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17379 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17380 tg3_flag(tp, ENABLE_ASF) != 0,
17381 tg3_flag(tp, TSO_CAPABLE) != 0);
17382 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17384 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17385 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17387 pci_save_state(pdev);
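/*
 * The config space saved here is what pci_restore_state() brings back in
 * tg3_io_slot_reset() after a PCI error, so it is captured only once the
 * device is fully set up.
 */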
17393 iounmap(tp->aperegs);
17394 tp->aperegs = NULL;
17406 err_out_power_down:
17407 pci_set_power_state(pdev, PCI_D3hot);
17410 pci_release_regions(pdev);
17412 err_out_disable_pdev:
17413 pci_disable_device(pdev);
17414 pci_set_drvdata(pdev, NULL);
17418 static void tg3_remove_one(struct pci_dev *pdev)
17420 struct net_device *dev = pci_get_drvdata(pdev);
17423 struct tg3 *tp = netdev_priv(dev);
17425 release_firmware(tp->fw);
17427 tg3_reset_task_cancel(tp);
17429 if (tg3_flag(tp, USE_PHYLIB)) {
17434 unregister_netdev(dev);
17436 iounmap(tp->aperegs);
17437 tp->aperegs = NULL;
17444 pci_release_regions(pdev);
17445 pci_disable_device(pdev);
17446 pci_set_drvdata(pdev, NULL);
17450 #ifdef CONFIG_PM_SLEEP
17451 static int tg3_suspend(struct device *device)
17453 struct pci_dev *pdev = to_pci_dev(device);
17454 struct net_device *dev = pci_get_drvdata(pdev);
17455 struct tg3 *tp = netdev_priv(dev);
17458 if (!netif_running(dev))
17461 tg3_reset_task_cancel(tp);
17463 tg3_netif_stop(tp);
17465 tg3_timer_stop(tp);
17467 tg3_full_lock(tp, 1);
17468 tg3_disable_ints(tp);
17469 tg3_full_unlock(tp);
17471 netif_device_detach(dev);
17473 tg3_full_lock(tp, 0);
17474 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17475 tg3_flag_clear(tp, INIT_COMPLETE);
17476 tg3_full_unlock(tp);
17478 err = tg3_power_down_prepare(tp);
17479 if (err) {
17480 int err2;
17482 tg3_full_lock(tp, 0);
17484 tg3_flag_set(tp, INIT_COMPLETE);
17485 err2 = tg3_restart_hw(tp, true);
17489 tg3_timer_start(tp);
17491 netif_device_attach(dev);
17492 tg3_netif_start(tp);
17495 tg3_full_unlock(tp);
17504 static int tg3_resume(struct device *device)
17506 struct pci_dev *pdev = to_pci_dev(device);
17507 struct net_device *dev = pci_get_drvdata(pdev);
17508 struct tg3 *tp = netdev_priv(dev);
17511 if (!netif_running(dev))
17514 netif_device_attach(dev);
17516 tg3_full_lock(tp, 0);
17518 tg3_flag_set(tp, INIT_COMPLETE);
17519 err = tg3_restart_hw(tp,
17520 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
17524 tg3_timer_start(tp);
17526 tg3_netif_start(tp);
17529 tg3_full_unlock(tp);
17537 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17538 #define TG3_PM_OPS (&tg3_pm_ops)
17540 #else
17542 #define TG3_PM_OPS NULL
17544 #endif /* CONFIG_PM_SLEEP */
17546 /**
17547 * tg3_io_error_detected - called when PCI error is detected
17548 * @pdev: Pointer to PCI device
17549 * @state: The current pci connection state
17550 *
17551 * This function is called after a PCI bus error affecting
17552 * this device has been detected.
17553 */
17554 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17555 pci_channel_state_t state)
17557 struct net_device *netdev = pci_get_drvdata(pdev);
17558 struct tg3 *tp = netdev_priv(netdev);
17559 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17561 netdev_info(netdev, "PCI I/O error detected\n");
17565 if (!netif_running(netdev))
17570 tg3_netif_stop(tp);
17572 tg3_timer_stop(tp);
17574 /* Want to make sure that the reset task doesn't run */
17575 tg3_reset_task_cancel(tp);
17577 netif_device_detach(netdev);
17579 /* Clean up software state, even if MMIO is blocked */
17580 tg3_full_lock(tp, 0);
17581 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17582 tg3_full_unlock(tp);
17585 if (state == pci_channel_io_perm_failure)
17586 err = PCI_ERS_RESULT_DISCONNECT;
17587 else
17588 pci_disable_device(pdev);
17595 /**
17596 * tg3_io_slot_reset - called after the pci bus has been reset.
17597 * @pdev: Pointer to PCI device
17598 *
17599 * Restart the card from scratch, as if from a cold-boot.
17600 * At this point, the card has experienced a hard reset,
17601 * followed by fixups by BIOS, and has its config space
17602 * set up identically to what it was at cold boot.
17603 */
17604 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17606 struct net_device *netdev = pci_get_drvdata(pdev);
17607 struct tg3 *tp = netdev_priv(netdev);
17608 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17613 if (pci_enable_device(pdev)) {
17614 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
17618 pci_set_master(pdev);
17619 pci_restore_state(pdev);
17620 pci_save_state(pdev);
17622 if (!netif_running(netdev)) {
17623 rc = PCI_ERS_RESULT_RECOVERED;
17627 err = tg3_power_up(tp);
17631 rc = PCI_ERS_RESULT_RECOVERED;
17639 /**
17640 * tg3_io_resume - called when traffic can start flowing again.
17641 * @pdev: Pointer to PCI device
17642 *
17643 * This callback is called when the error recovery driver tells
17644 * us that it's OK to resume normal operation.
17645 */
17646 static void tg3_io_resume(struct pci_dev *pdev)
17648 struct net_device *netdev = pci_get_drvdata(pdev);
17649 struct tg3 *tp = netdev_priv(netdev);
17654 if (!netif_running(netdev))
17657 tg3_full_lock(tp, 0);
17658 tg3_flag_set(tp, INIT_COMPLETE);
17659 err = tg3_restart_hw(tp, true);
17661 tg3_full_unlock(tp);
17662 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17666 netif_device_attach(netdev);
17668 tg3_timer_start(tp);
17670 tg3_netif_start(tp);
17672 tg3_full_unlock(tp);
17680 static const struct pci_error_handlers tg3_err_handler = {
17681 .error_detected = tg3_io_error_detected,
17682 .slot_reset = tg3_io_slot_reset,
17683 .resume = tg3_io_resume
17686 static struct pci_driver tg3_driver = {
17687 .name = DRV_MODULE_NAME,
17688 .id_table = tg3_pci_tbl,
17689 .probe = tg3_init_one,
17690 .remove = tg3_remove_one,
17691 .err_handler = &tg3_err_handler,
17692 .driver.pm = TG3_PM_OPS,
17695 static int __init tg3_init(void)
17697 return pci_register_driver(&tg3_driver);
17700 static void __exit tg3_cleanup(void)
17702 pci_unregister_driver(&tg3_driver);
17705 module_init(tg3_init);
17706 module_exit(tg3_cleanup);